
netpoll: allow execution of multiple rx_hooks per interface

Message ID 4B44F895.9080205@gmail.com
State Changes Requested, archived
Delegated to: David Miller

Commit Message

Daniel Borkmann Jan. 6, 2010, 8:54 p.m. UTC
Hi,

this patch allows the registration and _execution_ of multiple netpoll
rx_hooks per interface. Currently, it is possible to register multiple
netpoll structures with one interface, _but_ only a single rx_hook (the
one from the netpoll struct that was registered last) is ever executed,
which was an oversight in the implementation [1].
So, this patch fixes that. I've successfully tested it on 2.6.32.2 by
registering multiple rx_hook clients several times. I'd appreciate
comments / feedback.

Thanks,
Daniel

[1] http://lwn.net/Articles/140852/
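
For context, the pre-patch behaviour being fixed, reduced to a minimal
sketch (taken from the '-' lines of the diff below, trimmed for
illustration -- not a complete excerpt of net/core/netpoll.c):

        /* struct netpoll_info remembers only a single rx client: */
        struct netpoll *rx_np;

        /* netpoll_setup() simply overwrites any earlier registration: */
        npinfo->rx_np = np;

        /* so __netpoll_rx() only ever calls the hook registered last: */
        np->rx_hook(np, ntohs(uh->source), (char *)(uh + 1),
                    ulen - sizeof(struct udphdr));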

Comments

Matt Mackall Jan. 7, 2010, 3:54 a.m. UTC | #1
On Wed, 2010-01-06 at 21:54 +0100, Daniel Borkmann wrote:
> Hi,
> 
> this patch allows the registration and _execution_ of multiple netpoll
> rx_hooks per interface. Currently, it is possible to register multiple
> netpoll structures with one interface, _but_ only a single rx_hook (the
> one from the netpoll struct that was registered last) is ever executed,
> which was an oversight in the implementation [1].
> So, this patch fixes that. I've successfully tested it on 2.6.32.2 by
> registering multiple rx_hook clients several times. I'd appreciate
> comments / feedback.

(grumbles about cc:)

Please inline patches so they can be reviewed easily in reply.


-       struct netpoll *np = npi->rx_np;
+       struct netpoll **np = &npi->rx_np;
 
-       if (!np)
+       if (!(*np))

This makes everything horrible. Can you avoid the double indirection?
Using a list head might be a good answer.
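
For illustration, the list_head-based layout Matt suggests might look
roughly like this; the field names rx and rx_np below are assumptions
for the sketch, not something taken from the patch or the reply, and
locking/RCU details are elided:

        /* in struct netpoll:      struct list_head rx;    */
        /* in struct netpoll_info: struct list_head rx_np; */

        /* registration, under npinfo->rx_lock: */
        list_add_tail_rcu(&np->rx, &npinfo->rx_np);

        /* removal: */
        list_del_rcu(&np->rx);

        /* receive path in __netpoll_rx(), no double indirection: */
        list_for_each_entry(np, &npinfo->rx_np, rx) {
                if (np->local_port && np->local_port != ntohs(uh->dest))
                        continue;
                np->rx_hook(np, ntohs(uh->source), (char *)(uh + 1),
                            ulen - sizeof(struct udphdr));
                hits++;
        }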
David Miller Jan. 7, 2010, 9:02 a.m. UTC | #2
From: Matt Mackall <mpm@selenic.com>
Date: Wed, 06 Jan 2010 21:54:05 -0600

> Please inline patches so they can be reviewed easily in reply.
> 
> 
> -       struct netpoll *np = npi->rx_np;
> +       struct netpoll **np = &npi->rx_np;
>  
> -       if (!np)
> +       if (!(*np))
> 
> This makes everything horrible. Can you avoid the double indirection?
> Using a list head might be a good answer.
> 

Agreed on all counts.
Daniel Borkmann Jan. 7, 2010, 7:06 p.m. UTC | #3
David Miller wrote:
> From: Matt Mackall <mpm@selenic.com>
> Date: Wed, 06 Jan 2010 21:54:05 -0600
> 
>> Please inline patches so they can be reviewed easily in reply.
>>
>>
>> -       struct netpoll *np = npi->rx_np;
>> +       struct netpoll **np = &npi->rx_np;
>>  
>> -       if (!np)
>> +       if (!(*np))
>>
>> This makes everything horrible. Can you avoid the double indirection?
>> Using a list head might be a good answer.
>>
> 
> Agreed on all counts.
> 

Agreed on the double indirection, I'll fix it.

I had already considered the list_head structure, but I was of the opinion
that a doubly linked list might not be necessary for this, so I did it that
way instead (compare: the kernel notifiers by Alan Cox). If you insist on
it, I'll fix it, of course ;)

Best regards,
Daniel

P.S.: Sorry, Matt, for not CCing you; I mainly took those addresses from Jeff's post.
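
For reference, the registration pattern Daniel is comparing his approach
to looks roughly like the sketch below. It is modelled loosely on
notifier_chain_register() in kernel/notifier.c, simplified (the priority
ordering of the real code is dropped) and not a verbatim copy:

        /* a chain threaded through a plain 'next' pointer in each
         * client, walked with a pointer-to-pointer to find the tail */
        static int chain_register(struct notifier_block **nl,
                                  struct notifier_block *n)
        {
                while (*nl) {
                        /* the real code orders entries by n->priority here */
                        nl = &(*nl)->next;
                }
                n->next = *nl;
                rcu_assign_pointer(*nl, n);
                return 0;
        }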

Patch

Signed-off-by: Daniel Borkmann <danborkmann@googlemail.com>

diff -Nur a/include/linux/netpoll.h b/include/linux/netpoll.h
--- a/include/linux/netpoll.h	2010-01-05 23:52:58.000000000 +0100
+++ b/include/linux/netpoll.h	2010-01-06 00:12:54.000000000 +0100
@@ -21,6 +21,7 @@ 
 	__be32 local_ip, remote_ip;
 	u16 local_port, remote_port;
 	u8 remote_mac[ETH_ALEN];
+	struct netpoll *next; /* rx list for net device */
 };
 
 struct netpoll_info {
diff -Nur a/net/core/netpoll.c b/net/core/netpoll.c
--- a/net/core/netpoll.c	2010-01-05 23:53:07.000000000 +0100
+++ b/net/core/netpoll.c	2010-01-06 20:44:59.000000000 +0100
@@ -493,13 +493,13 @@ 
 
 int __netpoll_rx(struct sk_buff *skb)
 {
-	int proto, len, ulen;
+	int proto, len, ulen, hits;
 	struct iphdr *iph;
 	struct udphdr *uh;
 	struct netpoll_info *npi = skb->dev->npinfo;
-	struct netpoll *np = npi->rx_np;
+	struct netpoll **np = &npi->rx_np;
 
-	if (!np)
+	if (!(*np))
 		goto out;
 	if (skb->dev->type != ARPHRD_ETHER)
 		goto out;
@@ -551,16 +551,23 @@ 
 		goto out;
 	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
 		goto out;
-	if (np->local_ip && np->local_ip != iph->daddr)
-		goto out;
-	if (np->remote_ip && np->remote_ip != iph->saddr)
-		goto out;
-	if (np->local_port && np->local_port != ntohs(uh->dest))
-		goto out;
 
-	np->rx_hook(np, ntohs(uh->source),
-		    (char *)(uh+1),
-		    ulen - sizeof(struct udphdr));
+	for (hits = 0; (*np) != NULL; np = &((*np)->next)) {
+		if ((*np)->local_ip && (*np)->local_ip != iph->daddr)
+			continue;
+		if ((*np)->remote_ip && (*np)->remote_ip != iph->saddr)
+			continue;
+		if ((*np)->local_port && (*np)->local_port != ntohs(uh->dest))
+			continue;
+
+		(*np)->rx_hook((*np), ntohs(uh->source),
+			       (char *)(uh+1),
+			       ulen - sizeof(struct udphdr));
+		hits++;
+	}
+
+	if (!hits)
+		goto out;
 
 	kfree_skb(skb);
 	return 1;
@@ -679,6 +686,45 @@ 
 	return -1;
 }
 
+static int netpoll_rx_list_register(struct netpoll **np_head,
+				    struct netpoll *np)
+{
+	while ((*np_head) != NULL) {
+		if ((*np_head) == np)
+			return -EEXIST;
+		np_head = &((*np_head)->next);
+	}
+
+	np->next = *np_head;
+	rcu_assign_pointer(*np_head, np);
+	return 0;
+}
+
+static int netpoll_rx_list_unregister(struct netpoll **np_head,
+				      struct netpoll *np)
+{
+	while ((*np_head) != NULL) {
+		if ((*np_head) == np) {
+			rcu_assign_pointer(*np_head, np->next);
+			return 0;
+		}
+		np_head = &((*np_head)->next);
+	}
+	return -ENOENT;
+}
+
+static void netpoll_rx_list_nullify(struct netpoll **np_head)
+{
+	struct netpoll *np = NULL;
+
+	while ((*np_head) != NULL) {
+		np = (*np_head);
+		np_head = &((*np_head)->next);
+		np->dev = NULL;
+		np->next = NULL;
+	}
+}
+
 int netpoll_setup(struct netpoll *np)
 {
 	struct net_device *ndev = NULL;
@@ -695,6 +741,7 @@ 
 		return -ENODEV;
 	}
 
+	np->next = NULL;
 	np->dev = ndev;
 	if (!ndev->npinfo) {
 		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
@@ -785,7 +832,7 @@ 
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
 		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
-		npinfo->rx_np = np;
+		netpoll_rx_list_register(&npinfo->rx_np, np);
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
 
@@ -801,9 +848,14 @@ 
 	return 0;
 
  release:
-	if (!ndev->npinfo)
+	if (!ndev->npinfo) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		netpoll_rx_list_nullify(&npinfo->rx_np);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+
 		kfree(npinfo);
-	np->dev = NULL;
+	}
+
 	dev_put(ndev);
 	return err;
 }
@@ -823,10 +875,11 @@ 
 	if (np->dev) {
 		npinfo = np->dev->npinfo;
 		if (npinfo) {
-			if (npinfo->rx_np == np) {
+			if (npinfo->rx_np) {
 				spin_lock_irqsave(&npinfo->rx_lock, flags);
-				npinfo->rx_np = NULL;
-				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+				netpoll_rx_list_unregister(&npinfo->rx_np, np);
+				if (!npinfo->rx_np)
+					npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 			}
 
@@ -845,7 +898,10 @@ 
 		dev_put(np->dev);
 	}
 
+	np->next = NULL;
 	np->dev = NULL;
+
+	synchronize_rcu();
 }
 
 int netpoll_trap(void)
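
Finally, a hypothetical usage sketch of what the patch enables: two
independent rx clients on the same device, each with its own rx_hook.
The names, ports, device and init wrapper below are made up for
illustration and are not part of the patch:

        #include <linux/init.h>
        #include <linux/module.h>
        #include <linux/netpoll.h>

        /* each client gets its own payload callback */
        static void rx_hook_a(struct netpoll *np, int src_port, char *msg, int len)
        {
                /* consume client A's UDP payload */
        }

        static void rx_hook_b(struct netpoll *np, int src_port, char *msg, int len)
        {
                /* consume client B's UDP payload */
        }

        static struct netpoll np_a = {
                .name       = "client-a",
                .dev_name   = "eth0",
                .local_port = 6660,
                .rx_hook    = rx_hook_a,
        };

        static struct netpoll np_b = {
                .name       = "client-b",
                .dev_name   = "eth0",
                .local_port = 6661,
                .rx_hook    = rx_hook_b,
        };

        static int __init rx_clients_init(void)
        {
                int err;

                /* with the patch both registrations coexist and both hooks
                 * run from __netpoll_rx(); previously the second call would
                 * have shadowed the first client's hook */
                err = netpoll_setup(&np_a);
                if (err)
                        return err;
                return netpoll_setup(&np_b);
        }
        module_init(rx_clients_init);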