
[v2] Fix locking in flush_backlog

Message ID alpine.DEB.1.00.1003231635190.360@pokey.mtv.corp.google.com
State Accepted, archived
Delegated to: David Miller

Commit Message

Tom Herbert March 23, 2010, 11:39 p.m. UTC
Need to take spinlocks when dequeuing from input_pkt_queue in flush_backlog.
Also, flush_backlog can now be called directly from netdev_run_todo.

Signed-off-by: Tom Herbert <therbert@google.com>
---
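To see why the lock is needed, consider a rough userspace analogue of the pattern the patch adopts. This is a sketch only, not kernel code; every name in it (struct backlog, struct pkt, NCPUS) is invented for illustration. Each queue belongs to one "CPU", but the flush path dequeues from queues it does not own, so it must take each queue's lock; the flush itself is a plain loop over the queues, mirroring the for_each_online_cpu() loop that replaces the on_each_cpu() IPI broadcast.

/*
 * Hypothetical userspace analogue of the fix -- NOT kernel code; all
 * names (struct backlog, NCPUS, ...) are invented for illustration.
 * One queue per "CPU", each protected by its own lock.  flush_backlog()
 * dequeues from queues it does not own, so it must take the lock,
 * the same reason the patch locks input_pkt_queue.lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

struct pkt {
	struct pkt *next;
	int dev_id;                 /* stands in for skb->dev */
};

struct backlog {
	pthread_mutex_t lock;       /* stands in for input_pkt_queue.lock */
	struct pkt *head;
};

static struct backlog queues[NCPUS];

/* Analogue of the patched flush_backlog(): lock, unlink matches, free. */
static void flush_backlog(int dev_id, int cpu)
{
	struct backlog *q = &queues[cpu];
	struct pkt **pp;

	pthread_mutex_lock(&q->lock);
	pp = &q->head;
	while (*pp) {
		struct pkt *p = *pp;

		if (p->dev_id == dev_id) {
			*pp = p->next;      /* __skb_unlink() analogue */
			free(p);            /* kfree_skb() analogue */
		} else {
			pp = &p->next;
		}
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	int cpu;

	/* Prefill each queue with one packet; in the real code packets
	 * arrive concurrently, which is exactly why the lock matters. */
	for (cpu = 0; cpu < NCPUS; cpu++) {
		struct pkt *p = malloc(sizeof(*p));

		p->dev_id = cpu % 2;
		p->next = NULL;
		pthread_mutex_init(&queues[cpu].lock, NULL);
		queues[cpu].head = p;
	}

	/* Analogue of the new netdev_run_todo() loop: no cross-CPU
	 * calls (on_each_cpu), just walk every queue under its lock. */
	for (cpu = 0; cpu < NCPUS; cpu++)
		flush_backlog(0, cpu);

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("queue %d: %s\n", cpu,
		       queues[cpu].head ? "packet left" : "empty");
	return 0;
}

Built with gcc -pthread, queues holding a packet for device 0 come out empty and the rest are untouched. The design point is the same as in the patch: once the dequeue takes the queue lock, the flush is safe from any context, which is what lets netdev_run_todo() call flush_backlog() directly instead of scheduling it on every CPU.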

Comments

Eric Dumazet March 24, 2010, 5:34 a.m. UTC | #1
On Tuesday, March 23, 2010 at 16:39 -0700, Tom Herbert wrote:
> Need to take spinlocks when dequeuing from input_pkt_queue in flush_backlog.
> Also, flush_backlog can now be called directly from netdev_run_todo.
> 
> Signed-off-by: Tom Herbert <therbert@google.com>

Acked-by: Eric Dumazet <eric.dumazet@gmail.com>

Thanks Tom

> ---
> diff --git a/net/core/dev.c b/net/core/dev.c
> index a03aab4..5e3dc28 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -2766,17 +2766,19 @@ int netif_receive_skb(struct sk_buff *skb)
>  EXPORT_SYMBOL(netif_receive_skb);
>  
>  /* Network device is going away, flush any packets still pending  */
> -static void flush_backlog(void *arg)
> +static void flush_backlog(struct net_device *dev, int cpu)
>  {
> -	struct net_device *dev = arg;
> -	struct softnet_data *queue = &__get_cpu_var(softnet_data);
> +	struct softnet_data *queue = &per_cpu(softnet_data, cpu);
>  	struct sk_buff *skb, *tmp;
> +	unsigned long flags;
>  
> +	spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
>  	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
>  		if (skb->dev == dev) {
>  			__skb_unlink(skb, &queue->input_pkt_queue);
>  			kfree_skb(skb);
>  		}
> +	spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
>  }
>  
>  static int napi_gro_complete(struct sk_buff *skb)
> @@ -5545,6 +5547,7 @@ void netdev_run_todo(void)
>  	while (!list_empty(&list)) {
>  		struct net_device *dev
>  			= list_first_entry(&list, struct net_device, todo_list);
> +		int i;
>  		list_del(&dev->todo_list);
>  
>  		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
> @@ -5556,7 +5559,8 @@ void netdev_run_todo(void)
>  
>  		dev->reg_state = NETREG_UNREGISTERED;
>  
> -		on_each_cpu(flush_backlog, dev, 1);
> +		for_each_online_cpu(i)
> +			flush_backlog(dev, i);
>  
>  		netdev_wait_allrefs(dev);
>  

David Miller March 24, 2010, 6:17 a.m. UTC | #2
From: Eric Dumazet <eric.dumazet@gmail.com>
Date: Wed, 24 Mar 2010 06:34:42 +0100

> On Tuesday, March 23, 2010 at 16:39 -0700, Tom Herbert wrote:
>> Need to take spinlocks when dequeuing from input_pkt_queue in flush_backlog.
>> Also, flush_backlog can now be called directly from netdev_run_todo.
>> 
>> Signed-off-by: Tom Herbert <therbert@google.com>
> 
> Acked-by: Eric Dumazet <eric.dumazet@gmail.com>

Applied, thanks everyone.

Patch

diff --git a/net/core/dev.c b/net/core/dev.c
index a03aab4..5e3dc28 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2766,17 +2766,19 @@ int netif_receive_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending  */
-static void flush_backlog(void *arg)
+static void flush_backlog(struct net_device *dev, int cpu)
 {
-	struct net_device *dev = arg;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *queue = &per_cpu(softnet_data, cpu);
 	struct sk_buff *skb, *tmp;
+	unsigned long flags;
 
+	spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
 	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &queue->input_pkt_queue);
 			kfree_skb(skb);
 		}
+	spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -5545,6 +5547,7 @@ void netdev_run_todo(void)
 	while (!list_empty(&list)) {
 		struct net_device *dev
 			= list_first_entry(&list, struct net_device, todo_list);
+		int i;
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5556,7 +5559,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
+		for_each_online_cpu(i)
+			flush_backlog(dev, i);
 
 		netdev_wait_allrefs(dev);