From patchwork Fri Dec  2 19:56:25 2011
X-Patchwork-Submitter: Haiyang Zhang
X-Patchwork-Id: 128950
X-Patchwork-Delegate: davem@davemloft.net
From: Haiyang Zhang <haiyangz@microsoft.com>
To: haiyangz@microsoft.com, kys@microsoft.com, davem@davemloft.net,
	gregkh@suse.de, linux-kernel@vger.kernel.org,
	netdev@vger.kernel.org, devel@linuxdriverproject.org
Subject: [PATCH] net/hyperv: Fix the stop/wake queue mechanism
Date: Fri, 2 Dec 2011 11:56:25 -0800
Message-Id: <1322855785-22776-1-git-send-email-haiyangz@microsoft.com>
X-Mailer: git-send-email 1.7.4.1
List-ID: <netdev.vger.kernel.org>

The ring buffer is used only to pass metadata for outbound packets; the
actual payload is transferred by DMA from the host. A stop/wake queue
mechanism that counts pages sent and compares them against the number
of pages in the ring buffer is therefore wrong. There is also a race
condition in the stop/wake queue calls that can stop the xmit queue
forever.

The new stop/wake queue mechanism is based on the actual bytes used by
outbound packets in the ring buffer. Checking the number of outstanding
sends after stopping the queue prevents the race in which the wake
could otherwise run before the stop and leave the queue stopped with no
completion left to wake it.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reported-by: Long Li
---
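Note for reviewers: the lost-wake race that the recheck closes can be
replayed outside the kernel. Below is a compilable C11 toy model, not
driver code; the netif_*() stubs, the plain atomics, and the
single-threaded replay in main() are simplifications that only borrow
the names used in this patch.

/* Toy model of the stop/wake ordering; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_outstanding_sends;
static atomic_bool queue_stopped;

static void netif_stop_queue(void) { atomic_store(&queue_stopped, true); }
static void netif_wake_queue(void) { atomic_store(&queue_stopped, false); }
static bool netif_queue_stopped(void) { return atomic_load(&queue_stopped); }

/* Completion path: runs when the host consumes a send. */
static void on_send_completion(void)
{
	atomic_fetch_sub(&num_outstanding_sends, 1);
	if (netif_queue_stopped())
		netif_wake_queue();
}

/* Xmit path after the ring buffer rejects a send (-EAGAIN). */
static void on_ring_full(void)
{
	netif_stop_queue();
	/*
	 * The last completion may already have run between the failed
	 * send and netif_stop_queue(); in that case no completion is
	 * left to wake the queue, so wake it here ourselves.
	 */
	if (atomic_load(&num_outstanding_sends) < 1)
		netif_wake_queue();
}

int main(void)
{
	atomic_store(&num_outstanding_sends, 1);
	on_send_completion();	/* completion wins the race...      */
	on_ring_full();		/* ...yet the queue self-wakes here */
	printf("queue stopped: %s\n", netif_queue_stopped() ? "yes" : "no");
	return 0;
}

Without the recheck of num_outstanding_sends after netif_stop_queue(),
the replay above would end with the queue stopped forever.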
 drivers/net/hyperv/netvsc.c     |   14 +++++++++++---
 drivers/net/hyperv/netvsc_drv.c |   24 +-----------------------
 2 files changed, 12 insertions(+), 26 deletions(-)

diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4a807e4..b6ac152 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -435,6 +435,9 @@ static void netvsc_send_completion(struct hv_device *device,
 			nvsc_packet->completion.send.send_completion_ctx);
 
 		atomic_dec(&net_device->num_outstanding_sends);
+
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
 	} else {
 		netdev_err(ndev, "Unknown send completion packet type- "
 			"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -485,11 +488,16 @@ int netvsc_send(struct hv_device *device,
 
 	}
 
-	if (ret != 0)
+	if (ret == 0) {
+		atomic_inc(&net_device->num_outstanding_sends);
+	} else if (ret == -EAGAIN) {
+		netif_stop_queue(ndev);
+		if (atomic_read(&net_device->num_outstanding_sends) < 1)
+			netif_wake_queue(ndev);
+	} else {
 		netdev_err(ndev, "Unable to send packet %p ret %d\n",
 			packet, ret);
-	else
-		atomic_inc(&net_device->num_outstanding_sends);
+	}
 
 	return ret;
 }
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b69c3a4..7da85eb 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,15 +43,10 @@
 struct net_device_context {
 	/* point back to our device context */
 	struct hv_device *device_ctx;
-	atomic_t avail;
 	struct delayed_work dwork;
 };
 
 
-#define PACKET_PAGES_LOWATER  8
-/* Need this many pages to handle worst case fragmented packet */
-#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)
-
 static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@@ -144,18 +139,8 @@ static void netvsc_xmit_completion(void *context)
 
 	kfree(packet);
 
-	if (skb) {
-		struct net_device *net = skb->dev;
-		struct net_device_context *net_device_ctx = netdev_priv(net);
-		unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
-
+	if (skb)
 		dev_kfree_skb_any(skb);
-
-		atomic_add(num_pages, &net_device_ctx->avail);
-		if (atomic_read(&net_device_ctx->avail) >=
-				PACKET_PAGES_HIWATER)
-			netif_wake_queue(net);
-	}
 }
 
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -167,8 +152,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 	/* Add 1 for skb->data and additional one for RNDIS */
 	num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
-	if (num_pages > atomic_read(&net_device_ctx->avail))
-		return NETDEV_TX_BUSY;
 
 	/* Allocate a netvsc packet based on # of frags. */
 	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -218,10 +201,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	if (ret == 0) {
 		net->stats.tx_bytes += skb->len;
 		net->stats.tx_packets++;
-
-		atomic_sub(num_pages, &net_device_ctx->avail);
-		if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
-			netif_stop_queue(net);
 	} else {
 		/* we are shutting down or bus overloaded, just drop packet */
 		net->stats.tx_dropped++;
@@ -391,7 +370,6 @@ static int netvsc_probe(struct hv_device *dev,
 
 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = dev;
-	atomic_set(&net_device_ctx->avail, ring_size);
 	hv_set_drvdata(dev, net);
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
 