From patchwork Sat Dec  4 05:55:21 2010
X-Patchwork-Submitter: Changli Gao
X-Patchwork-Id: 74251
X-Patchwork-Delegate: davem@davemloft.net
From: Changli Gao
To: jamal
Cc: netdev@vger.kernel.org, jamal, Changli Gao
Subject: [PATCH 3/3] ifb: move tq from ifb_private
Date: Sat, 4 Dec 2010 13:55:21 +0800
Message-Id: <1291442121-3302-3-git-send-email-xiaosuo@gmail.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1291442121-3302-1-git-send-email-xiaosuo@gmail.com>
References: <1291442121-3302-1-git-send-email-xiaosuo@gmail.com>
X-Mailing-List: netdev@vger.kernel.org

tq is only used in ri_tasklet(), so we move it from ifb_private to a stack
variable of ri_tasklet(). skb_queue_splice_tail_init() is used instead of the
slower open-coded dequeue-and-requeue loop.
Signed-off-by: Changli Gao
---
 drivers/net/ifb.c | 49 ++++++++++++-------------------------------------
 1 file changed, 12 insertions(+), 37 deletions(-)

diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index d1e362a..cd6e90d 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -39,9 +39,7 @@
 #define TX_Q_LIMIT 32
 struct ifb_private {
 	struct tasklet_struct ifb_tasklet;
-	int tasklet_pending;
 	struct sk_buff_head rq;
-	struct sk_buff_head tq;
 };
 
 static int numifbs = 2;
@@ -53,27 +51,25 @@ static int ifb_close(struct net_device *dev);
 
 static void ri_tasklet(unsigned long dev)
 {
-
 	struct net_device *_dev = (struct net_device *)dev;
 	struct ifb_private *dp = netdev_priv(_dev);
 	struct net_device_stats *stats = &_dev->stats;
 	struct netdev_queue *txq;
 	struct sk_buff *skb;
+	struct sk_buff_head tq;
 
+	__skb_queue_head_init(&tq);
 	txq = netdev_get_tx_queue(_dev, 0);
-	if ((skb = skb_peek(&dp->tq)) == NULL) {
-		if (__netif_tx_trylock(txq)) {
-			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
-				skb_queue_tail(&dp->tq, skb);
-			}
-			__netif_tx_unlock(txq);
-		} else {
-			/* reschedule */
-			goto resched;
-		}
+	if (!__netif_tx_trylock(txq)) {
+		tasklet_schedule(&dp->ifb_tasklet);
+		return;
 	}
+	skb_queue_splice_tail_init(&dp->rq, &tq);
+	if (netif_tx_queue_stopped(txq))
+		netif_tx_wake_queue(txq);
+	__netif_tx_unlock(txq);
 
-	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
+	while ((skb = __skb_dequeue(&tq)) != NULL) {
 		u32 from = G_TC_FROM(skb->tc_verd);
 
 		skb->tc_verd = 0;
@@ -87,7 +83,7 @@ static void ri_tasklet(unsigned long dev)
 			rcu_read_unlock();
 			dev_kfree_skb(skb);
 			stats->tx_dropped++;
-			break;
+			continue;
 		}
 		rcu_read_unlock();
 		skb->skb_iif = _dev->ifindex;
@@ -100,23 +96,6 @@ static void ri_tasklet(unsigned long dev)
 		} else
 			BUG();
 	}
-
-	if (__netif_tx_trylock(txq)) {
-		if ((skb = skb_peek(&dp->rq)) == NULL) {
-			dp->tasklet_pending = 0;
-			if (netif_queue_stopped(_dev))
-				netif_wake_queue(_dev);
-		} else {
-			__netif_tx_unlock(txq);
-			goto resched;
-		}
-		__netif_tx_unlock(txq);
-	} else {
-resched:
-		dp->tasklet_pending = 1;
-		tasklet_schedule(&dp->ifb_tasklet);
-	}
-
 }
 
 static const struct net_device_ops ifb_netdev_ops = {
@@ -162,10 +141,8 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	skb_queue_tail(&dp->rq, skb);
-	if (!dp->tasklet_pending) {
-		dp->tasklet_pending = 1;
+	if (skb_queue_len(&dp->rq) == 1)
 		tasklet_schedule(&dp->ifb_tasklet);
-	}
 
 	return NETDEV_TX_OK;
 }
@@ -177,7 +154,6 @@ static int ifb_close(struct net_device *dev)
 	tasklet_kill(&dp->ifb_tasklet);
 	netif_stop_queue(dev);
 	skb_queue_purge(&dp->rq);
-	skb_queue_purge(&dp->tq);
 
 	return 0;
 }
@@ -187,7 +163,6 @@ static int ifb_open(struct net_device *dev)
 
 	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
 	skb_queue_head_init(&dp->rq);
-	skb_queue_head_init(&dp->tq);
 	netif_start_queue(dev);
 
 	return 0;
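
[Not part of the patch: a note on the pattern for readers less familiar with the
sk_buff_head helpers. The core idea is: while holding the tx lock, splice the whole
shared receive queue onto a queue that lives on the tasklet's stack
(skb_queue_splice_tail_init()), then drop the lock and drain the private queue with
the lock-free __skb_dequeue(). The minimal userspace sketch below shows the same
splice-under-lock idea with a pthread mutex and a toy singly linked queue; every
name in it (struct node, struct queue, queue_splice_tail_init, drain, ...) is
invented for illustration and is not a kernel API.]

/* Userspace analogue of ri_tasklet(): splice under the lock, process without it.
 * Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int value;
};

struct queue {
	struct node *head, *tail;	/* singly linked FIFO */
};

static struct queue shared_q;		/* analogue of dp->rq */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_init(struct queue *q)
{
	q->head = q->tail = NULL;
}

static void queue_tail(struct queue *q, struct node *n)
{
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
}

/* Move every node of @from to the tail of @to and leave @from empty, in O(1).
 * This is the analogue of skb_queue_splice_tail_init(). */
static void queue_splice_tail_init(struct queue *from, struct queue *to)
{
	if (!from->head)
		return;
	if (to->tail)
		to->tail->next = from->head;
	else
		to->head = from->head;
	to->tail = from->tail;
	queue_init(from);
}

/* Analogue of ri_tasklet(): take the lock once, grab everything,
 * then work on the stack-local copy with no lock held. */
static void drain(void)
{
	struct queue tq;		/* stack-local, like 'tq' in the patch */
	struct node *n;

	queue_init(&tq);

	pthread_mutex_lock(&shared_lock);
	queue_splice_tail_init(&shared_q, &tq);
	pthread_mutex_unlock(&shared_lock);

	while ((n = tq.head) != NULL) {	/* lock-free dequeue */
		tq.head = n->next;
		if (!tq.head)
			tq.tail = NULL;
		printf("processing %d\n", n->value);
		free(n);
	}
}

int main(void)
{
	queue_init(&shared_q);
	for (int i = 0; i < 4; i++) {	/* analogue of ifb_xmit() enqueueing */
		struct node *n = malloc(sizeof(*n));
		n->value = i;
		pthread_mutex_lock(&shared_lock);
		queue_tail(&shared_q, n);
		pthread_mutex_unlock(&shared_lock);
	}
	drain();
	return 0;
}

[With this layout the tasklet takes the tx lock once per run instead of juggling a
second per-device queue, and the tasklet_pending flag becomes unnecessary: as the
diff shows, ifb_xmit() only schedules the tasklet when rq goes from empty to
non-empty, i.e. the skb_queue_len(&dp->rq) == 1 test.]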