Message ID | 20091105165738.GA31923@oksana.dev.rtsoft.ru (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Kumar Gala |
Headers | show |
[.....] > drivers/net/gianfar.c | 19 +++---------------- > 1 files changed, 3 insertions(+), 16 deletions(-) > >diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c >index 197b358..a0ae604 100644 >--- a/drivers/net/gianfar.c >+++ b/drivers/net/gianfar.c >@@ -1899,10 +1899,8 @@ static int gfar_start_xmit(struct >sk_buff *skb, struct net_device *dev) > u32 lstatus; > int i, rq = 0; > u32 bufaddr; >- unsigned long flags; > unsigned int nr_frags, length; > >- > rq = skb->queue_mapping; > tx_queue = priv->tx_queue[rq]; > txq = netdev_get_tx_queue(dev, rq); >@@ -1928,14 +1926,11 @@ static int gfar_start_xmit(struct >sk_buff *skb, struct net_device *dev) > /* total number of fragments in the SKB */ > nr_frags = skb_shinfo(skb)->nr_frags; > >- spin_lock_irqsave(&tx_queue->txlock, flags); >- > /* check if there is space to queue this packet */ > if ((nr_frags+1) > tx_queue->num_txbdfree) { > /* no space, stop the queue */ > netif_tx_stop_queue(txq); > dev->stats.tx_fifo_errors++; >- spin_unlock_irqrestore(&tx_queue->txlock, flags); > return NETDEV_TX_BUSY; > } > >@@ -2033,9 +2028,6 @@ static int gfar_start_xmit(struct >sk_buff *skb, struct net_device *dev) > /* Tell the DMA to go go go */ > gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); > >- /* Unlock priv */ >- spin_unlock_irqrestore(&tx_queue->txlock, flags); >- > return NETDEV_TX_OK; > } > >@@ -2550,7 +2542,6 @@ static int gfar_poll(struct napi_struct >*napi, int budget) > int tx_cleaned = 0, i, left_over_budget = budget; > unsigned long serviced_queues = 0; > int num_queues = 0; >- unsigned long flags; > > num_queues = gfargrp->num_rx_queues; > budget_per_queue = budget/num_queues; >@@ -2570,13 +2561,9 @@ static int gfar_poll(struct napi_struct >*napi, int budget) > rx_queue = priv->rx_queue[i]; > tx_queue = priv->tx_queue[rx_queue->qindex]; > >- /* If we fail to get the lock, >- * don't bother with the TX BDs */ >- if >(spin_trylock_irqsave(&tx_queue->txlock, flags)) { >- tx_cleaned += 
>gfar_clean_tx_ring(tx_queue); >- >spin_unlock_irqrestore(&tx_queue->txlock, >- flags); >- } >+ netif_tx_lock_bh(priv->ndev); Will this not lead to locking all the tx queues even though at this point we are working on a "particular queue" ? >+ tx_cleaned += gfar_clean_tx_ring(tx_queue); >+ netif_tx_unlock_bh(priv->ndev); > > rx_cleaned_per_queue = >gfar_clean_rx_ring(rx_queue, > >budget_per_queue); >-- -- Thanks Sandeep
On Thu, Nov 05, 2009 at 10:53:08PM +0530, Kumar Gopalpet-B05799 wrote: [...] > >(spin_trylock_irqsave(&tx_queue->txlock, flags)) { > >- tx_cleaned += > >gfar_clean_tx_ring(tx_queue); > >- > >spin_unlock_irqrestore(&tx_queue->txlock, > >- flags); > >- } > >+ netif_tx_lock_bh(priv->ndev); > > Will this not lead to locking all the tx queues even though at this > point we are working on a "particular queue" ? Yeah, per-txq locking would be better (or not.. I need to netperf it). Thanks,
> > [.....] > > drivers/net/gianfar.c | 19 +++---------------- > > 1 files changed, 3 insertions(+), 16 deletions(-) > > > >diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c > >index 197b358..a0ae604 100644 > >--- a/drivers/net/gianfar.c > >+++ b/drivers/net/gianfar.c > >@@ -1899,10 +1899,8 @@ static int gfar_start_xmit(struct > >sk_buff *skb, struct net_device *dev) > > u32 lstatus; > > int i, rq = 0; > > u32 bufaddr; > >- unsigned long flags; > > unsigned int nr_frags, length; > > > >- > > rq = skb->queue_mapping; > > tx_queue = priv->tx_queue[rq]; > > txq = netdev_get_tx_queue(dev, rq); > >@@ -1928,14 +1926,11 @@ static int gfar_start_xmit(struct > >sk_buff *skb, struct net_device *dev) > > /* total number of fragments in the SKB */ > > nr_frags = skb_shinfo(skb)->nr_frags; > > > >- spin_lock_irqsave(&tx_queue->txlock, flags); > >- > > /* check if there is space to queue this packet */ > > if ((nr_frags+1) > tx_queue->num_txbdfree) { > > /* no space, stop the queue */ > > netif_tx_stop_queue(txq); > > dev->stats.tx_fifo_errors++; > >- spin_unlock_irqrestore(&tx_queue->txlock, flags); > > return NETDEV_TX_BUSY; > > } > > > >@@ -2033,9 +2028,6 @@ static int gfar_start_xmit(struct > >sk_buff *skb, struct net_device *dev) > > /* Tell the DMA to go go go */ > > gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); > > > >- /* Unlock priv */ > >- spin_unlock_irqrestore(&tx_queue->txlock, flags); > >- > > return NETDEV_TX_OK; > > } > > > >@@ -2550,7 +2542,6 @@ static int gfar_poll(struct napi_struct > >*napi, int budget) > > int tx_cleaned = 0, i, left_over_budget = budget; > > unsigned long serviced_queues = 0; > > int num_queues = 0; > >- unsigned long flags; > > > > num_queues = gfargrp->num_rx_queues; > > budget_per_queue = budget/num_queues; > >@@ -2570,13 +2561,9 @@ static int gfar_poll(struct napi_struct > >*napi, int budget) > > rx_queue = priv->rx_queue[i]; > > tx_queue = priv->tx_queue[rx_queue->qindex]; > > > >- /* If we fail to get 
the lock, > >- * don't bother with the TX BDs */ > >- if > >(spin_trylock_irqsave(&tx_queue->txlock, flags)) { > >- tx_cleaned += > >gfar_clean_tx_ring(tx_queue); > >- > >spin_unlock_irqrestore(&tx_queue->txlock, > >- flags); > >- } > >+ netif_tx_lock_bh(priv->ndev); > > Will this not lead to locking all the tx queues even though at this > point we are working on a "particular queue" ? Similar but different question: What version is this patch based upon? I can't find: > >index 197b358..a0ae604 100644 My search of 2.6.31 up through current linux head (v2.6.32-rc6-26-g91d3f9b) and benh's current head (94a8d5caba74211ec76dac80fc6e2d5c391530df) do not have a version of this code with separate txqueues. I confess I'm not too familiar with the history here, so I don't know if that feature is in flux, or came, or went, or what. That boils down to: I can't directly apply your patch, but I could hand-fudge its intent, but only on a single tx queue variant. jdl
> >Similar but different question: What version is this patch based upon? >I can't find: > > > >index 197b358..a0ae604 100644 > >My search of 2.6.31 up through current linux head >(v2.6.32-rc6-26-g91d3f9b) and benh's current head >(94a8d5caba74211ec76dac80fc6e2d5c391530df) do not have a >version of this code with separate txqueues. > >I confess I'm not too familiar with the history here, so I >don't know if that feature is in flux, or came, or went, or what. > >That boils down to: I can't directly apply your patch, but I >could hand-fudge its intent, but only on a single tx queue variant. > You might want to get the code base from the top of davem/net-next-2.6 tree git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git The multi queue is a feature available in etsec2.0 version. On older versions you might want to work in a single queue mode itself. -- Thanks Sandeep
From: Anton Vorontsov <avorontsov@ru.mvista.com> Date: Thu, 5 Nov 2009 19:57:38 +0300 > But that basically means that with skb recycling we can't safely > use KGDBoE, though we can add something like this: Please stop adding special logic only to your driver to handle these things. Either it's a non-issue, or it's going to potentially be an issue for everyone using skb_recycle_check() in a NAPI driver, right? So why not add the "in_interrupt()" or whatever check to skb_recycle_check() and if the context is unsuitable return false (0) ok?
On Sun, Nov 08, 2009 at 01:08:48AM -0800, David Miller wrote: > From: Anton Vorontsov <avorontsov@ru.mvista.com> > Date: Thu, 5 Nov 2009 19:57:38 +0300 > > > But that basically means that with skb recycling we can't safely > > use KGDBoE, though we can add something like this: > > Please stop adding special logic only to your driver to handle these > things. > > Either it's a non-issue, or it's going to potentially be an issue for > everyone using skb_recycle_check() in a NAPI driver, right? > > So why not add the "in_interrupt()" or whatever check to > skb_recycle_check() and if the context is unsuitable return false (0) > ok? I think the check is needed, yes. But if we add just that check, then we'll never do skb recycling in the tx path (since gianfar always grabs irqsave spinlock during tx ring cleanup, so the check will always return false). So we must add the check to keep polling with disabled IRQs safe, but we also should get rid of irqsave spinlock in the gianfar driver (to make the skb recycling actually work in most cases). Thanks!
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 197b358..a0ae604 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1899,10 +1899,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) u32 lstatus; int i, rq = 0; u32 bufaddr; - unsigned long flags; unsigned int nr_frags, length; - rq = skb->queue_mapping; tx_queue = priv->tx_queue[rq]; txq = netdev_get_tx_queue(dev, rq); @@ -1928,14 +1926,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* total number of fragments in the SKB */ nr_frags = skb_shinfo(skb)->nr_frags; - spin_lock_irqsave(&tx_queue->txlock, flags); - /* check if there is space to queue this packet */ if ((nr_frags+1) > tx_queue->num_txbdfree) { /* no space, stop the queue */ netif_tx_stop_queue(txq); dev->stats.tx_fifo_errors++; - spin_unlock_irqrestore(&tx_queue->txlock, flags); return NETDEV_TX_BUSY; } @@ -2033,9 +2028,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Tell the DMA to go go go */ gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex); - /* Unlock priv */ - spin_unlock_irqrestore(&tx_queue->txlock, flags); - return NETDEV_TX_OK; } @@ -2550,7 +2542,6 @@ static int gfar_poll(struct napi_struct *napi, int budget) int tx_cleaned = 0, i, left_over_budget = budget; unsigned long serviced_queues = 0; int num_queues = 0; - unsigned long flags; num_queues = gfargrp->num_rx_queues; budget_per_queue = budget/num_queues; @@ -2570,13 +2561,9 @@ static int gfar_poll(struct napi_struct *napi, int budget) rx_queue = priv->rx_queue[i]; tx_queue = priv->tx_queue[rx_queue->qindex]; - /* If we fail to get the lock, - * don't bother with the TX BDs */ - if (spin_trylock_irqsave(&tx_queue->txlock, flags)) { - tx_cleaned += gfar_clean_tx_ring(tx_queue); - spin_unlock_irqrestore(&tx_queue->txlock, - flags); - } + netif_tx_lock_bh(priv->ndev); + tx_cleaned += gfar_clean_tx_ring(tx_queue); + netif_tx_unlock_bh(priv->ndev); 
rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue, budget_per_queue);