Message ID | 1377832428-18117-6-git-send-email-robherring2@gmail.com
---|---
State | Changes Requested, archived
Delegated to | David Miller
On Thu, 2013-08-29 at 22:13 -0500, Rob Herring wrote:
> From: Rob Herring <rob.herring@calxeda.com>
>
> Since the xgmac transmit start and completion work locklessly, it is
> possible for xgmac_xmit to stop the tx queue after the xgmac_tx_complete
> has run resulting in the tx queue never being woken up. Fix this by
> ensuring that ring buffer index updates are visible and recheck the ring
> space after stopping the queue.
>
> The implementation used here was copied from
> drivers/net/ethernet/broadcom/tg3.c.
>
> Signed-off-by: Rob Herring <rob.herring@calxeda.com>
> ---
> v2:
> - drop netif_tx_lock
> - use netif_start_queue instead of netif_wake_queue
[...]
> @@ -1125,10 +1130,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
>
>  	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
>
> -	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
> -	    MAX_SKB_FRAGS)
> +	/* Ensure tx_head update is visible to tx completion */
> +	smp_mb();
> +	if (unlikely(tx_dma_ring_space(priv) < MAX_SKB_FRAGS)) {
[...]

On re-reading, I see there's an off-by-one bug lingering here. You use
up to 1 + MAX_SKB_FRAGS descriptors per skb. So you must stop the
queue when the ring space is <= MAX_SKB_FRAGS.

Ben.
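The race the commit message describes is the classic lockless stop/wake hazard: the completion path advances tx_tail and checks the queue state *before* the transmit path has made its netif_stop_queue visible, so the wake never happens. Below is a minimal userspace sketch of the tg3-style pattern the patch adopts. It is illustrative only, not driver code: C11 seq_cst fences stand in for the kernel's smp_mb(), and RING_SZ and FRAGS are made-up stand-ins for DMA_TX_RING_SZ and MAX_SKB_FRAGS.

```c
#include <stdatomic.h>
#include <stdbool.h>

#define RING_SZ 256			/* must be a power of two */
#define FRAGS	16			/* stand-in for MAX_SKB_FRAGS */

static _Atomic unsigned int head, tail;	/* producer / consumer ring indices */
static atomic_bool stopped;		/* models netif_queue_stopped() */

/* CIRC_SPACE() equivalent: free entries between head and tail */
static unsigned int ring_space(void)
{
	unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);
	unsigned int t = atomic_load_explicit(&tail, memory_order_relaxed);

	return (t - h - 1) & (RING_SZ - 1);
}

/* Producer, mirrors xgmac_xmit() */
static void xmit_one(void)
{
	atomic_fetch_add_explicit(&head, 1, memory_order_relaxed);
	/* first smp_mb(): publish head before reading tail */
	atomic_thread_fence(memory_order_seq_cst);
	if (ring_space() < FRAGS) {
		atomic_store_explicit(&stopped, true, memory_order_relaxed);
		/* second smp_mb(): publish "stopped" before re-reading tail */
		atomic_thread_fence(memory_order_seq_cst);
		if (ring_space() > FRAGS)	/* completion raced with us */
			atomic_store_explicit(&stopped, false,
					      memory_order_relaxed);
	}
}

/* Consumer, mirrors xgmac_tx_complete() */
static void complete_one(void)
{
	atomic_fetch_add_explicit(&tail, 1, memory_order_relaxed);
	/* pairs with the producer's fences */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&stopped, memory_order_relaxed) &&
	    ring_space() > FRAGS)
		atomic_store_explicit(&stopped, false, memory_order_relaxed);
}
```

Without the fences and the producer's re-check, the consumer can free space and test `stopped` before the producer's store to it becomes visible; the producer then stops the queue and nothing ever restarts it, which is exactly the hang being fixed.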
```diff
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f630855..54be55b 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -410,6 +410,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
 
+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
@@ -886,8 +889,10 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
 	}
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }
 
@@ -1125,10 +1130,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_head update is visible to tx completion */
+	smp_mb();
+	if (unlikely(tx_dma_ring_space(priv) < MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
-
+		/* Ensure netif_stop_queue is visible to tx completion */
+		smp_mb();
+		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+			netif_start_queue(dev);
+	}
 	return NETDEV_TX_OK;
 }
```
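For what it's worth, addressing Ben's off-by-one comment would only change the stop condition in xgmac_xmit(): since one skb can consume 1 + MAX_SKB_FRAGS descriptors, the queue must already be stopped when the space is exactly MAX_SKB_FRAGS. A sketch of what that spin might look like (this is not a posted patch, just Ben's suggestion applied to the hunk above):

```c
	/* Ensure tx_head update is visible to tx completion */
	smp_mb();
	/* An skb can use 1 + MAX_SKB_FRAGS descriptors, so stop while
	 * space is <= MAX_SKB_FRAGS, not only while strictly below it. */
	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		/* Re-check after publishing the stopped state, in case
		 * xgmac_tx_complete() freed descriptors in between. */
		smp_mb();
		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
			netif_start_queue(dev);
	}
```

The existing wake threshold then still works: waking only when space is greater than MAX_SKB_FRAGS means at least 1 + MAX_SKB_FRAGS entries are free, enough for one maximally fragmented skb.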