Patchwork 8139cp: enable bql

login
register
mail settings
Submitter David Woodhouse
Date Nov. 22, 2012, 1:16 p.m.
Message ID <1353590218.26346.214.camel@shinybook.infradead.org>
Download mbox | patch
Permalink /patch/201225/
State Accepted
Delegated to: David Miller
Headers show

Comments

David Woodhouse - Nov. 22, 2012, 1:16 p.m.
This adds support for byte queue limits on RTL8139C+

Tested on real hardware.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-By: Dave Täht <dave.taht@bufferbloat.net>
---
dtaht is looking over my shoulder and says it seems to be doing the right thing...
David Miller - Nov. 25, 2012, 8:55 p.m.
From: David Woodhouse <dwmw2@infradead.org>
Date: Thu, 22 Nov 2012 13:16:58 +0000

> This adds support for byte queue limits on RTL8139C+
> 
> Tested on real hardware.
> 
> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
> Acked-By: Dave Täht <dave.taht@bufferbloat.net>

Applied to net-next.

> --- drivers/net/ethernet/realtek/8139cp.c~	2012-11-21 20:49:47.000000000 +0000
> +++ drivers/net/ethernet/realtek/8139cp.c	2012-11-22 13:07:26.119076315 +0000

Please "-p1" root your patches in the future.
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html

Patch

--- drivers/net/ethernet/realtek/8139cp.c~	2012-11-21 20:49:47.000000000 +0000
+++ drivers/net/ethernet/realtek/8139cp.c	2012-11-22 13:07:26.119076315 +0000
@@ -648,6 +648,7 @@  static void cp_tx (struct cp_private *cp
 {
 	unsigned tx_head = cp->tx_head;
 	unsigned tx_tail = cp->tx_tail;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	while (tx_tail != tx_head) {
 		struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@  static void cp_tx (struct cp_private *cp
 				 le32_to_cpu(txd->opts1) & 0xffff,
 				 PCI_DMA_TODEVICE);
 
+		bytes_compl += skb->len;
+		pkts_compl++;
+
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
 				netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@  static void cp_tx (struct cp_private *cp
 
 	cp->tx_tail = tx_tail;
 
+	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
 		netif_wake_queue(cp->dev);
 }
@@ -843,6 +848,8 @@  static netdev_tx_t cp_start_xmit (struct
 		wmb();
 	}
 	cp->tx_head = entry;
+
+	netdev_sent_queue(dev, skb->len);
 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@  static void cp_stop_hw (struct cp_privat
 
 	cp->rx_tail = 0;
 	cp->tx_head = cp->tx_tail = 0;
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_reset_hw (struct cp_private *cp)
@@ -981,6 +990,8 @@  static inline void cp_start_hw (struct c
 	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
 
 	cpw8(Cmd, RxOn | TxOn);
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_enable_irq(struct cp_private *cp)