[PATCHv2,1/1] net: forcedeth: add xmit_more support

Message ID: 1571737634-5830-1-git-send-email-yanjun.zhu@oracle.com
State: Changes Requested
Delegated to: David Miller

Commit Message

Zhu Yanjun Oct. 22, 2019, 9:47 a.m. UTC
This change adds xmit_more support, following the approach taken in igb
by commit 6f19e12f6230 ("igb: flush when in xmit_more mode and under
descriptor pressure") and using the hint from commit 6b16f9ee89b8 ("net:
move skb->xmit_more hint to softnet data"). netif_queue_stopped() is
called to check whether the device's transmit queue can accept no
further buffers; if so, the NIC TX kick register must be written even
though more packets may be pending.
When normal and/or xmit_more packets fill up the TX descriptor ring, the
NIC TX register must still be triggered: earlier descriptors may have
been queued with the doorbell deferred, and once the queue is stopped no
later transmit call would issue the kick.
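
For illustration only (not part of the patch): a minimal sketch of the
resulting pattern in an .ndo_start_xmit handler, where xyz_start_xmit()
and xyz_kick_tx() are hypothetical names standing in for the driver's
transmit function and its NvRegTxRxControl doorbell write:

    static netdev_tx_t xyz_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            /* ... map the skb and queue its TX descriptors ... */

            /* Defer the doorbell while the stack reports more packets
             * pending (netdev_xmit_more()), but always kick when the
             * queue had to be stopped, since no later transmit call
             * will arrive to flush the deferred descriptors.
             */
            if (netif_queue_stopped(dev) || !netdev_xmit_more())
                    xyz_kick_tx(dev);       /* doorbell/kick write */

            return NETDEV_TX_OK;
    }

The same condition covers the descriptor-pressure case: when the ring is
full and NETDEV_TX_BUSY is returned, the queue has just been stopped, so
the kick is still issued and the already-queued descriptors make
progress.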

Tested:
  - pktgen (xmit_more packets) SMP x86_64 ->
    Test command:
    ./pktgen_sample03_burst_single_flow.sh ... -b 8 -n 1000000
    Test results:
    Params:
    ...
    burst: 8
    ...
    Result: OK: 12194004(c12188996+d5007) usec, 1000001 (1500byte,0frags)
    82007pps 984Mb/sec (984084000bps) errors: 0

  - iperf (normal packets) SMP x86_64 ->
    Test command:
    Server: iperf -s
    Client: iperf -c serverip
    Result:
    TCP window size: 85.0 KByte (default)
    ------------------------------------------------------------
    [ ID] Interval       Transfer     Bandwidth
    [  3]  0.0-10.0 sec  1.10 GBytes   942 Mbits/sec

CC: Joe Jin <joe.jin@oracle.com>
CC: JUNXIAO_BI <junxiao.bi@oracle.com>
Reported-and-tested-by: Nan san <nan.1986san@gmail.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
---
V1->V2: use the lower case label.
---
 drivers/net/ethernet/nvidia/forcedeth.c | 37 +++++++++++++++++++++++++++------
 1 file changed, 31 insertions(+), 6 deletions(-)

Comments

Rain River Oct. 23, 2019, 11:39 a.m. UTC | #1
On Tue, Oct 22, 2019 at 5:38 PM Zhu Yanjun <yanjun.zhu@oracle.com> wrote:
[...]
> Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>

Thanks.
Acked-by: Rain River <rain.1986.08.12@gmail.com>


Patch

diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 05d2b47..e2bb0cd 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2225,6 +2225,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct nv_skb_map *prev_tx_ctx;
 	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
 	unsigned long flags;
+	netdev_tx_t ret = NETDEV_TX_OK;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -2240,7 +2241,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
-		return NETDEV_TX_BUSY;
+
+		/* When normal packets and/or xmit_more packets fill up
+		 * tx_desc, it is necessary to trigger NIC tx reg.
+		 */
+		ret = NETDEV_TX_BUSY;
+		goto txkick;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
 
@@ -2357,8 +2363,14 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_unlock_irqrestore(&np->lock, flags);
 
-	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	return NETDEV_TX_OK;
+txkick:
+	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+		u32 txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+
+		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+	}
+
+	return ret;
 }
 
 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
@@ -2381,6 +2393,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	struct nv_skb_map *start_tx_ctx = NULL;
 	struct nv_skb_map *tmp_tx_ctx = NULL;
 	unsigned long flags;
+	netdev_tx_t ret = NETDEV_TX_OK;
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -2396,7 +2409,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
-		return NETDEV_TX_BUSY;
+
+		/* When normal packets and/or xmit_more packets fill up
+		 * tx_desc, it is necessary to trigger NIC tx reg.
+		 */
+		ret = NETDEV_TX_BUSY;
+
+		goto txkick;
 	}
 	spin_unlock_irqrestore(&np->lock, flags);
 
@@ -2542,8 +2561,14 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 
 	spin_unlock_irqrestore(&np->lock, flags);
 
-	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-	return NETDEV_TX_OK;
+txkick:
+	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
+		u32 txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
+
+		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
+	}
+
+	return ret;
 }
 
 static inline void nv_tx_flip_ownership(struct net_device *dev)
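
For context (not part of the patch): since commit 6b16f9ee89b8 the
xmit_more hint lives in per-CPU softnet data rather than in the skb, and
netdev_xmit_more() is roughly the following inline helper from
include/linux/netdevice.h:

    static inline bool netdev_xmit_more(void)
    {
            return __this_cpu_read(softnet_data.xmit.more);
    }

The core stack sets softnet_data.xmit.more before calling
ndo_start_xmit() when it already knows another packet will follow, which
is what lets drivers such as forcedeth batch their doorbell writes as
done above.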