
[ovs-dev] netdev-dpdk: shrink critical region under tx_q[qid].tx_lock

Message ID 1548902823-18719-1-git-send-email-lirongqing@baidu.com
State Deferred
Delegated to: Ian Stokes
Series [ovs-dev] netdev-dpdk: shrink critical region under tx_q[qid].tx_lock

Commit Message

Li RongQing Jan. 31, 2019, 2:47 a.m. UTC
netdev_dpdk_filter_packet_len() does not need to be protected by
tx_q[].tx_lock, and tx_q[].tx_lock does not actually protect it anyway;
the same applies to netdev_dpdk_qos_run().

So move them out of this critical section to improve scalability.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
 lib/netdev-dpdk.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)
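
For context, below is a condensed sketch (not a verbatim excerpt; the retry
limit and drop/stats accounting are abbreviated) of the
__netdev_dpdk_vhost_send() hot path as it looks with this patch applied:
packet-length filtering and QoS run before the lock is taken, and
tx_q[qid].tx_lock only covers the enqueue loop.

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);  /* outside the lock */
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);      /* outside the lock */
    dropped = total_pkts - cnt;

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
    unsigned int tx_pkts;
    do {
        tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt);
        cnt -= tx_pkts;
        cur_pkts += tx_pkts;
        /* (the real code also bounds this loop with a retry counter) */
    } while (cnt && tx_pkts);

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);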

Comments

Stokes, Ian Feb. 6, 2019, 3:33 p.m. UTC | #1
On 1/31/2019 2:47 AM, Li RongQing wrote:
> netdev_dpdk_filter_packet_len() does not need to be protected by
> tx_q[].tx_lock, and tx_q[].tx_lock does not actually protect it anyway;
> the same applies to netdev_dpdk_qos_run().
> 
> So move them out of this critical section to improve scalability.
> 

Thanks for the patch Li, can you give more details on what you mean in
terms of scalability? The changes are small below, so I'm curious as to
the use case where you're seeing an improvement?

> Signed-off-by: Li RongQing <lirongqing@baidu.com>
> ---
>   lib/netdev-dpdk.c | 33 ++++++++++++++++++++++-----------
>   1 file changed, 22 insertions(+), 11 deletions(-)
> 
> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
> index 4bf0ca9e8..bf4918e2c 100644
> --- a/lib/netdev-dpdk.c
> +++ b/lib/netdev-dpdk.c
> @@ -2333,15 +2333,15 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>           goto out;
>       }
>   
> -    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> -
>       cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
>       /* Check has QoS has been configured for the netdev */
>       cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
>       dropped = total_pkts - cnt;
>   
> +    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> +
> +    int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
>       do {
> -        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
>           unsigned int tx_pkts;
>   
>           tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt);
> @@ -2462,15 +2462,20 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
>           return;
>       }
>   
> -    if (OVS_UNLIKELY(concurrent_txq)) {
> -        qid = qid % dev->up.n_txq;
> -        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> -    }
> -
>       if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
>           struct netdev *netdev = &dev->up;
>   

The change below introduces code duplication in both the if and else
statements, specifically for the unlikely case. I'm slow to introduce
this change, as it seems the key benefit is in the case where concurrent
txqs are used, which to date has not been the common use case for the
wider community. I take it that case is the beneficiary here?

Ian

> +        if (OVS_UNLIKELY(concurrent_txq)) {
> +            qid = qid % dev->up.n_txq;
> +            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> +        }
> +
>           dpdk_do_tx_copy(netdev, qid, batch);
> +
> +        if (OVS_UNLIKELY(concurrent_txq)) {
> +            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
> +        }
> +
>           dp_packet_delete_batch(batch, true);
>       } else {
>           int tx_cnt, dropped;
> @@ -2481,8 +2486,17 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
>           tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
>           dropped = batch_cnt - tx_cnt;
>   
> +        if (OVS_UNLIKELY(concurrent_txq)) {
> +            qid = qid % dev->up.n_txq;
> +            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> +        }
> +
>           dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
>   
> +        if (OVS_UNLIKELY(concurrent_txq)) {
> +            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
> +        }
> +
>           if (OVS_UNLIKELY(dropped)) {
>               rte_spinlock_lock(&dev->stats_lock);
>               dev->stats.tx_dropped += dropped;
> @@ -2490,9 +2504,6 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
>           }
>       }
>   
> -    if (OVS_UNLIKELY(concurrent_txq)) {
> -        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
> -    }
>   }
>   
>   static int
>
Li RongQing Feb. 11, 2019, 8:25 a.m. UTC | #2
> -----Original Message-----
> From: Ian Stokes [mailto:ian.stokes@intel.com]
> Sent: February 6, 2019 23:34
> To: Li,Rongqing <lirongqing@baidu.com>; dev@openvswitch.org
> Subject: Re: [ovs-dev] [PATCH] netdev-dpdk: shrink critical region under
> tx_q[qid].tx_lock
> 
> On 1/31/2019 2:47 AM, Li RongQing wrote:
> > netdev_dpdk_filter_packet_len() does not need to be protected by
> > tx_q[].tx_lock, and tx_q[].tx_lock does not actually protect it anyway;
> > the same applies to netdev_dpdk_qos_run().
> >
> > So move them out of this critical section to improve scalability.
> >
> 
> Thanks for the patch Li, can you give more details on what you mean in terms of
> scalability? The changes are small below, so I'm curious as to the use case
> where you're seeing an improvement?
> 

I mean it increases parallelism.

I do not have performance data, but from code review it looks odd that the
critical region contains code that does not need to be protected by this lock.

thanks

-RongQing 
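
As a toy illustration of the parallelism point (not OVS code, just the general
pattern, using plain pthread spinlocks): when several threads share one tx
queue, the per-packet work done outside the spinlock can run concurrently, and
only the enqueue onto the shared ring is serialized.

    /* Toy example: two sender threads share one spinlock-protected ring.
     * validate() stands in for the filtering/QoS work that needs no tx lock. */
    #include <pthread.h>
    #include <stdio.h>

    #define RING_SZ 1024

    static pthread_spinlock_t tx_lock;
    static int ring[RING_SZ];
    static int ring_head;

    static int validate(int pkt)
    {
        return pkt % 2 == 0;                /* pretend odd "packets" are dropped */
    }

    static void *sender(void *arg)
    {
        int base = *(int *) arg;

        for (int i = 0; i < 100; i++) {
            int pkt = base + i;

            if (!validate(pkt)) {           /* done outside the lock */
                continue;
            }
            pthread_spin_lock(&tx_lock);    /* lock only the shared enqueue */
            ring[ring_head++ % RING_SZ] = pkt;
            pthread_spin_unlock(&tx_lock);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        int base1 = 0, base2 = 1000;

        pthread_spin_init(&tx_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_create(&t1, NULL, sender, &base1);
        pthread_create(&t2, NULL, sender, &base2);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("enqueued %d packets\n", ring_head);
        return 0;
    }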


> > Signed-off-by: Li RongQing <lirongqing@baidu.com>
> > ---
> >   lib/netdev-dpdk.c | 33 ++++++++++++++++++++++-----------
> >   1 file changed, 22 insertions(+), 11 deletions(-)
> >
> > diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c index
> > 4bf0ca9e8..bf4918e2c 100644
> > --- a/lib/netdev-dpdk.c
> > +++ b/lib/netdev-dpdk.c
> > @@ -2333,15 +2333,15 @@ __netdev_dpdk_vhost_send(struct netdev
> *netdev, int qid,
> >           goto out;
> >       }
> >
> > -    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> > -
> >       cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
> >       /* Check has QoS has been configured for the netdev */
> >       cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
> >       dropped = total_pkts - cnt;
> >
> > +    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> > +
> > +    int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
> >       do {
> > -        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
> >           unsigned int tx_pkts;
> >
> >           tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts,
> > cnt); @@ -2462,15 +2462,20 @@ netdev_dpdk_send__(struct netdev_dpdk
> *dev, int qid,
> >           return;
> >       }
> >
> > -    if (OVS_UNLIKELY(concurrent_txq)) {
> > -        qid = qid % dev->up.n_txq;
> > -        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> > -    }
> > -
> >       if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
> >           struct netdev *netdev = &dev->up;
> >
> 
> The change below introduces code duplication in both the if and else
> statements, specifically for the unlikely case. I'm slow to introduce this
> change, as it seems the key benefit is in the case where concurrent txqs are
> used, which to date has not been the common use case for the wider community.
> I take it that case is the beneficiary here?
> 
> Ian
> 
> > +        if (OVS_UNLIKELY(concurrent_txq)) {
> > +            qid = qid % dev->up.n_txq;
> > +            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> > +        }
> > +
> >           dpdk_do_tx_copy(netdev, qid, batch);
> > +
> > +        if (OVS_UNLIKELY(concurrent_txq)) {
> > +            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
> > +        }
> > +
> >           dp_packet_delete_batch(batch, true);
> >       } else {
> >           int tx_cnt, dropped;
> > @@ -2481,8 +2486,17 @@ netdev_dpdk_send__(struct netdev_dpdk *dev,
> int qid,
> >           tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
> >           dropped = batch_cnt - tx_cnt;
> >
> > +        if (OVS_UNLIKELY(concurrent_txq)) {
> > +            qid = qid % dev->up.n_txq;
> > +            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
> > +        }
> > +
> >           dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
> >
> > +        if (OVS_UNLIKELY(concurrent_txq)) {
> > +            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
> > +        }
> > +
> >           if (OVS_UNLIKELY(dropped)) {
> >               rte_spinlock_lock(&dev->stats_lock);
> >               dev->stats.tx_dropped += dropped;
> > @@ -2490,9 +2504,6 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int
> qid,
> >           }
> >       }
> >
> > -    if (OVS_UNLIKELY(concurrent_txq)) {
> > -        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
> > -    }
> >   }
> >
> >   static int
> >

Patch

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 4bf0ca9e8..bf4918e2c 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -2333,15 +2333,15 @@  __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
         goto out;
     }
 
-    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
-
     cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
     /* Check has QoS has been configured for the netdev */
     cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
     dropped = total_pkts - cnt;
 
+    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+
+    int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
     do {
-        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
         unsigned int tx_pkts;
 
         tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt);
@@ -2462,15 +2462,20 @@  netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         return;
     }
 
-    if (OVS_UNLIKELY(concurrent_txq)) {
-        qid = qid % dev->up.n_txq;
-        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
-    }
-
     if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
         struct netdev *netdev = &dev->up;
 
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            qid = qid % dev->up.n_txq;
+            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+        }
+
         dpdk_do_tx_copy(netdev, qid, batch);
+
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+        }
+
         dp_packet_delete_batch(batch, true);
     } else {
         int tx_cnt, dropped;
@@ -2481,8 +2486,17 @@  netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
         dropped = batch_cnt - tx_cnt;
 
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            qid = qid % dev->up.n_txq;
+            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+        }
+
         dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
 
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+        }
+
         if (OVS_UNLIKELY(dropped)) {
             rte_spinlock_lock(&dev->stats_lock);
             dev->stats.tx_dropped += dropped;
@@ -2490,9 +2504,6 @@  netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
         }
     }
 
-    if (OVS_UNLIKELY(concurrent_txq)) {
-        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
-    }
 }
 
 static int
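
For readability, here is a condensed sketch (not a verbatim excerpt; the
initial link-state check and the tx_dropped update under dev->stats_lock are
abbreviated, and dp_packet_batch_size() is used here as a stand-in for the
batch count) of how netdev_dpdk_send__() is structured once this patch is
applied: each branch takes tx_q[qid].tx_lock only around the actual transmit
call.

    static void
    netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                       struct dp_packet_batch *batch, bool concurrent_txq)
    {
        if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
            /* Non-DPDK buffers: copy path. */
            if (OVS_UNLIKELY(concurrent_txq)) {
                qid = qid % dev->up.n_txq;
                rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
            }
            dpdk_do_tx_copy(&dev->up, qid, batch);
            if (OVS_UNLIKELY(concurrent_txq)) {
                rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
            }
            dp_packet_delete_batch(batch, true);
        } else {
            /* DPDK buffers: filtering and QoS run without the tx lock ... */
            struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;
            int batch_cnt = dp_packet_batch_size(batch);
            int tx_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
            tx_cnt = netdev_dpdk_qos_run(dev, pkts, tx_cnt, true);
            int dropped = batch_cnt - tx_cnt;

            /* ... and the lock only covers the burst transmit. */
            if (OVS_UNLIKELY(concurrent_txq)) {
                qid = qid % dev->up.n_txq;
                rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
            }
            dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, tx_cnt);
            if (OVS_UNLIKELY(concurrent_txq)) {
                rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
            }
            /* (update of dev->stats.tx_dropped under stats_lock omitted) */
        }
    }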