diff mbox

[ovs-dev,v3,2/6] netdev-dpdk: Add netdev_dpdk_txq_flush function.

Message ID 1498775976-4142-3-git-send-email-bhanuprakash.bodireddy@intel.com
State Not Applicable
Delegated to: Darrell Ball
Headers show

Commit Message

Bodireddy, Bhanuprakash June 29, 2017, 10:39 p.m. UTC
This commit adds netdev_dpdk_txq_flush() function. If there are
any packets waiting in the queue, they are transmitted instantly
using the rte_eth_tx_burst function. In XPS enabled case, lock is
taken on the tx queue before flushing the queue.

Signed-off-by: Bhanuprakash Bodireddy <bhanuprakash.bodireddy@intel.com>
Signed-off-by: Antonio Fischetti <antonio.fischetti@intel.com>
Co-authored-by: Antonio Fischetti <antonio.fischetti@intel.com>
Signed-off-by: Markus Magnusson <markus.magnusson@ericsson.com>
Co-authored-by: Markus Magnusson <markus.magnusson@ericsson.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
---
 lib/netdev-dpdk.c | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

Comments

Darrell Ball Aug. 7, 2017, 5:29 a.m. UTC | #1
Hi Bhanu

Would it be possible to combine patches 1 and 2, rather than initially defining an empty
netdev_txq_flush for dpdk? I think the combined patch would have more context.

Darrell


-----Original Message-----
From: <ovs-dev-bounces@openvswitch.org> on behalf of Bhanuprakash Bodireddy <bhanuprakash.bodireddy@intel.com>
Date: Thursday, June 29, 2017 at 3:39 PM
To: "dev@openvswitch.org" <dev@openvswitch.org>
Subject: [ovs-dev] [PATCH v3 2/6] netdev-dpdk: Add netdev_dpdk_txq_flush	function.

    This commit adds netdev_dpdk_txq_flush() function. If there are
    any packets waiting in the queue, they are transmitted instantly
    using the rte_eth_tx_burst function. In XPS enabled case, lock is
    taken on the tx queue before flushing the queue.
    
    Signed-off-by: Bhanuprakash Bodireddy <bhanuprakash.bodireddy@intel.com>
    Signed-off-by: Antonio Fischetti <antonio.fischetti@intel.com>
    Co-authored-by: Antonio Fischetti <antonio.fischetti@intel.com>
    Signed-off-by: Markus Magnusson <markus.magnusson@ericsson.com>
    Co-authored-by: Markus Magnusson <markus.magnusson@ericsson.com>
    Acked-by: Eelco Chaudron <echaudro@redhat.com>
    ---
     lib/netdev-dpdk.c | 31 +++++++++++++++++++++++++++++--
     1 file changed, 29 insertions(+), 2 deletions(-)
    
    diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
    index 9ca4433..dd42716 100644
    --- a/lib/netdev-dpdk.c
    +++ b/lib/netdev-dpdk.c
    @@ -293,6 +293,11 @@ struct dpdk_mp {
         struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
     };
     
    +/* Queue 'INTERIM_QUEUE_BURST_THRESHOLD' packets before transmitting.
    + * Defaults to 'NETDEV_MAX_BURST'(32) packets.
    + */
    +#define INTERIM_QUEUE_BURST_THRESHOLD NETDEV_MAX_BURST
    +
     /* There should be one 'struct dpdk_tx_queue' created for
      * each cpu core. */
     struct dpdk_tx_queue {
    @@ -302,6 +307,12 @@ struct dpdk_tx_queue {
                                         * pmd threads (see 'concurrent_txq'). */
         int map;                       /* Mapping of configured vhost-user queues
                                         * to enabled by guest. */
    +    int dpdk_pkt_cnt;              /* Number of buffered packets waiting to
    +                                      be sent on DPDK tx queue. */
    +    struct rte_mbuf *dpdk_burst_pkts[INTERIM_QUEUE_BURST_THRESHOLD];
    +                                   /* Intermediate queue where packets can
    +                                    * be buffered to amortize the cost of MMIO
    +                                    * writes. */
     };
     
     /* dpdk has no way to remove dpdk ring ethernet devices
    @@ -1897,9 +1908,25 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
      * few packets (< INTERIM_QUEUE_BURST_THRESHOLD) buffered in the queue.
      */
     static int
    -netdev_dpdk_txq_flush(struct netdev *netdev OVS_UNUSED,
    -                      int qid OVS_UNUSED, bool concurrent_txq OVS_UNUSED)
    +netdev_dpdk_txq_flush(struct netdev *netdev,
    +                      int qid, bool concurrent_txq)
     {
    +    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    +    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    +
    +    if (OVS_LIKELY(txq->dpdk_pkt_cnt)) {
    +        if (OVS_UNLIKELY(concurrent_txq)) {
    +            qid = qid % dev->up.n_txq;
    +            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    +        }
    +
    +        netdev_dpdk_eth_tx_burst(dev, qid, txq->dpdk_burst_pkts,
    +                                 txq->dpdk_pkt_cnt);
    +
    +        if (OVS_UNLIKELY(concurrent_txq)) {
    +            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    +        }
    +    }
         return 0;
     }
     
    -- 
    2.4.11
    
    _______________________________________________
    dev mailing list
    dev@openvswitch.org
    https://urldefense.proofpoint.com/v2/url?u=https-3A__mail.openvswitch.org_mailman_listinfo_ovs-2Ddev&d=DwICAg&c=uilaK90D4TOVoH58JNXRgQ&r=BVhFA09CGX7JQ5Ih-uZnsw&m=1wUGGHlSVXpqn5THs-saPYXoqzsKYA6zy3m0dzrOr5c&s=HDVtHRNK1uhmuU70EfLAxfXvZXasjTmO8b8zpS7M9t4&e=
Bodireddy, Bhanuprakash Aug. 8, 2017, 8:52 a.m. UTC | #2
>Hi Bhanu
>
>Would it be possible to combine patches 1 and 2, rather than initially defining
>an empty netdev_txq_flush for dpdk ? I think the combined patch would have
>more context.

No problem Darrell. I will merge 1 & 2 in V4.

- Bhanuprakash.

>
>
>-----Original Message-----
>From: <ovs-dev-bounces@openvswitch.org> on behalf of Bhanuprakash
>Bodireddy <bhanuprakash.bodireddy@intel.com>
>Date: Thursday, June 29, 2017 at 3:39 PM
>To: "dev@openvswitch.org" <dev@openvswitch.org>
>Subject: [ovs-dev] [PATCH v3 2/6] netdev-dpdk: Add netdev_dpdk_txq_flush
>	function.
>
>    This commit adds netdev_dpdk_txq_flush() function. If there are
>    any packets waiting in the queue, they are transmitted instantly
>    using the rte_eth_tx_burst function. In XPS enabled case, lock is
>    taken on the tx queue before flushing the queue.
>
>    Signed-off-by: Bhanuprakash Bodireddy
><bhanuprakash.bodireddy@intel.com>
>    Signed-off-by: Antonio Fischetti <antonio.fischetti@intel.com>
>    Co-authored-by: Antonio Fischetti <antonio.fischetti@intel.com>
>    Signed-off-by: Markus Magnusson <markus.magnusson@ericsson.com>
>    Co-authored-by: Markus Magnusson <markus.magnusson@ericsson.com>
>    Acked-by: Eelco Chaudron <echaudro@redhat.com>
>    ---
>     lib/netdev-dpdk.c | 31 +++++++++++++++++++++++++++++--
>     1 file changed, 29 insertions(+), 2 deletions(-)
>
>    diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
>    index 9ca4433..dd42716 100644
>    --- a/lib/netdev-dpdk.c
>    +++ b/lib/netdev-dpdk.c
>    @@ -293,6 +293,11 @@ struct dpdk_mp {
>         struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
>     };
>
>    +/* Queue 'INTERIM_QUEUE_BURST_THRESHOLD' packets before
>transmitting.
>    + * Defaults to 'NETDEV_MAX_BURST'(32) packets.
>    + */
>    +#define INTERIM_QUEUE_BURST_THRESHOLD NETDEV_MAX_BURST
>    +
>     /* There should be one 'struct dpdk_tx_queue' created for
>      * each cpu core. */
>     struct dpdk_tx_queue {
>    @@ -302,6 +307,12 @@ struct dpdk_tx_queue {
>                                         * pmd threads (see 'concurrent_txq'). */
>         int map;                       /* Mapping of configured vhost-user queues
>                                         * to enabled by guest. */
>    +    int dpdk_pkt_cnt;              /* Number of buffered packets waiting to
>    +                                      be sent on DPDK tx queue. */
>    +    struct rte_mbuf
>*dpdk_burst_pkts[INTERIM_QUEUE_BURST_THRESHOLD];
>    +                                   /* Intermediate queue where packets can
>    +                                    * be buffered to amortize the cost of MMIO
>    +                                    * writes. */
>     };
>
>     /* dpdk has no way to remove dpdk ring ethernet devices
>    @@ -1897,9 +1908,25 @@ netdev_dpdk_send__(struct netdev_dpdk *dev,
>int qid,
>      * few packets (< INTERIM_QUEUE_BURST_THRESHOLD) buffered in the
>queue.
>      */
>     static int
>    -netdev_dpdk_txq_flush(struct netdev *netdev OVS_UNUSED,
>    -                      int qid OVS_UNUSED, bool concurrent_txq OVS_UNUSED)
>    +netdev_dpdk_txq_flush(struct netdev *netdev,
>    +                      int qid, bool concurrent_txq)
>     {
>    +    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
>    +    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
>    +
>    +    if (OVS_LIKELY(txq->dpdk_pkt_cnt)) {
>    +        if (OVS_UNLIKELY(concurrent_txq)) {
>    +            qid = qid % dev->up.n_txq;
>    +            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
>    +        }
>    +
>    +        netdev_dpdk_eth_tx_burst(dev, qid, txq->dpdk_burst_pkts,
>    +                                 txq->dpdk_pkt_cnt);
>    +
>    +        if (OVS_UNLIKELY(concurrent_txq)) {
>    +            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
>    +        }
>    +    }
>         return 0;
>     }
>
>    --
>    2.4.11
>
>    _______________________________________________
>    dev mailing list
>    dev@openvswitch.org
>    https://urldefense.proofpoint.com/v2/url?u=https-
>3A__mail.openvswitch.org_mailman_listinfo_ovs-
>2Ddev&d=DwICAg&c=uilaK90D4TOVoH58JNXRgQ&r=BVhFA09CGX7JQ5Ih-
>uZnsw&m=1wUGGHlSVXpqn5THs-
>saPYXoqzsKYA6zy3m0dzrOr5c&s=HDVtHRNK1uhmuU70EfLAxfXvZXasjTmO8b8
>zpS7M9t4&e=
>
>
>
>
>
>
>
diff mbox

Patch

diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
index 9ca4433..dd42716 100644
--- a/lib/netdev-dpdk.c
+++ b/lib/netdev-dpdk.c
@@ -293,6 +293,11 @@  struct dpdk_mp {
     struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
 };
 
+/* Queue 'INTERIM_QUEUE_BURST_THRESHOLD' packets before transmitting.
+ * Defaults to 'NETDEV_MAX_BURST'(32) packets.
+ */
+#define INTERIM_QUEUE_BURST_THRESHOLD NETDEV_MAX_BURST
+
 /* There should be one 'struct dpdk_tx_queue' created for
  * each cpu core. */
 struct dpdk_tx_queue {
@@ -302,6 +307,12 @@  struct dpdk_tx_queue {
                                     * pmd threads (see 'concurrent_txq'). */
     int map;                       /* Mapping of configured vhost-user queues
                                     * to enabled by guest. */
+    int dpdk_pkt_cnt;              /* Number of buffered packets waiting to
+                                      be sent on DPDK tx queue. */
+    struct rte_mbuf *dpdk_burst_pkts[INTERIM_QUEUE_BURST_THRESHOLD];
+                                   /* Intermediate queue where packets can
+                                    * be buffered to amortize the cost of MMIO
+                                    * writes. */
 };
 
 /* dpdk has no way to remove dpdk ring ethernet devices
@@ -1897,9 +1908,25 @@  netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
  * few packets (< INTERIM_QUEUE_BURST_THRESHOLD) buffered in the queue.
  */
 static int
-netdev_dpdk_txq_flush(struct netdev *netdev OVS_UNUSED,
-                      int qid OVS_UNUSED, bool concurrent_txq OVS_UNUSED)
+netdev_dpdk_txq_flush(struct netdev *netdev,
+                      int qid, bool concurrent_txq)
 {
+    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
+
+    if (OVS_LIKELY(txq->dpdk_pkt_cnt)) {
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            qid = qid % dev->up.n_txq;
+            rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
+        }
+
+        netdev_dpdk_eth_tx_burst(dev, qid, txq->dpdk_burst_pkts,
+                                 txq->dpdk_pkt_cnt);
+
+        if (OVS_UNLIKELY(concurrent_txq)) {
+            rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
+        }
+    }
     return 0;
 }