[ovs-dev,v1,16/23] netdev-offload-dpdk: Use per-thread HW offload stats

Message ID 0f95979461825d788abd0b8ad20e30436c6f5d7e.1612968146.git.grive@u256.net
State Changes Requested
Series dpif-netdev: Parallel offload processing

Commit Message

Gaetan Rivet Feb. 10, 2021, 3:34 p.m. UTC
The hardware offload counters are currently implemented on the assumption
that a single thread manages them. Use the offload thread pool API to
manage one counter per thread.

Signed-off-by: Gaetan Rivet <grive@u256.net>
Reviewed-by: Eli Britstein <elibr@nvidia.com>
---
 lib/netdev-offload-dpdk.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

Patch

diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 1d60c5a81..338ca4dc6 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -65,7 +65,7 @@  struct ufid_to_rte_flow_data {
 
 struct netdev_offload_dpdk_data {
     struct cmap ufid_to_rte_flow;
-    uint64_t rte_flow_counter;
+    uint64_t *rte_flow_counters;
 };
 
 static int
@@ -75,6 +75,8 @@  offload_data_init(struct netdev *netdev)
 
     data = xzalloc(sizeof *data);
     cmap_init(&data->ufid_to_rte_flow);
+    data->rte_flow_counters = xcalloc(netdev_offload_thread_nb(),
+                                      sizeof *data->rte_flow_counters);
 
     ovsrcu_set(&netdev->hw_info.offload_data, (void *) data);
 
@@ -84,6 +86,7 @@  offload_data_init(struct netdev *netdev)
 static void
 offload_data_destroy__(struct netdev_offload_dpdk_data *data)
 {
+    free(data->rte_flow_counters);
     free(data);
 }
 
@@ -646,10 +649,11 @@  netdev_offload_dpdk_flow_create(struct netdev *netdev,
     flow = netdev_dpdk_rte_flow_create(netdev, attr, items, actions, error);
     if (flow) {
         struct netdev_offload_dpdk_data *data;
+        unsigned int tid = netdev_offload_thread_id();
 
         data = (struct netdev_offload_dpdk_data *)
             ovsrcu_get(void *, &netdev->hw_info.offload_data);
-        data->rte_flow_counter++;
+        data->rte_flow_counters[tid]++;
 
         if (!VLOG_DROP_DBG(&rl)) {
             dump_flow(&s, &s_extra, attr, items, actions);
@@ -1532,10 +1536,11 @@  netdev_offload_dpdk_flow_destroy(struct ufid_to_rte_flow_data *rte_flow_data)
 
     if (ret == 0) {
         struct netdev_offload_dpdk_data *data;
+        unsigned int tid = netdev_offload_thread_id();
 
         data = (struct netdev_offload_dpdk_data *)
             ovsrcu_get(void *, &netdev->hw_info.offload_data);
-        data->rte_flow_counter--;
+        data->rte_flow_counters[tid]--;
 
         ufid_to_rte_flow_disassociate(rte_flow_data);
         VLOG_DBG_RL(&rl, "%s: rte_flow 0x%"PRIxPTR
@@ -1698,6 +1703,7 @@  netdev_offload_dpdk_get_n_flows(struct netdev *netdev,
                                 uint64_t *n_flows)
 {
     struct netdev_offload_dpdk_data *data;
+    unsigned int tid;
 
     data = (struct netdev_offload_dpdk_data *)
         ovsrcu_get(void *, &netdev->hw_info.offload_data);
@@ -1705,7 +1711,9 @@  netdev_offload_dpdk_get_n_flows(struct netdev *netdev,
         return -1;
     }
 
-    *n_flows = data->rte_flow_counter;
+    for (tid = 0; tid < netdev_offload_thread_nb(); tid++) {
+        n_flows[tid] = data->rte_flow_counters[tid];
+    }
 
     return 0;
 }
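
Editor's note, not part of the patch: a minimal caller-side sketch of the new
contract of netdev_offload_dpdk_get_n_flows() shown in the last hunk. The
n_flows argument now points to one slot per offload thread rather than a
single scalar, so the caller is assumed to allocate netdev_offload_thread_nb()
entries and aggregate them itself. The direct call below is illustrative only;
the function is normally reached through the netdev-offload layer, and
netdev_offload_thread_nb() comes from the thread pool API introduced earlier
in this series.

    /* Hypothetical caller: collect per-thread flow counts and sum them. */
    uint64_t *counts = xcalloc(netdev_offload_thread_nb(), sizeof *counts);
    uint64_t total = 0;
    unsigned int tid;

    if (!netdev_offload_dpdk_get_n_flows(netdev, counts)) {
        for (tid = 0; tid < netdev_offload_thread_nb(); tid++) {
            total += counts[tid];
        }
    }
    free(counts);

Keeping one counter per thread, indexed by netdev_offload_thread_id(), lets
each offload thread increment or decrement only its own slot on flow create
and destroy, avoiding atomic operations on the hot path; the cost is the
summation loop when the statistics are read.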