diff mbox series

[ovs-dev,v1,18/23] netdev-offload-dpdk: Protect concurrent offload destroy/query

Message ID 3c8ccc6a4c0b609103bbb72d54c763e56138afe0.1612968146.git.grive@u256.net
State New
Headers show
Series dpif-netdev: Parallel offload processing | expand

Commit Message

Gaëtan Rivet Feb. 10, 2021, 3:34 p.m. UTC
The rte_flow API in DPDK is now thread safe for insertion and deletion.
It is not however safe for concurrent query while the offload is being
inserted or deleted.

Insertion is not an issue as the rte_flow handle will be published to
other threads only once it has been inserted in the hardware, so the
query will only be able to proceed once it is already available.

For the deletion path however, offload status queries can be made while
an offload is being destroyed. This would create race conditions and
use-after-free if not properly protected.

As a pre-step before removing the OVS-level locks on the rte_flow API,
mutually exclude offload query and deletion from concurrent execution.

Signed-off-by: Gaetan Rivet <grive@u256.net>
Reviewed-by: Eli Britstein <elibr@nvidia.com>
---
 lib/netdev-offload-dpdk.c | 39 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)
diff mbox series

Patch

diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 617f87eca..72cff5688 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -58,6 +58,8 @@  struct ufid_to_rte_flow_data {
     struct rte_flow *rte_flow;
     bool actions_offloaded;
     struct dpif_flow_stats stats;
+    struct ovs_mutex lock;
+    bool dead;
 };
 
 struct netdev_offload_dpdk_data {
@@ -233,6 +235,7 @@  ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
     data->netdev = netdev_ref(netdev);
     data->rte_flow = rte_flow;
     data->actions_offloaded = actions_offloaded;
+    ovs_mutex_init(&data->lock);
 
     cmap_insert(map, CONST_CAST(struct cmap_node *, &data->node), hash);
 
@@ -240,8 +243,16 @@  ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
     return data;
 }
 
+static void
+rte_flow_data_unref(struct ufid_to_rte_flow_data *data)
+{
+    ovs_mutex_destroy(&data->lock);
+    free(data);
+}
+
 static inline void
 ufid_to_rte_flow_disassociate(struct ufid_to_rte_flow_data *data)
+    OVS_REQUIRES(data->lock)
 {
     size_t hash = hash_bytes(&data->ufid, sizeof data->ufid, 0);
     struct cmap *map = offload_data_map(data->netdev);
@@ -255,7 +266,7 @@  ufid_to_rte_flow_disassociate(struct ufid_to_rte_flow_data *data)
     offload_data_unlock(data->netdev);
 
     netdev_close(data->netdev);
-    ovsrcu_postpone(free, data);
+    ovsrcu_postpone(rte_flow_data_unref, data);
 }
 
 /*
@@ -1581,6 +1592,15 @@  netdev_offload_dpdk_flow_destroy(struct ufid_to_rte_flow_data *rte_flow_data)
     ovs_u128 *ufid;
     int ret;
 
+    ovs_mutex_lock(&rte_flow_data->lock);
+
+    if (rte_flow_data->dead) {
+        ovs_mutex_unlock(&rte_flow_data->lock);
+        return 0;
+    }
+
+    rte_flow_data->dead = true;
+
     rte_flow = rte_flow_data->rte_flow;
     netdev = rte_flow_data->netdev;
     ufid = &rte_flow_data->ufid;
@@ -1607,6 +1627,8 @@  netdev_offload_dpdk_flow_destroy(struct ufid_to_rte_flow_data *rte_flow_data)
                  UUID_ARGS((struct uuid *) ufid));
     }
 
+    ovs_mutex_unlock(&rte_flow_data->lock);
+
     return ret;
 }
 
@@ -1702,8 +1724,19 @@  netdev_offload_dpdk_flow_get(struct netdev *netdev,
     struct rte_flow_error error;
     int ret = 0;
 
+    attrs->dp_extra_info = NULL;
+
     rte_flow_data = ufid_to_rte_flow_data_find(netdev, ufid, false);
-    if (!rte_flow_data || !rte_flow_data->rte_flow) {
+    if (!rte_flow_data || !rte_flow_data->rte_flow ||
+        rte_flow_data->dead || ovs_mutex_trylock(&rte_flow_data->lock)) {
+        return -1;
+    }
+
+    /* Check again whether the data is dead, as it could have been
+     * updated while the lock was not yet taken. The first check above
+     * was only to avoid unnecessary locking if possible.
+     */
+    if (rte_flow_data->dead) {
         ret = -1;
         goto out;
     }
@@ -1730,7 +1763,7 @@  netdev_offload_dpdk_flow_get(struct netdev *netdev,
     }
     memcpy(stats, &rte_flow_data->stats, sizeof *stats);
 out:
-    attrs->dp_extra_info = NULL;
+    ovs_mutex_unlock(&rte_flow_data->lock);
     return ret;
 }