@@ -58,6 +58,8 @@ struct ufid_to_rte_flow_data {
struct rte_flow *rte_flow;
bool actions_offloaded;
struct dpif_flow_stats stats;
+ struct ovs_mutex lock;
+ bool dead;
};
struct netdev_offload_dpdk_data {
@@ -233,6 +235,7 @@ ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
data->netdev = netdev_ref(netdev);
data->rte_flow = rte_flow;
data->actions_offloaded = actions_offloaded;
+ ovs_mutex_init(&data->lock);
cmap_insert(map, CONST_CAST(struct cmap_node *, &data->node), hash);
@@ -240,8 +243,16 @@ ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
return data;
}
+static void
+rte_flow_data_unref(struct ufid_to_rte_flow_data *data)
+{
+ ovs_mutex_destroy(&data->lock);
+ free(data);
+}
+
static inline void
ufid_to_rte_flow_disassociate(struct ufid_to_rte_flow_data *data)
+ OVS_REQUIRES(data->lock)
{
size_t hash = hash_bytes(&data->ufid, sizeof data->ufid, 0);
struct cmap *map = offload_data_map(data->netdev);
@@ -255,7 +266,7 @@ ufid_to_rte_flow_disassociate(struct ufid_to_rte_flow_data *data)
offload_data_unlock(data->netdev);
netdev_close(data->netdev);
- ovsrcu_postpone(free, data);
+ ovsrcu_postpone(rte_flow_data_unref, data);
}
/*
@@ -1581,6 +1592,15 @@ netdev_offload_dpdk_flow_destroy(struct ufid_to_rte_flow_data *rte_flow_data)
ovs_u128 *ufid;
int ret;
+ ovs_mutex_lock(&rte_flow_data->lock);
+
+ if (rte_flow_data->dead) {
+ ovs_mutex_unlock(&rte_flow_data->lock);
+ return 0;
+ }
+
+ rte_flow_data->dead = true;
+
rte_flow = rte_flow_data->rte_flow;
netdev = rte_flow_data->netdev;
ufid = &rte_flow_data->ufid;
@@ -1607,6 +1627,8 @@ netdev_offload_dpdk_flow_destroy(struct ufid_to_rte_flow_data *rte_flow_data)
UUID_ARGS((struct uuid *) ufid));
}
+ ovs_mutex_unlock(&rte_flow_data->lock);
+
return ret;
}
@@ -1702,8 +1724,19 @@ netdev_offload_dpdk_flow_get(struct netdev *netdev,
struct rte_flow_error error;
int ret = 0;
+ attrs->dp_extra_info = NULL;
+
rte_flow_data = ufid_to_rte_flow_data_find(netdev, ufid, false);
- if (!rte_flow_data || !rte_flow_data->rte_flow) {
+ if (!rte_flow_data || !rte_flow_data->rte_flow ||
+ rte_flow_data->dead || ovs_mutex_trylock(&rte_flow_data->lock)) {
+ return -1;
+ }
+
+    /* Check again whether the data is dead, as it could have been
+     * marked dead after the unlocked check above but before this
+     * thread acquired the lock.  The first check only serves to
+     * avoid taking the lock needlessly.
+ if (rte_flow_data->dead) {
ret = -1;
goto out;
}
@@ -1730,7 +1763,7 @@ netdev_offload_dpdk_flow_get(struct netdev *netdev,
}
memcpy(stats, &rte_flow_data->stats, sizeof *stats);
out:
- attrs->dp_extra_info = NULL;
+ ovs_mutex_unlock(&rte_flow_data->lock);
return ret;
}