@@ -2590,7 +2590,7 @@ mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,
port = netdev_ports_get(in_port, dpif_type_str);
if (port) {
/* Taking a global 'port_mutex' to fulfill thread safety
- * restrictions for the netdev-offload-dpdk module. */
+ * restrictions regarding netdev port mapping. */
ovs_mutex_lock(&pmd->dp->port_mutex);
ret = netdev_flow_del(port, &flow->mega_ufid, NULL);
ovs_mutex_unlock(&pmd->dp->port_mutex);
@@ -2770,8 +2770,8 @@ dp_netdev_flow_offload_put(struct dp_offload_flow_item *offload)
netdev_close(port);
goto err_free;
}
- /* Taking a global 'port_mutex' to fulfill thread safety restrictions for
- * the netdev-offload-dpdk module. */
+ /* Taking a global 'port_mutex' to fulfill thread safety
+ * restrictions regarding netdev port mapping. */
ovs_mutex_lock(&pmd->dp->port_mutex);
ret = netdev_flow_put(port, &offload->match,
CONST_CAST(struct nlattr *, offload->actions),
@@ -3574,7 +3574,7 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
}
ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
/* Taking a global 'port_mutex' to fulfill thread safety
- * restrictions for the netdev-offload-dpdk module.
+ * restrictions regarding netdev port mapping.
*
* XXX: Main thread will try to pause/stop all revalidators during datapath
* reconfiguration via datapath purge callback (dp_purge_cb) while
@@ -38,9 +38,6 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);
*
* Below API is NOT thread safe in following terms:
*
- * - The caller must be sure that none of these functions will be called
- * simultaneously. Even for different 'netdev's.
- *
* - The caller must be sure that 'netdev' will not be destructed/deallocated.
*
* - The caller must be sure that 'netdev' configuration will not be changed.
@@ -66,6 +63,7 @@ struct ufid_to_rte_flow_data {
struct netdev_offload_dpdk_data {
struct cmap ufid_to_rte_flow;
uint64_t *rte_flow_counters;
+ struct ovs_mutex map_lock;
};
static int
@@ -74,6 +72,7 @@ offload_data_init(struct netdev *netdev)
struct netdev_offload_dpdk_data *data;
data = xzalloc(sizeof *data);
+ ovs_mutex_init(&data->map_lock);
cmap_init(&data->ufid_to_rte_flow);
data->rte_flow_counters = xcalloc(netdev_offload_thread_nb(),
sizeof *data->rte_flow_counters);
@@ -86,6 +85,7 @@ offload_data_init(struct netdev *netdev)
static void
offload_data_destroy__(struct netdev_offload_dpdk_data *data)
{
+ ovs_mutex_destroy(&data->map_lock);
free(data->rte_flow_counters);
free(data);
}
@@ -117,6 +117,34 @@ offload_data_destroy(struct netdev *netdev)
ovsrcu_set(&netdev->hw_info.offload_data, NULL);
}
+static void
+offload_data_lock(struct netdev *netdev)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ struct netdev_offload_dpdk_data *data;
+
+ data = (struct netdev_offload_dpdk_data *)
+ ovsrcu_get(void *, &netdev->hw_info.offload_data);
+ if (!data) {
+ return;
+ }
+ ovs_mutex_lock(&data->map_lock);
+}
+
+static void
+offload_data_unlock(struct netdev *netdev)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ struct netdev_offload_dpdk_data *data;
+
+ data = (struct netdev_offload_dpdk_data *)
+ ovsrcu_get(void *, &netdev->hw_info.offload_data);
+ if (!data) {
+ return;
+ }
+ ovs_mutex_unlock(&data->map_lock);
+}
+
static struct cmap *
offload_data_map(struct netdev *netdev)
{
@@ -155,6 +183,24 @@ ufid_to_rte_flow_data_find(struct netdev *netdev,
return NULL;
}
+/* Find rte_flow with @ufid; the caller must hold the map_lock. */
+static struct ufid_to_rte_flow_data *
+ufid_to_rte_flow_data_find_protected(struct netdev *netdev,
+ const ovs_u128 *ufid)
+{
+ size_t hash = hash_bytes(ufid, sizeof *ufid, 0);
+ struct ufid_to_rte_flow_data *data;
+ struct cmap *map = offload_data_map(netdev);
+
+ CMAP_FOR_EACH_WITH_HASH_PROTECTED (data, node, hash, map) {
+ if (ovs_u128_equals(*ufid, data->ufid)) {
+ return data;
+ }
+ }
+
+ return NULL;
+}
+
static inline struct ufid_to_rte_flow_data *
ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
struct rte_flow *rte_flow, bool actions_offloaded)
@@ -170,13 +216,15 @@ ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
data = xzalloc(sizeof *data);
+ offload_data_lock(netdev);
+
/*
* We should not simply overwrite an existing rte flow.
* We should have deleted it first before re-adding it.
* Thus, if following assert triggers, something is wrong:
* the rte_flow is not destroyed.
*/
- data_prev = ufid_to_rte_flow_data_find(netdev, ufid, false);
+ data_prev = ufid_to_rte_flow_data_find_protected(netdev, ufid);
if (data_prev) {
ovs_assert(data_prev->rte_flow == NULL);
}
@@ -187,6 +235,8 @@ ufid_to_rte_flow_associate(struct netdev *netdev, const ovs_u128 *ufid,
data->actions_offloaded = actions_offloaded;
cmap_insert(map, CONST_CAST(struct cmap_node *, &data->node), hash);
+
+ offload_data_unlock(netdev);
return data;
}
@@ -200,7 +250,10 @@ ufid_to_rte_flow_disassociate(struct ufid_to_rte_flow_data *data)
return;
}
+ offload_data_lock(data->netdev);
cmap_remove(map, CONST_CAST(struct cmap_node *, &data->node), hash);
+ offload_data_unlock(data->netdev);
+
netdev_close(data->netdev);
ovsrcu_postpone(free, data);
}