@@ -333,8 +333,8 @@ struct dp_netdev {
/* Ports.
*
* Any lookup into 'ports' or any access to the dp_netdev_ports found
- * through 'ports' requires taking 'port_mutex'. */
- struct ovs_mutex port_mutex;
+ * through 'ports' requires taking 'port_rwlock'. */
+ struct ovs_rwlock port_rwlock;
struct hmap ports;
struct seq *port_seq; /* Incremented whenever a port changes. */
@@ -410,7 +410,7 @@ static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
odp_port_t)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
enum rxq_cycles_counter_type {
RXQ_CYCLES_PROC_CURR, /* Cycles spent successfully polling and
@@ -851,17 +851,17 @@ struct dpif_netdev {
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
static void dp_netdev_free(struct dp_netdev *)
OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
const char *type, odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
@@ -882,7 +882,7 @@ static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
@@ -919,7 +919,7 @@ static void dp_netdev_offload_flush(struct dp_netdev *dp,
struct dp_netdev_port *port);
static void reconfigure_datapath(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
@@ -1425,8 +1425,8 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc,
struct dp_netdev_pmd_thread **pmd_list;
sorted_poll_thread_list(dp, &pmd_list, &n);
- /* take port mutex as HMAP iters over them. */
- ovs_mutex_lock(&dp->port_mutex);
+ /* take port rwlock as HMAP iters over them. */
+ ovs_rwlock_rdlock(&dp->port_rwlock);
for (size_t i = 0; i < n; i++) {
struct dp_netdev_pmd_thread *pmd = pmd_list[i];
@@ -1449,8 +1449,8 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc,
}
}
- /* release port mutex before netdev mutex. */
- ovs_mutex_unlock(&dp->port_mutex);
+ /* release port rwlock before netdev mutex. */
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_unlock(&dp_netdev_mutex);
struct ds reply = DS_EMPTY_INITIALIZER;
@@ -1743,7 +1743,7 @@ create_dpif_netdev(struct dp_netdev *dp)
* Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
uint32_t port_no;
@@ -1806,7 +1806,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
ovs_refcount_init(&dp->ref_cnt);
atomic_flag_clear(&dp->destroyed);
- ovs_mutex_init_recursive(&dp->port_mutex);
+ ovs_rwlock_init(&dp->port_rwlock);
hmap_init(&dp->ports);
dp->port_seq = seq_create();
ovs_mutex_init(&dp->bond_mutex);
@@ -1841,7 +1841,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
ovs_mutex_init_recursive(&dp->non_pmd_mutex);
ovsthread_key_create(&dp->per_pmd_key, NULL);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
/* non-PMD will be created before all other threads and will
* allocate static_tx_qid = 0. */
dp_netdev_set_nonpmd(dp);
@@ -1849,7 +1849,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
"internal"),
ODPP_LOCAL);
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
if (error) {
dp_netdev_free(dp);
return error;
@@ -1935,11 +1935,11 @@ dp_netdev_free(struct dp_netdev *dp)
shash_find_and_delete(&dp_netdevs, dp->name);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
do_del_port(dp, port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_lock(&dp->bond_mutex);
CMAP_FOR_EACH (bond, node, &dp->tx_bonds) {
@@ -1964,7 +1964,7 @@ dp_netdev_free(struct dp_netdev *dp)
seq_destroy(dp->port_seq);
hmap_destroy(&dp->ports);
- ovs_mutex_destroy(&dp->port_mutex);
+ ovs_rwlock_destroy(&dp->port_rwlock);
cmap_destroy(&dp->tx_bonds);
ovs_mutex_destroy(&dp->bond_mutex);
@@ -2132,7 +2132,7 @@ out:
static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct netdev_saved_flags *sf;
struct dp_netdev_port *port;
@@ -2184,7 +2184,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
odp_port_t port_no;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
if (*port_nop != ODPP_NONE) {
port_no = *port_nop;
@@ -2197,7 +2197,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
*port_nop = port_no;
error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2208,7 +2208,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
struct dp_netdev *dp = get_dp_netdev(dpif);
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
if (port_no == ODPP_LOCAL) {
error = EINVAL;
} else {
@@ -2219,7 +2219,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
do_del_port(dp, port);
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2232,7 +2232,7 @@ is_valid_port_number(odp_port_t port_no)
static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2247,7 +2247,7 @@ dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
static int
get_port_by_number(struct dp_netdev *dp,
odp_port_t port_no, struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
if (!is_valid_port_number(port_no)) {
*portp = NULL;
@@ -2282,7 +2282,7 @@ port_destroy(struct dp_netdev_port *port)
static int
get_port_by_name(struct dp_netdev *dp,
const char *devname, struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2301,7 +2301,7 @@ get_port_by_name(struct dp_netdev *dp,
/* Returns 'true' if there is a port with pmd netdev. */
static bool
has_pmd_port(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2316,7 +2316,7 @@ has_pmd_port(struct dp_netdev *dp)
static void
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
dp_netdev_offload_flush(dp, port);
netdev_uninit_flow_api(port->netdev);
@@ -2345,12 +2345,12 @@ dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
struct dp_netdev_port *port;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_rdlock(&dp->port_rwlock);
error = get_port_by_number(dp, port_no, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2363,12 +2363,12 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
struct dp_netdev_port *port;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
error = get_port_by_name(dp, devname, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2586,9 +2586,9 @@ mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,
if (port) {
-        /* Taking a global 'port_mutex' to fulfill thread safety
+        /* Taking a global 'port_rwlock' to fulfill thread safety
* restrictions regarding netdev port mapping. */
- ovs_mutex_lock(&pmd->dp->port_mutex);
+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
ret = netdev_flow_del(port, &flow->mega_ufid, NULL);
- ovs_mutex_unlock(&pmd->dp->port_mutex);
+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);
netdev_close(port);
}
@@ -2764,12 +2764,12 @@ dp_netdev_flow_offload_put(struct dp_offload_flow_item *offload)
}
-    /* Taking a global 'port_mutex' to fulfill thread safety
+    /* Taking a global 'port_rwlock' to fulfill thread safety
* restrictions regarding the netdev port mapping. */
- ovs_mutex_lock(&pmd->dp->port_mutex);
+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
ret = netdev_flow_put(port, &offload->match,
CONST_CAST(struct nlattr *, offload->actions),
offload->actions_len, &flow->mega_ufid, &info,
NULL);
- ovs_mutex_unlock(&pmd->dp->port_mutex);
+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);
netdev_close(port);
if (ret) {
@@ -2825,9 +2825,9 @@ dp_offload_flush(struct dp_offload_thread_item *item)
{
struct dp_offload_flush_item *flush = &item->data->flush;
- ovs_mutex_lock(&flush->dp->port_mutex);
+ ovs_rwlock_rdlock(&flush->dp->port_rwlock);
netdev_flow_flush(flush->netdev);
- ovs_mutex_unlock(&flush->dp->port_mutex);
+ ovs_rwlock_unlock(&flush->dp->port_rwlock);
ovs_barrier_block(flush->barrier);
@@ -3006,7 +3006,7 @@ dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,
* complete its work. As the flush order will only be
* enqueued after existing offload requests, those previous
* offload requests must be processed, which requires being
- * able to lock the 'port_mutex' from the offload thread.
+ * able to lock the 'port_rwlock' from the offload thread.
*
* Flow offload flush is done when a port is being deleted.
* Right after this call executes, the offload API is disabled
@@ -3016,7 +3016,7 @@ dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,
static void
dp_netdev_offload_flush(struct dp_netdev *dp,
struct dp_netdev_port *port)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
/* The flush mutex only serves to protect the static memory barrier.
* The memory barrier needs to go beyond the function scope as
@@ -3034,7 +3034,7 @@ dp_netdev_offload_flush(struct dp_netdev *dp,
return;
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_lock(&flush_mutex);
/* This thread and the offload thread. */
@@ -3052,7 +3052,7 @@ dp_netdev_offload_flush(struct dp_netdev *dp,
* Some offload provider (e.g. DPDK) keeps a netdev reference with
* the offload data. If this reference is not closed, the netdev is
* kept indefinitely. */
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
ovs_barrier_block(&barrier);
ovs_barrier_destroy(&barrier);
@@ -3106,7 +3106,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
struct hmap_node *node;
int retval;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
node = hmap_at_position(&dp->ports, &state->position);
if (node) {
struct dp_netdev_port *port;
@@ -3123,7 +3123,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
} else {
retval = EOF;
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return retval;
}
@@ -3574,24 +3574,24 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
return false;
}
ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
- /* Taking a global 'port_mutex' to fulfill thread safety
+ /* Taking a global 'port_rwlock' to fulfill thread safety
* restrictions regarding netdev port mapping.
*
* XXX: Main thread will try to pause/stop all revalidators during datapath
* reconfiguration via datapath purge callback (dp_purge_cb) while
- * holding 'dp->port_mutex'. So we're not waiting for mutex here.
- * Otherwise, deadlock is possible, bcause revalidators might sleep
+     * holding 'dp->port_rwlock'. So we're not waiting for the lock here.
+ * Otherwise, deadlock is possible, because revalidators might sleep
* waiting for the main thread to release the lock and main thread
* will wait for them to stop processing.
* This workaround might make statistics less accurate. Especially
* for flow deletion case, since there will be no other attempt. */
- if (!ovs_mutex_trylock(&dp->port_mutex)) {
+ if (!ovs_rwlock_tryrdlock(&dp->port_rwlock)) {
ret = netdev_flow_get(netdev, &match, &actions,
&netdev_flow->mega_ufid, stats, attrs, &buf);
/* Storing statistics and attributes from the last request for
-         * later use on mutex contention. */
+         * later use on lock contention. */
dp_netdev_flow_set_last_stats_attrs(netdev_flow, stats, attrs, ret);
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
} else {
dp_netdev_flow_get_last_stats_attrs(netdev_flow, stats, attrs, &ret);
if (!ret && !attrs->dp_layer) {
@@ -4461,7 +4461,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
nb_offloads = 0;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
HMAP_FOR_EACH (port, node, &dp->ports) {
uint64_t port_nb_offloads = 0;
@@ -4470,7 +4470,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
nb_offloads += port_nb_offloads;
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
atomic_read_relaxed(&dp_offload_thread.enqueued_item,
&stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED].value);
@@ -4780,7 +4780,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
bool emc_enabled = smap_get_bool(cfg, "emc-enable", true);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
error = get_port_by_number(dp, port_no, &port);
if (error) {
goto unlock;
@@ -4834,7 +4834,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
dp_netdev_request_reconfigure(dp);
unlock:
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -5324,7 +5324,8 @@ compare_rxq_cycles(const void *a, const void *b)
* The function doesn't touch the pmd threads, it just stores the assignment
* in the 'pmd' member of each rxq. */
static void
-rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
+rxq_scheduling(struct dp_netdev *dp, bool pinned)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
struct rr_numa_list rr;
@@ -5468,7 +5469,7 @@ reload_affected_pmds(struct dp_netdev *dp)
static void
reconfigure_pmd_threads(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *pmd;
struct ovs_numa_dump *pmd_cores;
@@ -5566,7 +5567,7 @@ static void
pmd_remove_stale_ports(struct dp_netdev *dp,
struct dp_netdev_pmd_thread *pmd)
OVS_EXCLUDED(pmd->port_mutex)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct rxq_poll *poll, *poll_next;
struct tx_port *tx, *tx_next;
@@ -5596,7 +5597,7 @@ pmd_remove_stale_ports(struct dp_netdev *dp,
* rxqs and assigns all rxqs/txqs to pmd threads. */
static void
reconfigure_datapath(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct hmapx busy_threads = HMAPX_INITIALIZER(&busy_threads);
struct dp_netdev_pmd_thread *pmd;
@@ -5780,7 +5781,7 @@ reconfigure_datapath(struct dp_netdev *dp)
/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
static bool
ports_require_restart(const struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -5831,7 +5832,7 @@ variance(uint64_t a[], int n)
static bool
get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
uint32_t num_pmds, uint64_t *predicted_variance)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
struct dp_netdev_pmd_thread *pmd;
@@ -5954,7 +5955,7 @@ cleanup:
* better distribution of load on PMDs. */
static bool
pmd_rebalance_dry_run(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *pmd;
uint64_t *curr_pmd_usage;
@@ -6049,7 +6050,7 @@ dpif_netdev_run(struct dpif *dpif)
long long int now = time_msec();
struct dp_netdev_pmd_thread *pmd;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
if (non_pmd) {
ovs_mutex_lock(&dp->non_pmd_mutex);
@@ -6121,7 +6122,7 @@ dpif_netdev_run(struct dpif *dpif)
if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
reconfigure_datapath(dp);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
tnl_neigh_cache_run();
tnl_port_map_run();
@@ -6141,7 +6142,7 @@ dpif_netdev_wait(struct dpif *dpif)
struct dp_netdev *dp = get_dp_netdev(dpif);
ovs_mutex_lock(&dp_netdev_mutex);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
HMAP_FOR_EACH (port, node, &dp->ports) {
netdev_wait_reconf_required(port->netdev);
if (!netdev_is_pmd(port->netdev)) {
@@ -6152,7 +6153,7 @@ dpif_netdev_wait(struct dpif *dpif)
}
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_unlock(&dp_netdev_mutex);
seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}
@@ -6770,7 +6771,7 @@ dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *non_pmd;
@@ -8833,7 +8834,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
ovs_refcount_ref(&dp->ref_cnt);
ovs_mutex_unlock(&dp_netdev_mutex);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
if (get_port_by_name(dp, argv[2], &port)) {
unixctl_command_reply_error(conn, "unknown port");
goto exit;
@@ -8862,7 +8863,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
unixctl_command_reply(conn, NULL);
exit:
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
dp_netdev_unref(dp);
}
@@ -44,8 +44,8 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);
* For example, simultaneous call of 'netdev_reconfigure()' for the same
* 'netdev' is forbidden.
*
- * For current implementation all above restrictions could be fulfilled by
- * taking the datapath 'port_mutex' in lib/dpif-netdev.c. */
+ * For current implementation all above restrictions are fulfilled by
+ * read-locking the datapath 'port_rwlock' in lib/dpif-netdev.c. */
/*
* A mapping from ufid to dpdk rte_flow.