@@ -330,8 +330,8 @@ struct dp_netdev {
/* Ports.
*
* Any lookup into 'ports' or any access to the dp_netdev_ports found
- * through 'ports' requires taking 'port_mutex'. */
- struct ovs_mutex port_mutex;
+ * through 'ports' requires taking 'port_rwlock'. */
+ struct ovs_rwlock port_rwlock;
struct hmap ports;
struct seq *port_seq; /* Incremented whenever a port changes. */
@@ -407,7 +407,7 @@ static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
odp_port_t)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
enum rxq_cycles_counter_type {
RXQ_CYCLES_PROC_CURR, /* Cycles spent successfully polling and
@@ -828,17 +828,17 @@ struct dpif_netdev {
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static void dp_netdev_free(struct dp_netdev *)
OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
const char *type, odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
@@ -859,7 +859,7 @@ static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
@@ -893,7 +893,7 @@ static void dp_netdev_del_bond_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
OVS_EXCLUDED(pmd->bond_mutex);
static void reconfigure_datapath(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
@@ -1400,7 +1400,7 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc,
sorted_poll_thread_list(dp, &pmd_list, &n);
- /* take port mutex as HMAP iters over them. */
+ /* take port rwlock as HMAP iters over them. */
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
for (size_t i = 0; i < n; i++) {
struct dp_netdev_pmd_thread *pmd = pmd_list[i];
@@ -1424,7 +1424,7 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc,
}
- /* release port mutex before netdev mutex. */
+ /* release port rwlock before netdev mutex. */
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_unlock(&dp_netdev_mutex);
struct ds reply = DS_EMPTY_INITIALIZER;
@@ -1717,7 +1717,7 @@ create_dpif_netdev(struct dp_netdev *dp)
* Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
uint32_t port_no;
@@ -1780,7 +1780,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
ovs_refcount_init(&dp->ref_cnt);
atomic_flag_clear(&dp->destroyed);
- ovs_mutex_init(&dp->port_mutex);
+ ovs_rwlock_init(&dp->port_rwlock);
hmap_init(&dp->ports);
dp->port_seq = seq_create();
ovs_mutex_init(&dp->bond_mutex);
@@ -1815,7 +1815,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
ovs_mutex_init_recursive(&dp->non_pmd_mutex);
ovsthread_key_create(&dp->per_pmd_key, NULL);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
/* non-PMD will be created before all other threads and will
* allocate static_tx_qid = 0. */
dp_netdev_set_nonpmd(dp);
@@ -1823,7 +1823,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
"internal"),
ODPP_LOCAL);
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
if (error) {
dp_netdev_free(dp);
return error;
@@ -1909,11 +1909,11 @@ dp_netdev_free(struct dp_netdev *dp)
shash_find_and_delete(&dp_netdevs, dp->name);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
do_del_port(dp, port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_lock(&dp->bond_mutex);
CMAP_FOR_EACH (bond, node, &dp->tx_bonds) {
@@ -1938,7 +1938,7 @@ dp_netdev_free(struct dp_netdev *dp)
seq_destroy(dp->port_seq);
hmap_destroy(&dp->ports);
- ovs_mutex_destroy(&dp->port_mutex);
+ ovs_rwlock_destroy(&dp->port_rwlock);
cmap_destroy(&dp->tx_bonds);
ovs_mutex_destroy(&dp->bond_mutex);
@@ -2106,7 +2106,7 @@ out:
static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct netdev_saved_flags *sf;
struct dp_netdev_port *port;
@@ -2158,7 +2158,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
odp_port_t port_no;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
if (*port_nop != ODPP_NONE) {
port_no = *port_nop;
@@ -2171,7 +2171,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
*port_nop = port_no;
error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2182,7 +2182,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
struct dp_netdev *dp = get_dp_netdev(dpif);
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
if (port_no == ODPP_LOCAL) {
error = EINVAL;
} else {
@@ -2193,7 +2193,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
do_del_port(dp, port);
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2206,7 +2206,7 @@ is_valid_port_number(odp_port_t port_no)
static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2221,7 +2221,7 @@ dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
static int
get_port_by_number(struct dp_netdev *dp,
odp_port_t port_no, struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
if (!is_valid_port_number(port_no)) {
*portp = NULL;
@@ -2256,7 +2256,7 @@ port_destroy(struct dp_netdev_port *port)
static int
get_port_by_name(struct dp_netdev *dp,
const char *devname, struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2275,7 +2275,7 @@ get_port_by_name(struct dp_netdev *dp,
/* Returns 'true' if there is a port with pmd netdev. */
static bool
has_pmd_port(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2290,7 +2290,7 @@ has_pmd_port(struct dp_netdev *dp)
static void
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
hmap_remove(&dp->ports, &port->node);
seq_change(dp->port_seq);
@@ -2317,12 +2317,12 @@ dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
struct dp_netdev_port *port;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
error = get_port_by_number(dp, port_no, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2335,12 +2335,12 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
struct dp_netdev_port *port;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
error = get_port_by_name(dp, devname, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2558,9 +2558,9 @@ mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,
if (port) {
- /* Taking a global 'port_mutex' to fulfill thread safety
- * restrictions regarding netdev port mapping. */
+ /* Taking a global 'port_rwlock' to fulfill thread safety
+ * restrictions regarding netdev port mapping. */
- ovs_mutex_lock(&pmd->dp->port_mutex);
+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
ret = netdev_flow_del(port, &flow->mega_ufid, NULL);
- ovs_mutex_unlock(&pmd->dp->port_mutex);
+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);
netdev_close(port);
}
@@ -2713,12 +2713,12 @@ dp_netdev_flow_offload_put(struct dp_offload_thread_item *offload)
}
- /* Taking a global 'port_mutex' to fulfill thread safety
- * restrictions regarding the netdev port mapping. */
+ /* Taking a global 'port_rwlock' to fulfill thread safety
+ * restrictions regarding the netdev port mapping. */
- ovs_mutex_lock(&pmd->dp->port_mutex);
+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
ret = netdev_flow_put(port, &offload->match,
CONST_CAST(struct nlattr *, offload->actions),
offload->actions_len, &flow->mega_ufid, &info,
NULL);
- ovs_mutex_unlock(&pmd->dp->port_mutex);
+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);
netdev_close(port);
if (ret) {
@@ -2944,7 +2944,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
struct hmap_node *node;
int retval;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
node = hmap_at_position(&dp->ports, &state->position);
if (node) {
struct dp_netdev_port *port;
@@ -2961,7 +2961,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
} else {
retval = EOF;
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return retval;
}
@@ -3412,24 +3412,24 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
return false;
}
ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
- /* Taking a global 'port_mutex' to fulfill thread safety
+ /* Taking a global 'port_rwlock' to fulfill thread safety
* restrictions regarding netdev port mapping.
*
* XXX: Main thread will try to pause/stop all revalidators during datapath
* reconfiguration via datapath purge callback (dp_purge_cb) while
- * holding 'dp->port_mutex'. So we're not waiting for mutex here.
- * Otherwise, deadlock is possible, bcause revalidators might sleep
+ * holding 'dp->port_rwlock' for writing. So we're not waiting for the
+ * lock here.
+ * Otherwise, deadlock is possible, because revalidators might sleep
* waiting for the main thread to release the lock and main thread
* will wait for them to stop processing.
* This workaround might make statistics less accurate. Especially
* for flow deletion case, since there will be no other attempt. */
- if (!ovs_mutex_trylock(&dp->port_mutex)) {
+ if (!ovs_rwlock_tryrdlock(&dp->port_rwlock)) {
ret = netdev_flow_get(netdev, &match, &actions,
&netdev_flow->mega_ufid, stats, attrs, &buf);
/* Storing statistics and attributes from the last request for
- * later use on mutex contention. */
+ * later use on rwlock contention. */
dp_netdev_flow_set_last_stats_attrs(netdev_flow, stats, attrs, ret);
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
} else {
dp_netdev_flow_get_last_stats_attrs(netdev_flow, stats, attrs, &ret);
if (!ret && !attrs->dp_layer) {
@@ -4278,7 +4278,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
nb_offloads = 0;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
HMAP_FOR_EACH (port, node, &dp->ports) {
uint64_t port_nb_offloads = 0;
@@ -4287,7 +4287,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
nb_offloads += port_nb_offloads;
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
atomic_read_relaxed(&dp_offload_thread.enqueued_item,
&stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED].value);
@@ -4548,7 +4548,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
bool emc_enabled = smap_get_bool(cfg, "emc-enable", true);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
error = get_port_by_number(dp, port_no, &port);
if (error) {
goto unlock;
@@ -4602,7 +4602,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
dp_netdev_request_reconfigure(dp);
unlock:
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -5086,7 +5086,8 @@ compare_rxq_cycles(const void *a, const void *b)
* The function doesn't touch the pmd threads, it just stores the assignment
* in the 'pmd' member of each rxq. */
static void
-rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
+rxq_scheduling(struct dp_netdev *dp, bool pinned)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
struct rr_numa_list rr;
@@ -5230,7 +5231,7 @@ reload_affected_pmds(struct dp_netdev *dp)
static void
reconfigure_pmd_threads(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *pmd;
struct ovs_numa_dump *pmd_cores;
@@ -5328,7 +5329,7 @@ static void
pmd_remove_stale_ports(struct dp_netdev *dp,
struct dp_netdev_pmd_thread *pmd)
OVS_EXCLUDED(pmd->port_mutex)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct rxq_poll *poll, *poll_next;
struct tx_port *tx, *tx_next;
@@ -5358,7 +5359,7 @@ pmd_remove_stale_ports(struct dp_netdev *dp,
* rxqs and assigns all rxqs/txqs to pmd threads. */
static void
reconfigure_datapath(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct hmapx busy_threads = HMAPX_INITIALIZER(&busy_threads);
struct dp_netdev_pmd_thread *pmd;
@@ -5542,7 +5543,7 @@ reconfigure_datapath(struct dp_netdev *dp)
/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
static bool
ports_require_restart(const struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -5593,7 +5594,7 @@ variance(uint64_t a[], int n)
static bool
get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
uint32_t num_pmds, uint64_t *predicted_variance)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
struct dp_netdev_pmd_thread *pmd;
@@ -5709,7 +5710,7 @@ cleanup:
* better distribution of load on PMDs. */
static bool
pmd_rebalance_dry_run(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *pmd;
uint64_t *curr_pmd_usage;
@@ -5804,7 +5805,7 @@ dpif_netdev_run(struct dpif *dpif)
long long int now = time_msec();
struct dp_netdev_pmd_thread *pmd;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
if (non_pmd) {
ovs_mutex_lock(&dp->non_pmd_mutex);
@@ -5876,7 +5877,7 @@ dpif_netdev_run(struct dpif *dpif)
if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
reconfigure_datapath(dp);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
tnl_neigh_cache_run();
tnl_port_map_run();
@@ -5896,7 +5897,7 @@ dpif_netdev_wait(struct dpif *dpif)
struct dp_netdev *dp = get_dp_netdev(dpif);
ovs_mutex_lock(&dp_netdev_mutex);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
HMAP_FOR_EACH (port, node, &dp->ports) {
netdev_wait_reconf_required(port->netdev);
if (!netdev_is_pmd(port->netdev)) {
@@ -5907,7 +5908,7 @@ dpif_netdev_wait(struct dpif *dpif)
}
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_unlock(&dp_netdev_mutex);
seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}
@@ -6524,7 +6525,7 @@ dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *non_pmd;
@@ -8587,7 +8588,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
ovs_refcount_ref(&dp->ref_cnt);
ovs_mutex_unlock(&dp_netdev_mutex);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
if (get_port_by_name(dp, argv[2], &port)) {
unixctl_command_reply_error(conn, "unknown port");
goto exit;
@@ -8616,7 +8617,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
unixctl_command_reply(conn, NULL);
exit:
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
dp_netdev_unref(dp);
}
@@ -44,7 +44,7 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);
* 'netdev' is forbidden.
*
* For current implementation all above restrictions could be fulfilled by
- * taking the datapath 'port_mutex' in lib/dpif-netdev.c. */
+ * taking the datapath 'port_rwlock' in lib/dpif-netdev.c. */
/*
* A mapping from ufid to dpdk rte_flow.