@@ -332,8 +332,8 @@ struct dp_netdev {
/* Ports.
*
* Any lookup into 'ports' or any access to the dp_netdev_ports found
- * through 'ports' requires taking 'port_mutex'. */
- struct ovs_mutex port_mutex;
+ * through 'ports' requires taking 'port_rwlock'. */
+ struct ovs_rwlock port_rwlock;
struct hmap ports;
struct seq *port_seq; /* Incremented whenever a port changes. */
@@ -409,7 +409,7 @@ static void meter_unlock(const struct dp_netdev *dp, uint32_t meter_id)
static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
odp_port_t)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
enum rxq_cycles_counter_type {
RXQ_CYCLES_PROC_CURR, /* Cycles spent successfully polling and
@@ -830,17 +830,17 @@ struct dpif_netdev {
static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
static int get_port_by_name(struct dp_netdev *dp, const char *devname,
struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
static void dp_netdev_free(struct dp_netdev *)
OVS_REQUIRES(dp_netdev_mutex);
static int do_add_port(struct dp_netdev *dp, const char *devname,
const char *type, odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
@@ -861,7 +861,7 @@ static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_WRLOCK(dp->port_rwlock);
static void *pmd_thread_main(void *);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
@@ -895,7 +895,7 @@ static void dp_netdev_del_bond_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,
OVS_EXCLUDED(pmd->bond_mutex);
static void reconfigure_datapath(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex);
+ OVS_REQ_RDLOCK(dp->port_rwlock);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
@@ -1401,8 +1401,8 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc,
struct dp_netdev_pmd_thread **pmd_list;
sorted_poll_thread_list(dp, &pmd_list, &n);
- /* take port mutex as HMAP iters over them. */
- ovs_mutex_lock(&dp->port_mutex);
+ /* take port rwlock as HMAP iters over them. */
+ ovs_rwlock_rdlock(&dp->port_rwlock);
for (size_t i = 0; i < n; i++) {
struct dp_netdev_pmd_thread *pmd = pmd_list[i];
@@ -1425,8 +1425,8 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc,
}
}
- /* release port mutex before netdev mutex. */
- ovs_mutex_unlock(&dp->port_mutex);
+ /* release port rwlock before netdev mutex. */
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_unlock(&dp_netdev_mutex);
struct ds reply = DS_EMPTY_INITIALIZER;
@@ -1719,7 +1719,7 @@ create_dpif_netdev(struct dp_netdev *dp)
* Return ODPP_NONE on failure. */
static odp_port_t
choose_port(struct dp_netdev *dp, const char *name)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
uint32_t port_no;
@@ -1782,7 +1782,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
ovs_refcount_init(&dp->ref_cnt);
atomic_flag_clear(&dp->destroyed);
- ovs_mutex_init_recursive(&dp->port_mutex);
+ ovs_rwlock_init(&dp->port_rwlock);
hmap_init(&dp->ports);
dp->port_seq = seq_create();
ovs_mutex_init(&dp->bond_mutex);
@@ -1817,7 +1817,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
ovs_mutex_init_recursive(&dp->non_pmd_mutex);
ovsthread_key_create(&dp->per_pmd_key, NULL);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
/* non-PMD will be created before all other threads and will
* allocate static_tx_qid = 0. */
dp_netdev_set_nonpmd(dp);
@@ -1825,7 +1825,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
"internal"),
ODPP_LOCAL);
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
if (error) {
dp_netdev_free(dp);
return error;
@@ -1911,11 +1911,11 @@ dp_netdev_free(struct dp_netdev *dp)
shash_find_and_delete(&dp_netdevs, dp->name);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
do_del_port(dp, port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_lock(&dp->bond_mutex);
CMAP_FOR_EACH (bond, node, &dp->tx_bonds) {
@@ -1940,7 +1940,7 @@ dp_netdev_free(struct dp_netdev *dp)
seq_destroy(dp->port_seq);
hmap_destroy(&dp->ports);
- ovs_mutex_destroy(&dp->port_mutex);
+ ovs_rwlock_destroy(&dp->port_rwlock);
cmap_destroy(&dp->tx_bonds);
ovs_mutex_destroy(&dp->bond_mutex);
@@ -2108,7 +2108,7 @@ out:
static int
do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct netdev_saved_flags *sf;
struct dp_netdev_port *port;
@@ -2160,7 +2160,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
odp_port_t port_no;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
if (*port_nop != ODPP_NONE) {
port_no = *port_nop;
@@ -2173,7 +2173,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
*port_nop = port_no;
error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2184,7 +2184,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
struct dp_netdev *dp = get_dp_netdev(dpif);
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
if (port_no == ODPP_LOCAL) {
error = EINVAL;
} else {
@@ -2195,7 +2195,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
do_del_port(dp, port);
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2208,7 +2208,7 @@ is_valid_port_number(odp_port_t port_no)
static struct dp_netdev_port *
dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2223,7 +2223,7 @@ dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
static int
get_port_by_number(struct dp_netdev *dp,
odp_port_t port_no, struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
if (!is_valid_port_number(port_no)) {
*portp = NULL;
@@ -2258,7 +2258,7 @@ port_destroy(struct dp_netdev_port *port)
static int
get_port_by_name(struct dp_netdev *dp,
const char *devname, struct dp_netdev_port **portp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2277,7 +2277,7 @@ get_port_by_name(struct dp_netdev *dp,
/* Returns 'true' if there is a port with pmd netdev. */
static bool
has_pmd_port(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -2292,7 +2292,7 @@ has_pmd_port(struct dp_netdev *dp)
static void
do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
netdev_flow_flush(port->netdev);
netdev_uninit_flow_api(port->netdev);
@@ -2321,12 +2321,12 @@ dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
struct dp_netdev_port *port;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
error = get_port_by_number(dp, port_no, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2339,12 +2339,12 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
struct dp_netdev_port *port;
int error;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
error = get_port_by_name(dp, devname, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -2562,9 +2562,9 @@ mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,
if (port) {
- /* Taking a global 'port_mutex' to fulfill thread safety
- * restrictions regarding netdev port mapping. */
+ /* Taking a global 'port_rwlock' to fulfill thread safety
+ * restrictions regarding netdev port mapping. */
- ovs_mutex_lock(&pmd->dp->port_mutex);
+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
ret = netdev_flow_del(port, &flow->mega_ufid, NULL);
- ovs_mutex_unlock(&pmd->dp->port_mutex);
+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);
netdev_close(port);
}
@@ -2716,12 +2716,12 @@ dp_netdev_flow_offload_put(struct dp_offload_thread_item *offload)
}
- /* Taking a global 'port_mutex' to fulfill thread safety
- * restrictions regarding the netdev port mapping. */
+ /* Taking a global 'port_rwlock' to fulfill thread safety
+ * restrictions regarding the netdev port mapping. */
- ovs_mutex_lock(&pmd->dp->port_mutex);
+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
ret = netdev_flow_put(port, &offload->match,
CONST_CAST(struct nlattr *, offload->actions),
offload->actions_len, &flow->mega_ufid, &info,
NULL);
- ovs_mutex_unlock(&pmd->dp->port_mutex);
+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);
netdev_close(port);
if (ret) {
@@ -2949,7 +2949,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
struct hmap_node *node;
int retval;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
node = hmap_at_position(&dp->ports, &state->position);
if (node) {
struct dp_netdev_port *port;
@@ -2966,7 +2966,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
} else {
retval = EOF;
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return retval;
}
@@ -3417,24 +3417,24 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
return false;
}
ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
- /* Taking a global 'port_mutex' to fulfill thread safety
+ /* Taking a global 'port_rwlock' to fulfill thread safety
* restrictions regarding netdev port mapping.
*
* XXX: Main thread will try to pause/stop all revalidators during datapath
* reconfiguration via datapath purge callback (dp_purge_cb) while
- * holding 'dp->port_mutex'. So we're not waiting for mutex here.
- * Otherwise, deadlock is possible, bcause revalidators might sleep
+ * rw-holding 'dp->port_rwlock'. So we're not waiting for the lock here.
+ * Otherwise, deadlock is possible, because revalidators might sleep
* waiting for the main thread to release the lock and main thread
* will wait for them to stop processing.
* This workaround might make statistics less accurate. Especially
* for flow deletion case, since there will be no other attempt. */
- if (!ovs_mutex_trylock(&dp->port_mutex)) {
+ if (!ovs_rwlock_tryrdlock(&dp->port_rwlock)) {
ret = netdev_flow_get(netdev, &match, &actions,
&netdev_flow->mega_ufid, stats, attrs, &buf);
/* Storing statistics and attributes from the last request for
* later use on mutex contention. */
dp_netdev_flow_set_last_stats_attrs(netdev_flow, stats, attrs, ret);
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
} else {
dp_netdev_flow_get_last_stats_attrs(netdev_flow, stats, attrs, &ret);
if (!ret && !attrs->dp_layer) {
@@ -4295,7 +4295,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
nb_offloads = 0;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
HMAP_FOR_EACH (port, node, &dp->ports) {
uint64_t port_nb_offloads = 0;
@@ -4304,7 +4304,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
nb_offloads += port_nb_offloads;
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
atomic_read_relaxed(&dp_offload_thread.enqueued_item,
&stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED].value);
@@ -4610,7 +4610,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
bool emc_enabled = smap_get_bool(cfg, "emc-enable", true);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
error = get_port_by_number(dp, port_no, &port);
if (error) {
goto unlock;
@@ -4664,7 +4664,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
dp_netdev_request_reconfigure(dp);
unlock:
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
return error;
}
@@ -5148,7 +5148,8 @@ compare_rxq_cycles(const void *a, const void *b)
* The function doesn't touch the pmd threads, it just stores the assignment
* in the 'pmd' member of each rxq. */
static void
-rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
+rxq_scheduling(struct dp_netdev *dp, bool pinned)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
struct rr_numa_list rr;
@@ -5292,7 +5293,7 @@ reload_affected_pmds(struct dp_netdev *dp)
static void
reconfigure_pmd_threads(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *pmd;
struct ovs_numa_dump *pmd_cores;
@@ -5390,7 +5391,7 @@ static void
pmd_remove_stale_ports(struct dp_netdev *dp,
struct dp_netdev_pmd_thread *pmd)
OVS_EXCLUDED(pmd->port_mutex)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct rxq_poll *poll, *poll_next;
struct tx_port *tx, *tx_next;
@@ -5420,7 +5421,7 @@ pmd_remove_stale_ports(struct dp_netdev *dp,
* rxqs and assigns all rxqs/txqs to pmd threads. */
static void
reconfigure_datapath(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct hmapx busy_threads = HMAPX_INITIALIZER(&busy_threads);
struct dp_netdev_pmd_thread *pmd;
@@ -5604,7 +5605,7 @@ reconfigure_datapath(struct dp_netdev *dp)
/* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
static bool
ports_require_restart(const struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
@@ -5655,7 +5656,7 @@ variance(uint64_t a[], int n)
static bool
get_dry_run_variance(struct dp_netdev *dp, uint32_t *core_list,
uint32_t num_pmds, uint64_t *predicted_variance)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_port *port;
struct dp_netdev_pmd_thread *pmd;
@@ -5771,7 +5772,7 @@ cleanup:
* better distribution of load on PMDs. */
static bool
pmd_rebalance_dry_run(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *pmd;
uint64_t *curr_pmd_usage;
@@ -5866,7 +5867,7 @@ dpif_netdev_run(struct dpif *dpif)
long long int now = time_msec();
struct dp_netdev_pmd_thread *pmd;
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
if (non_pmd) {
ovs_mutex_lock(&dp->non_pmd_mutex);
@@ -5938,7 +5939,7 @@ dpif_netdev_run(struct dpif *dpif)
if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
reconfigure_datapath(dp);
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
tnl_neigh_cache_run();
tnl_port_map_run();
@@ -5958,7 +5959,7 @@ dpif_netdev_wait(struct dpif *dpif)
struct dp_netdev *dp = get_dp_netdev(dpif);
ovs_mutex_lock(&dp_netdev_mutex);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_rdlock(&dp->port_rwlock);
HMAP_FOR_EACH (port, node, &dp->ports) {
netdev_wait_reconf_required(port->netdev);
if (!netdev_is_pmd(port->netdev)) {
@@ -5969,7 +5970,7 @@ dpif_netdev_wait(struct dpif *dpif)
}
}
}
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
ovs_mutex_unlock(&dp_netdev_mutex);
seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}
@@ -6586,7 +6587,7 @@ dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
/* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
static void
dp_netdev_set_nonpmd(struct dp_netdev *dp)
- OVS_REQUIRES(dp->port_mutex)
+ OVS_REQ_WRLOCK(dp->port_rwlock)
{
struct dp_netdev_pmd_thread *non_pmd;
@@ -8649,7 +8650,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
ovs_refcount_ref(&dp->ref_cnt);
ovs_mutex_unlock(&dp_netdev_mutex);
- ovs_mutex_lock(&dp->port_mutex);
+ ovs_rwlock_wrlock(&dp->port_rwlock);
if (get_port_by_name(dp, argv[2], &port)) {
unixctl_command_reply_error(conn, "unknown port");
goto exit;
@@ -8678,7 +8679,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
unixctl_command_reply(conn, NULL);
exit:
- ovs_mutex_unlock(&dp->port_mutex);
+ ovs_rwlock_unlock(&dp->port_rwlock);
dp_netdev_unref(dp);
}
@@ -45,7 +45,7 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(100, 5);
* 'netdev' is forbidden.
*
* For current implementation all above restrictions could be fulfilled by
- * taking the datapath 'port_mutex' in lib/dpif-netdev.c. */
+ * taking the datapath 'port_rwlock' in lib/dpif-netdev.c. */
/*
* A mapping from ufid to dpdk rte_flow.