
[ovs-dev,v5,25/27] dpif-netdev: Replace port mutex by rwlock

Message ID f3259aed9a21ba21078770c888efe3b34b9c8a9c.1631094144.git.grive@u256.net
State Accepted
Commit 7daa5034683083574199e34ad748088ef1942c8f
Series dpif-netdev: Parallel offload processing

Checks

Context                                Check    Description
ovsrobot/apply-robot                   success  apply and check: success
ovsrobot/github-robot-_Build_and_Test  success  github build: passed

Commit Message

Gaetan Rivet Sept. 8, 2021, 9:47 a.m. UTC
The port mutex protects the netdev mapping, which can be changed by port
addition or port deletion. HW offload operations can be considered read
operations on the port mapping itself. Use an rwlock to differentiate
between read and write operations, allowing concurrent queries and
offload insertions.
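
For illustration only (a sketch, not part of the patch), the two sides of
the new lock, using the same 'dp->port_rwlock' and ovs_rwlock_*() calls as
the hunks below:

    /* Read side (sketch): offload insertion/query, may run concurrently
     * with other readers. */
    ovs_rwlock_rdlock(&dp->port_rwlock);
    ret = netdev_flow_put(port, &offload->match,
                          CONST_CAST(struct nlattr *, offload->actions),
                          offload->actions_len, &flow->mega_ufid, &info,
                          NULL);
    ovs_rwlock_unlock(&dp->port_rwlock);

    /* Write side (sketch): port addition/deletion, exclusive access. */
    ovs_rwlock_wrlock(&dp->port_rwlock);
    error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
    ovs_rwlock_unlock(&dp->port_rwlock);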

Because offload queries, offload deletion, and 'reconfigure_datapath()'
calls all take the rdlock, the deadlock fixed by [1] is still avoided: the
rdlock side is recursive, as prescribed by the POSIX standard. Executing
'reconfigure_datapath()' only requires the rdlock, but it is sometimes
called in contexts where the wrlock is held ('do_add_port()' and
'do_del_port()').
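
As a sketch of the POSIX semantics this relies on (illustration only, not
code from the patch), a thread already holding the read side may take it
again, so nested read-side sections do not self-deadlock:

    /* Sketch: POSIX allows a thread to hold multiple concurrent read
     * locks on the same rwlock. */
    ovs_rwlock_rdlock(&dp->port_rwlock);    /* e.g. offload deletion */
    ovs_rwlock_rdlock(&dp->port_rwlock);    /* nested rdlock is permitted */
    ovs_rwlock_unlock(&dp->port_rwlock);
    ovs_rwlock_unlock(&dp->port_rwlock);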

This means that the deadlock described in [2] is still possible and must
be mitigated. The rdlock is therefore taken using 'tryrdlock()' during
offload queries, keeping the current behavior.
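
A minimal sketch of that non-blocking query path (mirroring the
dpif_netdev_get_flow_offload_status() hunk below; the fallback helper
comes from the existing code):

    if (!ovs_rwlock_tryrdlock(&dp->port_rwlock)) {
        /* Read lock acquired: query the offload provider directly. */
        ret = netdev_flow_get(netdev, &match, &actions,
                              &netdev_flow->mega_ufid, stats, attrs, &buf);
        ovs_rwlock_unlock(&dp->port_rwlock);
    } else {
        /* Contended, e.g. the main thread holds the wrlock during
         * reconfiguration: use the last cached statistics instead of
         * blocking, avoiding the deadlock described in [2]. */
        dp_netdev_flow_get_last_stats_attrs(netdev_flow, stats, attrs, &ret);
    }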

[1]: 81e89d5c2645 ("dpif-netdev: Make datapath port mutex recursive.")

[2]: 12d0edd75eba ("dpif-netdev: Avoid deadlock with offloading during PMD
     thread deletion.").

Signed-off-by: Gaetan Rivet <grive@u256.net>
Reviewed-by: Eli Britstein <elibr@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/dpif-netdev.c         | 143 +++++++++++++++++++-------------------
 lib/netdev-offload-dpdk.c |   4 +-
 2 files changed, 74 insertions(+), 73 deletions(-)

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 365726ed5..30547c0ec 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -245,7 +245,7 @@  enum sched_assignment_type {
  * Acquisition order is, from outermost to innermost:
  *
  *    dp_netdev_mutex (global)
- *    port_mutex
+ *    port_rwlock
  *    bond_mutex
  *    non_pmd_mutex
  */
@@ -258,8 +258,8 @@  struct dp_netdev {
     /* Ports.
      *
      * Any lookup into 'ports' or any access to the dp_netdev_ports found
-     * through 'ports' requires taking 'port_mutex'. */
-    struct ovs_mutex port_mutex;
+     * through 'ports' requires taking 'port_rwlock'. */
+    struct ovs_rwlock port_rwlock;
     struct hmap ports;
     struct seq *port_seq;       /* Incremented whenever a port changes. */
 
@@ -323,7 +323,7 @@  struct dp_netdev {
 
 static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,
                                                     odp_port_t)
-    OVS_REQUIRES(dp->port_mutex);
+    OVS_REQ_RDLOCK(dp->port_rwlock);
 
 enum rxq_cycles_counter_type {
     RXQ_CYCLES_PROC_CURR,       /* Cycles spent successfully polling and
@@ -491,17 +491,17 @@  struct dpif_netdev {
 
 static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,
                               struct dp_netdev_port **portp)
-    OVS_REQUIRES(dp->port_mutex);
+    OVS_REQ_RDLOCK(dp->port_rwlock);
 static int get_port_by_name(struct dp_netdev *dp, const char *devname,
                             struct dp_netdev_port **portp)
-    OVS_REQUIRES(dp->port_mutex);
+    OVS_REQ_RDLOCK(dp->port_rwlock);
 static void dp_netdev_free(struct dp_netdev *)
     OVS_REQUIRES(dp_netdev_mutex);
 static int do_add_port(struct dp_netdev *dp, const char *devname,
                        const char *type, odp_port_t port_no)
-    OVS_REQUIRES(dp->port_mutex);
+    OVS_REQ_WRLOCK(dp->port_rwlock);
 static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)
-    OVS_REQUIRES(dp->port_mutex);
+    OVS_REQ_WRLOCK(dp->port_rwlock);
 static int dpif_netdev_open(const struct dpif_class *, const char *name,
                             bool create, struct dpif **);
 static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
@@ -520,7 +520,7 @@  static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
                                     int numa_id);
 static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
 static void dp_netdev_set_nonpmd(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex);
+    OVS_REQ_WRLOCK(dp->port_rwlock);
 
 static void *pmd_thread_main(void *);
 static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
@@ -557,7 +557,7 @@  static void dp_netdev_offload_flush(struct dp_netdev *dp,
                                     struct dp_netdev_port *port);
 
 static void reconfigure_datapath(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex);
+    OVS_REQ_RDLOCK(dp->port_rwlock);
 static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
 static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
 static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);
@@ -1003,7 +1003,7 @@  dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
         sorted_poll_thread_list(dp, &pmd_list, &n);
 
         /* take port mutex as HMAP iters over them. */
-        ovs_mutex_lock(&dp->port_mutex);
+        ovs_rwlock_rdlock(&dp->port_rwlock);
 
         for (size_t i = 0; i < n; i++) {
             struct dp_netdev_pmd_thread *pmd = pmd_list[i];
@@ -1027,7 +1027,7 @@  dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,
         }
 
         /* release port mutex before netdev mutex. */
-        ovs_mutex_unlock(&dp->port_mutex);
+        ovs_rwlock_unlock(&dp->port_rwlock);
         free(pmd_list);
     }
     ovs_mutex_unlock(&dp_netdev_mutex);
@@ -1638,7 +1638,7 @@  create_dpif_netdev(struct dp_netdev *dp)
  * Return ODPP_NONE on failure. */
 static odp_port_t
 choose_port(struct dp_netdev *dp, const char *name)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     uint32_t port_no;
 
@@ -1759,7 +1759,7 @@  create_dp_netdev(const char *name, const struct dpif_class *class,
     ovs_refcount_init(&dp->ref_cnt);
     atomic_flag_clear(&dp->destroyed);
 
-    ovs_mutex_init_recursive(&dp->port_mutex);
+    ovs_rwlock_init(&dp->port_rwlock);
     hmap_init(&dp->ports);
     dp->port_seq = seq_create();
     ovs_mutex_init(&dp->bond_mutex);
@@ -1796,7 +1796,7 @@  create_dp_netdev(const char *name, const struct dpif_class *class,
     ovs_mutex_init_recursive(&dp->non_pmd_mutex);
     ovsthread_key_create(&dp->per_pmd_key, NULL);
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
     /* non-PMD will be created before all other threads and will
      * allocate static_tx_qid = 0. */
     dp_netdev_set_nonpmd(dp);
@@ -1804,7 +1804,7 @@  create_dp_netdev(const char *name, const struct dpif_class *class,
     error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
                                                              "internal"),
                         ODPP_LOCAL);
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
     if (error) {
         dp_netdev_free(dp);
         return error;
@@ -1880,11 +1880,11 @@  dp_netdev_free(struct dp_netdev *dp)
 
     shash_find_and_delete(&dp_netdevs, dp->name);
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
     HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {
         do_del_port(dp, port);
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     ovs_mutex_lock(&dp->bond_mutex);
     CMAP_FOR_EACH (bond, node, &dp->tx_bonds) {
@@ -1909,7 +1909,7 @@  dp_netdev_free(struct dp_netdev *dp)
 
     seq_destroy(dp->port_seq);
     hmap_destroy(&dp->ports);
-    ovs_mutex_destroy(&dp->port_mutex);
+    ovs_rwlock_destroy(&dp->port_rwlock);
 
     cmap_destroy(&dp->tx_bonds);
     ovs_mutex_destroy(&dp->bond_mutex);
@@ -2069,7 +2069,7 @@  out:
 static int
 do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
             odp_port_t port_no)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_WRLOCK(dp->port_rwlock)
 {
     struct netdev_saved_flags *sf;
     struct dp_netdev_port *port;
@@ -2121,7 +2121,7 @@  dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
     odp_port_t port_no;
     int error;
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
     dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
     if (*port_nop != ODPP_NONE) {
         port_no = *port_nop;
@@ -2134,7 +2134,7 @@  dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
         *port_nop = port_no;
         error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     return error;
 }
@@ -2145,7 +2145,7 @@  dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
     struct dp_netdev *dp = get_dp_netdev(dpif);
     int error;
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
     if (port_no == ODPP_LOCAL) {
         error = EINVAL;
     } else {
@@ -2156,7 +2156,7 @@  dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
             do_del_port(dp, port);
         }
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     return error;
 }
@@ -2169,7 +2169,7 @@  is_valid_port_number(odp_port_t port_no)
 
 static struct dp_netdev_port *
 dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct dp_netdev_port *port;
 
@@ -2184,7 +2184,7 @@  dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)
 static int
 get_port_by_number(struct dp_netdev *dp,
                    odp_port_t port_no, struct dp_netdev_port **portp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     if (!is_valid_port_number(port_no)) {
         *portp = NULL;
@@ -2219,7 +2219,7 @@  port_destroy(struct dp_netdev_port *port)
 static int
 get_port_by_name(struct dp_netdev *dp,
                  const char *devname, struct dp_netdev_port **portp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct dp_netdev_port *port;
 
@@ -2238,7 +2238,7 @@  get_port_by_name(struct dp_netdev *dp,
 /* Returns 'true' if there is a port with pmd netdev. */
 static bool
 has_pmd_port(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct dp_netdev_port *port;
 
@@ -2253,7 +2253,7 @@  has_pmd_port(struct dp_netdev *dp)
 
 static void
 do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_WRLOCK(dp->port_rwlock)
 {
     dp_netdev_offload_flush(dp, port);
     netdev_uninit_flow_api(port->netdev);
@@ -2282,12 +2282,12 @@  dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
     struct dp_netdev_port *port;
     int error;
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
     error = get_port_by_number(dp, port_no, &port);
     if (!error && dpif_port) {
         answer_port_query(port, dpif_port);
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     return error;
 }
@@ -2300,12 +2300,12 @@  dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
     struct dp_netdev_port *port;
     int error;
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_rdlock(&dp->port_rwlock);
     error = get_port_by_name(dp, devname, &port);
     if (!error && dpif_port) {
         answer_port_query(port, dpif_port);
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     return error;
 }
@@ -2515,11 +2515,11 @@  mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,
 
         port = netdev_ports_get(in_port, dpif_type_str);
         if (port) {
-            /* Taking a global 'port_mutex' to fulfill thread safety
+            /* Taking a global 'port_rwlock' to fulfill thread safety
              * restrictions regarding netdev port mapping. */
-            ovs_mutex_lock(&pmd->dp->port_mutex);
+            ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
             ret = netdev_flow_del(port, &flow->mega_ufid, NULL);
-            ovs_mutex_unlock(&pmd->dp->port_mutex);
+            ovs_rwlock_unlock(&pmd->dp->port_rwlock);
             netdev_close(port);
         }
 
@@ -2682,14 +2682,14 @@  dp_netdev_flow_offload_put(struct dp_offload_flow_item *offload)
         goto err_free;
     }
 
-    /* Taking a global 'port_mutex' to fulfill thread safety
+    /* Taking a global 'port_rwlock' to fulfill thread safety
      * restrictions regarding the netdev port mapping. */
-    ovs_mutex_lock(&pmd->dp->port_mutex);
+    ovs_rwlock_rdlock(&pmd->dp->port_rwlock);
     ret = netdev_flow_put(port, &offload->match,
                           CONST_CAST(struct nlattr *, offload->actions),
                           offload->actions_len, &flow->mega_ufid, &info,
                           NULL);
-    ovs_mutex_unlock(&pmd->dp->port_mutex);
+    ovs_rwlock_unlock(&pmd->dp->port_rwlock);
     netdev_close(port);
 
     if (ret) {
@@ -2745,9 +2745,9 @@  dp_offload_flush(struct dp_offload_thread_item *item)
 {
     struct dp_offload_flush_item *flush = &item->data->flush;
 
-    ovs_mutex_lock(&flush->dp->port_mutex);
+    ovs_rwlock_rdlock(&flush->dp->port_rwlock);
     netdev_flow_flush(flush->netdev);
-    ovs_mutex_unlock(&flush->dp->port_mutex);
+    ovs_rwlock_unlock(&flush->dp->port_rwlock);
 
     ovs_barrier_block(flush->barrier);
 
@@ -2992,7 +2992,7 @@  dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,
  * complete its work.  As the flush order will only be
  * enqueued after existing offload requests, those previous
  * offload requests must be processed, which requires being
- * able to lock the 'port_mutex' from the offload thread.
+ * able to lock the 'port_rwlock' from the offload thread.
  *
  * Flow offload flush is done when a port is being deleted.
  * Right after this call executes, the offload API is disabled
@@ -3002,7 +3002,7 @@  dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,
 static void
 dp_netdev_offload_flush(struct dp_netdev *dp,
                         struct dp_netdev_port *port)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_WRLOCK(dp->port_rwlock)
 {
     /* The flush mutex only serves to protect the static memory barrier.
      * The memory barrier needs to go beyond the function scope as
@@ -3020,7 +3020,7 @@  dp_netdev_offload_flush(struct dp_netdev *dp,
         return;
     }
 
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
     ovs_mutex_lock(&flush_mutex);
 
     /* This thread and the offload thread. */
@@ -3038,7 +3038,7 @@  dp_netdev_offload_flush(struct dp_netdev *dp,
      * Some offload provider (e.g. DPDK) keeps a netdev reference with
      * the offload data. If this reference is not closed, the netdev is
      * kept indefinitely. */
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
 
     ovs_barrier_block(&barrier);
     ovs_barrier_destroy(&barrier);
@@ -3092,7 +3092,7 @@  dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
     struct hmap_node *node;
     int retval;
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_rdlock(&dp->port_rwlock);
     node = hmap_at_position(&dp->ports, &state->position);
     if (node) {
         struct dp_netdev_port *port;
@@ -3109,7 +3109,7 @@  dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
     } else {
         retval = EOF;
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     return retval;
 }
@@ -3534,24 +3534,24 @@  dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,
         return false;
     }
     ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);
-    /* Taking a global 'port_mutex' to fulfill thread safety
+    /* Taking a global 'port_rwlock' to fulfill thread safety
      * restrictions regarding netdev port mapping.
      *
      * XXX: Main thread will try to pause/stop all revalidators during datapath
      *      reconfiguration via datapath purge callback (dp_purge_cb) while
-     *      holding 'dp->port_mutex'.  So we're not waiting for mutex here.
-     *      Otherwise, deadlock is possible, bcause revalidators might sleep
+     *      rw-holding 'dp->port_rwlock'.  So we're not waiting for lock here.
+     *      Otherwise, deadlock is possible, because revalidators might sleep
      *      waiting for the main thread to release the lock and main thread
      *      will wait for them to stop processing.
      *      This workaround might make statistics less accurate. Especially
      *      for flow deletion case, since there will be no other attempt.  */
-    if (!ovs_mutex_trylock(&dp->port_mutex)) {
+    if (!ovs_rwlock_tryrdlock(&dp->port_rwlock)) {
         ret = netdev_flow_get(netdev, &match, &actions,
                               &netdev_flow->mega_ufid, stats, attrs, &buf);
         /* Storing statistics and attributes from the last request for
          * later use on mutex contention. */
         dp_netdev_flow_set_last_stats_attrs(netdev_flow, stats, attrs, ret);
-        ovs_mutex_unlock(&dp->port_mutex);
+        ovs_rwlock_unlock(&dp->port_rwlock);
     } else {
         dp_netdev_flow_get_last_stats_attrs(netdev_flow, stats, attrs, &ret);
         if (!ret && !attrs->dp_layer) {
@@ -4380,7 +4380,7 @@  dpif_netdev_offload_stats_get(struct dpif *dpif,
 
     nb_offloads = 0;
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_rdlock(&dp->port_rwlock);
     HMAP_FOR_EACH (port, node, &dp->ports) {
         uint64_t port_nb_offloads = 0;
 
@@ -4389,7 +4389,7 @@  dpif_netdev_offload_stats_get(struct dpif *dpif,
             nb_offloads += port_nb_offloads;
         }
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     atomic_read_relaxed(&dp_offload_thread.enqueued_item,
         &stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED].value);
@@ -4724,7 +4724,7 @@  dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
     const char *affinity_list = smap_get(cfg, "pmd-rxq-affinity");
     bool emc_enabled = smap_get_bool(cfg, "emc-enable", true);
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
     error = get_port_by_number(dp, port_no, &port);
     if (error) {
         goto unlock;
@@ -4778,7 +4778,7 @@  dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,
 
     dp_netdev_request_reconfigure(dp);
 unlock:
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
     return error;
 }
 
@@ -5288,7 +5288,7 @@  sched_pmd_add_rxq(struct sched_pmd *sched_pmd, struct dp_netdev_rxq *rxq,
 static void
 sched_numa_list_assignments(struct sched_numa_list *numa_list,
                             struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct dp_netdev_port *port;
 
@@ -5540,7 +5540,7 @@  sched_numa_list_schedule(struct sched_numa_list *numa_list,
                          struct dp_netdev *dp,
                          enum sched_assignment_type algo,
                          enum vlog_level level)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct dp_netdev_port *port;
     struct dp_netdev_rxq **rxqs = NULL;
@@ -5701,7 +5701,8 @@  sched_numa_list_schedule(struct sched_numa_list *numa_list,
 }
 
 static void
-rxq_scheduling(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex)
+rxq_scheduling(struct dp_netdev *dp)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct sched_numa_list numa_list;
     enum sched_assignment_type algo = dp->pmd_rxq_assign_type;
@@ -5758,7 +5759,7 @@  sched_numa_list_variance(struct sched_numa_list *numa_list)
 
 static bool
 pmd_rebalance_dry_run(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct sched_numa_list numa_list_cur;
     struct sched_numa_list numa_list_est;
@@ -5840,7 +5841,7 @@  reload_affected_pmds(struct dp_netdev *dp)
 
 static void
 reconfigure_pmd_threads(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct dp_netdev_pmd_thread *pmd;
     struct ovs_numa_dump *pmd_cores;
@@ -5938,7 +5939,7 @@  static void
 pmd_remove_stale_ports(struct dp_netdev *dp,
                        struct dp_netdev_pmd_thread *pmd)
     OVS_EXCLUDED(pmd->port_mutex)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct rxq_poll *poll, *poll_next;
     struct tx_port *tx, *tx_next;
@@ -5968,7 +5969,7 @@  pmd_remove_stale_ports(struct dp_netdev *dp,
  * rxqs and assigns all rxqs/txqs to pmd threads. */
 static void
 reconfigure_datapath(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct hmapx busy_threads = HMAPX_INITIALIZER(&busy_threads);
     struct dp_netdev_pmd_thread *pmd;
@@ -6147,7 +6148,7 @@  reconfigure_datapath(struct dp_netdev *dp)
 /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */
 static bool
 ports_require_restart(const struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_RDLOCK(dp->port_rwlock)
 {
     struct dp_netdev_port *port;
 
@@ -6205,7 +6206,7 @@  dpif_netdev_run(struct dpif *dpif)
     long long int now = time_msec();
     struct dp_netdev_pmd_thread *pmd;
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_rdlock(&dp->port_rwlock);
     non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
     if (non_pmd) {
         ovs_mutex_lock(&dp->non_pmd_mutex);
@@ -6280,7 +6281,7 @@  dpif_netdev_run(struct dpif *dpif)
     if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {
         reconfigure_datapath(dp);
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
 
     tnl_neigh_cache_run();
     tnl_port_map_run();
@@ -6300,7 +6301,7 @@  dpif_netdev_wait(struct dpif *dpif)
     struct dp_netdev *dp = get_dp_netdev(dpif);
 
     ovs_mutex_lock(&dp_netdev_mutex);
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_rdlock(&dp->port_rwlock);
     HMAP_FOR_EACH (port, node, &dp->ports) {
         netdev_wait_reconf_required(port->netdev);
         if (!netdev_is_pmd(port->netdev)) {
@@ -6311,7 +6312,7 @@  dpif_netdev_wait(struct dpif *dpif)
             }
         }
     }
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
     ovs_mutex_unlock(&dp_netdev_mutex);
     seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
 }
@@ -6938,7 +6939,7 @@  dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
 /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */
 static void
 dp_netdev_set_nonpmd(struct dp_netdev *dp)
-    OVS_REQUIRES(dp->port_mutex)
+    OVS_REQ_WRLOCK(dp->port_rwlock)
 {
     struct dp_netdev_pmd_thread *non_pmd;
 
@@ -9097,7 +9098,7 @@  dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
     ovs_refcount_ref(&dp->ref_cnt);
     ovs_mutex_unlock(&dp_netdev_mutex);
 
-    ovs_mutex_lock(&dp->port_mutex);
+    ovs_rwlock_wrlock(&dp->port_rwlock);
     if (get_port_by_name(dp, argv[2], &port)) {
         unixctl_command_reply_error(conn, "unknown port");
         goto exit;
@@ -9126,7 +9127,7 @@  dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
     unixctl_command_reply(conn, NULL);
 
 exit:
-    ovs_mutex_unlock(&dp->port_mutex);
+    ovs_rwlock_unlock(&dp->port_rwlock);
     dp_netdev_unref(dp);
 }
 
diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
index 28cb2f96b..ac4739b71 100644
--- a/lib/netdev-offload-dpdk.c
+++ b/lib/netdev-offload-dpdk.c
@@ -46,8 +46,8 @@  static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(600, 600);
  *    For example, simultaneous call of 'netdev_reconfigure()' for the same
  *    'netdev' is forbidden.
  *
- * For current implementation all above restrictions could be fulfilled by
- * taking the datapath 'port_mutex' in lib/dpif-netdev.c.  */
+ * For current implementation all above restrictions are fulfilled by
+ * read-locking the datapath 'port_rwlock' in lib/dpif-netdev.c.  */
 
 /*
  * A mapping from ufid to dpdk rte_flow.