Patch Detail

GET: Show a patch.
PATCH: Update a patch.
PUT: Update a patch.
GET /api/patches/1525736/?format=api
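The JSON response to this request is reproduced in full below. For reference, the same request can also be issued programmatically; the following is a minimal sketch using libcurl — the library choice, error handling, and build line are assumptions, not part of the Patchwork documentation, and no authentication is needed for a read-only GET (a token would only matter for the PATCH/PUT update operations).

```c
/* Minimal sketch: fetch the patch-detail JSON shown below with libcurl.
 * Only the URL is taken from the page; everything else is illustrative.
 * Build with: cc fetch_patch.c -lcurl */
#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
    CURL *curl;
    CURLcode res;

    curl_global_init(CURL_GLOBAL_DEFAULT);
    curl = curl_easy_init();
    if (!curl) {
        fprintf(stderr, "failed to init libcurl\n");
        return 1;
    }

    curl_easy_setopt(curl, CURLOPT_URL,
                     "http://patchwork.ozlabs.org/api/patches/1525736/?format=api");
    /* With no write callback set, libcurl prints the JSON body to stdout. */
    res = curl_easy_perform(curl);
    if (res != CURLE_OK) {
        fprintf(stderr, "request failed: %s\n", curl_easy_strerror(res));
    }

    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return res == CURLE_OK ? 0 : 1;
}
```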
{ "id": 1525736, "url": "http://patchwork.ozlabs.org/api/patches/1525736/?format=api", "web_url": "http://patchwork.ozlabs.org/project/openvswitch/patch/f3259aed9a21ba21078770c888efe3b34b9c8a9c.1631094144.git.grive@u256.net/", "project": { "id": 47, "url": "http://patchwork.ozlabs.org/api/projects/47/?format=api", "name": "Open vSwitch", "link_name": "openvswitch", "list_id": "ovs-dev.openvswitch.org", "list_email": "ovs-dev@openvswitch.org", "web_url": "http://openvswitch.org/", "scm_url": "git@github.com:openvswitch/ovs.git", "webscm_url": "https://github.com/openvswitch/ovs", "list_archive_url": "", "list_archive_url_format": "", "commit_url_format": "" }, "msgid": "<f3259aed9a21ba21078770c888efe3b34b9c8a9c.1631094144.git.grive@u256.net>", "list_archive_url": null, "date": "2021-09-08T09:47:49", "name": "[ovs-dev,v5,25/27] dpif-netdev: Replace port mutex by rwlock", "commit_ref": "7daa5034683083574199e34ad748088ef1942c8f", "pull_url": null, "state": "accepted", "archived": false, "hash": "c2af49cafdca7e519bcb68ef312a042703d650df", "submitter": { "id": 78795, "url": "http://patchwork.ozlabs.org/api/people/78795/?format=api", "name": "Gaetan Rivet", "email": "grive@u256.net" }, "delegate": null, "mbox": "http://patchwork.ozlabs.org/project/openvswitch/patch/f3259aed9a21ba21078770c888efe3b34b9c8a9c.1631094144.git.grive@u256.net/mbox/", "series": [ { "id": 261424, "url": "http://patchwork.ozlabs.org/api/series/261424/?format=api", "web_url": "http://patchwork.ozlabs.org/project/openvswitch/list/?series=261424", "date": "2021-09-08T09:47:24", "name": "dpif-netdev: Parallel offload processing", "version": 5, "mbox": "http://patchwork.ozlabs.org/series/261424/mbox/" } ], "comments": "http://patchwork.ozlabs.org/api/patches/1525736/comments/", "check": "success", "checks": "http://patchwork.ozlabs.org/api/patches/1525736/checks/", "tags": {}, "related": [], "headers": { "Return-Path": "<ovs-dev-bounces@openvswitch.org>", "X-Original-To": [ "incoming@patchwork.ozlabs.org", "ovs-dev@openvswitch.org" ], "Delivered-To": [ "patchwork-incoming@bilbo.ozlabs.org", "ovs-dev@lists.linuxfoundation.org" ], "Authentication-Results": [ "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=u256.net header.i=@u256.net header.a=rsa-sha256\n header.s=fm2 header.b=aycoiurZ;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=messagingengine.com header.i=@messagingengine.com\n header.a=rsa-sha256 header.s=fm3 header.b=e61wGV3J;\n\tdkim-atps=neutral", "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org\n (client-ip=2605:bc80:3010::136; helo=smtp3.osuosl.org;\n envelope-from=ovs-dev-bounces@openvswitch.org; receiver=<UNKNOWN>)", "smtp1.osuosl.org (amavisd-new);\n dkim=pass (2048-bit key) header.d=u256.net header.b=\"aycoiurZ\";\n dkim=pass (2048-bit key) header.d=messagingengine.com\n header.b=\"e61wGV3J\"" ], "Received": [ "from smtp3.osuosl.org (smtp3.osuosl.org [IPv6:2605:bc80:3010::136])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4H4HSd3zjyz9sW8\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 8 Sep 2021 19:50:41 +1000 (AEST)", "from localhost (localhost [127.0.0.1])\n\tby smtp3.osuosl.org (Postfix) with ESMTP id 803D86153B;\n\tWed, 8 Sep 2021 09:50:38 +0000 (UTC)", "from smtp3.osuosl.org ([127.0.0.1])\n\tby 
localhost (smtp3.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id uye_QxkIXAJV; Wed, 8 Sep 2021 09:50:30 +0000 (UTC)", "from lists.linuxfoundation.org (lf-lists.osuosl.org\n [IPv6:2605:bc80:3010:104::8cd3:938])\n\tby smtp3.osuosl.org (Postfix) with ESMTPS id 3381361533;\n\tWed, 8 Sep 2021 09:50:23 +0000 (UTC)", "from lf-lists.osuosl.org (localhost [127.0.0.1])\n\tby lists.linuxfoundation.org (Postfix) with ESMTP id EB785C0011;\n\tWed, 8 Sep 2021 09:50:22 +0000 (UTC)", "from smtp1.osuosl.org (smtp1.osuosl.org [IPv6:2605:bc80:3010::138])\n by lists.linuxfoundation.org (Postfix) with ESMTP id 1BCBCC001E\n for <ovs-dev@openvswitch.org>; Wed, 8 Sep 2021 09:50:22 +0000 (UTC)", "from localhost (localhost [127.0.0.1])\n by smtp1.osuosl.org (Postfix) with ESMTP id 3C63683046\n for <ovs-dev@openvswitch.org>; Wed, 8 Sep 2021 09:48:43 +0000 (UTC)", "from smtp1.osuosl.org ([127.0.0.1])\n by localhost (smtp1.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id eybEUBzjSlwY for <ovs-dev@openvswitch.org>;\n Wed, 8 Sep 2021 09:48:41 +0000 (UTC)", "from wout3-smtp.messagingengine.com (wout3-smtp.messagingengine.com\n [64.147.123.19])\n by smtp1.osuosl.org (Postfix) with ESMTPS id A9A49837D3\n for <ovs-dev@openvswitch.org>; Wed, 8 Sep 2021 09:48:41 +0000 (UTC)", "from compute5.internal (compute5.nyi.internal [10.202.2.45])\n by mailout.west.internal (Postfix) with ESMTP id 1CD4E32009EF;\n Wed, 8 Sep 2021 05:48:41 -0400 (EDT)", "from mailfrontend2 ([10.202.2.163])\n by compute5.internal (MEProxy); Wed, 08 Sep 2021 05:48:41 -0400", "by mail.messagingengine.com (Postfix) with ESMTPA; Wed,\n 8 Sep 2021 05:48:40 -0400 (EDT)" ], "X-Virus-Scanned": [ "amavisd-new at osuosl.org", "amavisd-new at osuosl.org" ], "X-Greylist": "from auto-whitelisted by SQLgrey-1.8.0", "DKIM-Signature": [ "v=1; a=rsa-sha256; c=relaxed/relaxed; d=u256.net; h=from\n :to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding; s=fm2; bh=nF9mzwFi+GsUO\n J/dFI+FKJiBDswdFlTNgudQzHj2PGA=; b=aycoiurZyCPToTb1Vl54AM3RwYjgP\n PCrQc9a7yX9z9k0rGYD7YKNTw4b1am47aEuO8z5VojySkDrmwFHzytzOeBrHZBgx\n ge7RVMbgRW6D4xtNSKQJWBFkz8Fsm9G0i28JfmIImjLTWU/7/NZ2ZAd/e/IoVxMQ\n 2ofqM619nBpbVs+InVZGdJcDcSActj1YgL10H+AN+1hnpGn1RdaObRZq56Htddi0\n 85M+vSpaRRs8P/RBFBmfGgbyx6+jtAs+7AQfXIQEpiJrjiyUMkOlJdOE+VHN+cms\n Inb1YWQQKw2d9a4ZZ/NZq1U9WOUV9YZ06amT0QR6uSupJWJbdsagVzckw==", "v=1; a=rsa-sha256; c=relaxed/relaxed; d=\n messagingengine.com; h=cc:content-transfer-encoding:date:from\n :in-reply-to:message-id:mime-version:references:subject:to\n :x-me-proxy:x-me-proxy:x-me-sender:x-me-sender:x-sasl-enc; s=\n fm3; bh=nF9mzwFi+GsUOJ/dFI+FKJiBDswdFlTNgudQzHj2PGA=; b=e61wGV3J\n 0x2uQL7RSKjcvmsW7WAGL7kMeQecepLLe63bBnDjPTo6TDU4uOJWv/qkFDGNmhTm\n iHRKiQaUM9BgtyZ5g3QG+D9P1cGyzciGWehNBMZ8G/qETxuacdQNkmWExEsPPcmk\n FFAMgOBdMbn+6q6jR2foL2E2lNrikXc+2mshsmqHPHxQwPCD38OxLf90TfxT3g+N\n m7mFqzww39yKbUK2U7PUidbi2CT0VtuT6OLatT9hxShPqKr8d5Ef0F62Aqr/qxUt\n pdDRxfKoiZO1BoX/FpM04noOBXJ3wv6diSmcF7xoKjfYHavmg3/PRxVnod5IkbBk\n 2HW2gN7mE+/xoQ==" ], "X-ME-Sender": "<xms:-IY4YWz8LwVlKL2r3MMPf0FyIB24VgKG3hf-E8w8rNHqbrL9vXwDKA>\n <xme:-IY4YSSevfXmZss2cNdWV_ABF2h8A8RZ5UywxXFzNA5bZ8VPdPjHIXxMrk4jP82so\n FtLGf6qDOizOOQcg_w>", "X-ME-Received": "\n <xmr:-IY4YYUFYfbdscZyluTPcAyVx6AysO5zEXVE37eXMsRMKN6M8G2WJ9GVVO9eoDlEXr-65P5tT-lYAa6ElBMGfmSJZg>", "X-ME-Proxy-Cause": "\n gggruggvucftvghtrhhoucdtuddrgedvtddrudefjedgudekucetufdoteggodetrfdotf\n fvucfrrhhofhhilhgvmecuhfgrshhtofgrihhlpdfqfgfvpdfurfetoffkrfgpnffqhgen\n 
uceurghilhhouhhtmecufedttdenucesvcftvggtihhpihgvnhhtshculddquddttddmne\n cujfgurhephffvufffkffojghfggfgsedtkeertdertddtnecuhfhrohhmpefirggvthgr\n nhcutfhivhgvthcuoehgrhhivhgvsehuvdehiedrnhgvtheqnecuggftrfgrthhtvghrnh\n ephefgveffkeetheetfeeifedvheelfeejfeehveduteejhfekuedtkeeiuedvteehnecu\n vehluhhsthgvrhfuihiivgepgeenucfrrghrrghmpehmrghilhhfrhhomhepghhrihhvvg\n esuhdvheeirdhnvght", "X-ME-Proxy": "<xmx:-IY4YchDxMOwxIcTTy_fbNhQa4aFDjqSMIaLMeic1uu72p6kUjamzQ>\n <xmx:-IY4YYBQGthNEI9niH1azJbOhyzoWaoZF9CpO5v01F_o9wYH9yPWrQ>\n <xmx:-IY4YdLibV6gh2SpHalTHPLstEbGZFL4CB78PKC3rxoKG8oh23dhxA>\n <xmx:-IY4YR4ApUGV5JjSdsv1yDefx8-98Hn-T0ut9lXAQnTWYWBu2EQoyw>", "From": "Gaetan Rivet <grive@u256.net>", "To": "ovs-dev@openvswitch.org", "Date": "Wed, 8 Sep 2021 11:47:49 +0200", "Message-Id": "\n <f3259aed9a21ba21078770c888efe3b34b9c8a9c.1631094144.git.grive@u256.net>", "X-Mailer": "git-send-email 2.31.1", "In-Reply-To": "<cover.1631094144.git.grive@u256.net>", "References": "<cover.1631094144.git.grive@u256.net>", "MIME-Version": "1.0", "Cc": "Eli Britstein <elibr@nvidia.com>,\n Maxime Coquelin <maxime.coquelin@redhat.com>", "Subject": "[ovs-dev] [PATCH v5 25/27] dpif-netdev: Replace port mutex by rwlock", "X-BeenThere": "ovs-dev@openvswitch.org", "X-Mailman-Version": "2.1.15", "Precedence": "list", "List-Id": "<ovs-dev.openvswitch.org>", "List-Unsubscribe": "<https://mail.openvswitch.org/mailman/options/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=unsubscribe>", "List-Archive": "<http://mail.openvswitch.org/pipermail/ovs-dev/>", "List-Post": "<mailto:ovs-dev@openvswitch.org>", "List-Help": "<mailto:ovs-dev-request@openvswitch.org?subject=help>", "List-Subscribe": "<https://mail.openvswitch.org/mailman/listinfo/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=subscribe>", "Content-Type": "text/plain; charset=\"us-ascii\"", "Content-Transfer-Encoding": "7bit", "Errors-To": "ovs-dev-bounces@openvswitch.org", "Sender": "\"dev\" <ovs-dev-bounces@openvswitch.org>" }, "content": "The port mutex protects the netdev mapping, that can be changed by port\naddition or port deletion. HW offloads operations can be considered read\noperations on the port mapping itself. Use a rwlock to differentiate\nbetween read and write operations, allowing concurrent queries and\noffload insertions.\n\nBecause offload queries, deletion, and reconfigure_datapath() calls are\nall rdlock, the deadlock fixed by [1] is still avoided, as the rdlock\nside is recursive as prescribed by the POSIX standard. Executing\n'reconfigure_datapath()' only requires a rdlock taken, but it is sometimes\nexecuted in contexts where wrlock is taken ('do_add_port()' and\n'do_del_port()').\n\nThis means that the deadlock described in [2] is still valid and should\nbe mitigated. 
The rdlock is taken using 'tryrdlock()' during offload query,\nkeeping the current behavior.\n\n[1]: 81e89d5c2645 (\"dpif-netdev: Make datapath port mutex recursive.\")\n\n[2]: 12d0edd75eba (\"dpif-netdev: Avoid deadlock with offloading during PMD\n thread deletion.\").\n\nSigned-off-by: Gaetan Rivet <grive@u256.net>\nReviewed-by: Eli Britstein <elibr@nvidia.com>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/dpif-netdev.c | 143 +++++++++++++++++++-------------------\n lib/netdev-offload-dpdk.c | 4 +-\n 2 files changed, 74 insertions(+), 73 deletions(-)", "diff": "diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c\nindex 365726ed5..30547c0ec 100644\n--- a/lib/dpif-netdev.c\n+++ b/lib/dpif-netdev.c\n@@ -245,7 +245,7 @@ enum sched_assignment_type {\n * Acquisition order is, from outermost to innermost:\n *\n * dp_netdev_mutex (global)\n- * port_mutex\n+ * port_rwlock\n * bond_mutex\n * non_pmd_mutex\n */\n@@ -258,8 +258,8 @@ struct dp_netdev {\n /* Ports.\n *\n * Any lookup into 'ports' or any access to the dp_netdev_ports found\n- * through 'ports' requires taking 'port_mutex'. */\n- struct ovs_mutex port_mutex;\n+ * through 'ports' requires taking 'port_rwlock'. */\n+ struct ovs_rwlock port_rwlock;\n struct hmap ports;\n struct seq *port_seq; /* Incremented whenever a port changes. */\n \n@@ -323,7 +323,7 @@ struct dp_netdev {\n \n static struct dp_netdev_port *dp_netdev_lookup_port(const struct dp_netdev *dp,\n odp_port_t)\n- OVS_REQUIRES(dp->port_mutex);\n+ OVS_REQ_RDLOCK(dp->port_rwlock);\n \n enum rxq_cycles_counter_type {\n RXQ_CYCLES_PROC_CURR, /* Cycles spent successfully polling and\n@@ -491,17 +491,17 @@ struct dpif_netdev {\n \n static int get_port_by_number(struct dp_netdev *dp, odp_port_t port_no,\n struct dp_netdev_port **portp)\n- OVS_REQUIRES(dp->port_mutex);\n+ OVS_REQ_RDLOCK(dp->port_rwlock);\n static int get_port_by_name(struct dp_netdev *dp, const char *devname,\n struct dp_netdev_port **portp)\n- OVS_REQUIRES(dp->port_mutex);\n+ OVS_REQ_RDLOCK(dp->port_rwlock);\n static void dp_netdev_free(struct dp_netdev *)\n OVS_REQUIRES(dp_netdev_mutex);\n static int do_add_port(struct dp_netdev *dp, const char *devname,\n const char *type, odp_port_t port_no)\n- OVS_REQUIRES(dp->port_mutex);\n+ OVS_REQ_WRLOCK(dp->port_rwlock);\n static void do_del_port(struct dp_netdev *dp, struct dp_netdev_port *)\n- OVS_REQUIRES(dp->port_mutex);\n+ OVS_REQ_WRLOCK(dp->port_rwlock);\n static int dpif_netdev_open(const struct dpif_class *, const char *name,\n bool create, struct dpif **);\n static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,\n@@ -520,7 +520,7 @@ static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,\n int numa_id);\n static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);\n static void dp_netdev_set_nonpmd(struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex);\n+ OVS_REQ_WRLOCK(dp->port_rwlock);\n \n static void *pmd_thread_main(void *);\n static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,\n@@ -557,7 +557,7 @@ static void dp_netdev_offload_flush(struct dp_netdev *dp,\n struct dp_netdev_port *port);\n \n static void reconfigure_datapath(struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex);\n+ OVS_REQ_RDLOCK(dp->port_rwlock);\n static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);\n static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);\n static void dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd);\n@@ -1003,7 +1003,7 @@ 
dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,\n sorted_poll_thread_list(dp, &pmd_list, &n);\n \n /* take port mutex as HMAP iters over them. */\n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_rdlock(&dp->port_rwlock);\n \n for (size_t i = 0; i < n; i++) {\n struct dp_netdev_pmd_thread *pmd = pmd_list[i];\n@@ -1027,7 +1027,7 @@ dpif_netdev_subtable_lookup_set(struct unixctl_conn *conn, int argc OVS_UNUSED,\n }\n \n /* release port mutex before netdev mutex. */\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n free(pmd_list);\n }\n ovs_mutex_unlock(&dp_netdev_mutex);\n@@ -1638,7 +1638,7 @@ create_dpif_netdev(struct dp_netdev *dp)\n * Return ODPP_NONE on failure. */\n static odp_port_t\n choose_port(struct dp_netdev *dp, const char *name)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n uint32_t port_no;\n \n@@ -1759,7 +1759,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,\n ovs_refcount_init(&dp->ref_cnt);\n atomic_flag_clear(&dp->destroyed);\n \n- ovs_mutex_init_recursive(&dp->port_mutex);\n+ ovs_rwlock_init(&dp->port_rwlock);\n hmap_init(&dp->ports);\n dp->port_seq = seq_create();\n ovs_mutex_init(&dp->bond_mutex);\n@@ -1796,7 +1796,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,\n ovs_mutex_init_recursive(&dp->non_pmd_mutex);\n ovsthread_key_create(&dp->per_pmd_key, NULL);\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n /* non-PMD will be created before all other threads and will\n * allocate static_tx_qid = 0. */\n dp_netdev_set_nonpmd(dp);\n@@ -1804,7 +1804,7 @@ create_dp_netdev(const char *name, const struct dpif_class *class,\n error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,\n \"internal\"),\n ODPP_LOCAL);\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n if (error) {\n dp_netdev_free(dp);\n return error;\n@@ -1880,11 +1880,11 @@ dp_netdev_free(struct dp_netdev *dp)\n \n shash_find_and_delete(&dp_netdevs, dp->name);\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n HMAP_FOR_EACH_SAFE (port, next, node, &dp->ports) {\n do_del_port(dp, port);\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n ovs_mutex_lock(&dp->bond_mutex);\n CMAP_FOR_EACH (bond, node, &dp->tx_bonds) {\n@@ -1909,7 +1909,7 @@ dp_netdev_free(struct dp_netdev *dp)\n \n seq_destroy(dp->port_seq);\n hmap_destroy(&dp->ports);\n- ovs_mutex_destroy(&dp->port_mutex);\n+ ovs_rwlock_destroy(&dp->port_rwlock);\n \n cmap_destroy(&dp->tx_bonds);\n ovs_mutex_destroy(&dp->bond_mutex);\n@@ -2069,7 +2069,7 @@ out:\n static int\n do_add_port(struct dp_netdev *dp, const char *devname, const char *type,\n odp_port_t port_no)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_WRLOCK(dp->port_rwlock)\n {\n struct netdev_saved_flags *sf;\n struct dp_netdev_port *port;\n@@ -2121,7 +2121,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,\n odp_port_t port_no;\n int error;\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);\n if (*port_nop != ODPP_NONE) {\n port_no = *port_nop;\n@@ -2134,7 +2134,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,\n *port_nop = port_no;\n error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n return 
error;\n }\n@@ -2145,7 +2145,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)\n struct dp_netdev *dp = get_dp_netdev(dpif);\n int error;\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n if (port_no == ODPP_LOCAL) {\n error = EINVAL;\n } else {\n@@ -2156,7 +2156,7 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)\n do_del_port(dp, port);\n }\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n return error;\n }\n@@ -2169,7 +2169,7 @@ is_valid_port_number(odp_port_t port_no)\n \n static struct dp_netdev_port *\n dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct dp_netdev_port *port;\n \n@@ -2184,7 +2184,7 @@ dp_netdev_lookup_port(const struct dp_netdev *dp, odp_port_t port_no)\n static int\n get_port_by_number(struct dp_netdev *dp,\n odp_port_t port_no, struct dp_netdev_port **portp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n if (!is_valid_port_number(port_no)) {\n *portp = NULL;\n@@ -2219,7 +2219,7 @@ port_destroy(struct dp_netdev_port *port)\n static int\n get_port_by_name(struct dp_netdev *dp,\n const char *devname, struct dp_netdev_port **portp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct dp_netdev_port *port;\n \n@@ -2238,7 +2238,7 @@ get_port_by_name(struct dp_netdev *dp,\n /* Returns 'true' if there is a port with pmd netdev. */\n static bool\n has_pmd_port(struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct dp_netdev_port *port;\n \n@@ -2253,7 +2253,7 @@ has_pmd_port(struct dp_netdev *dp)\n \n static void\n do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_WRLOCK(dp->port_rwlock)\n {\n dp_netdev_offload_flush(dp, port);\n netdev_uninit_flow_api(port->netdev);\n@@ -2282,12 +2282,12 @@ dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,\n struct dp_netdev_port *port;\n int error;\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n error = get_port_by_number(dp, port_no, &port);\n if (!error && dpif_port) {\n answer_port_query(port, dpif_port);\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n return error;\n }\n@@ -2300,12 +2300,12 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,\n struct dp_netdev_port *port;\n int error;\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_rdlock(&dp->port_rwlock);\n error = get_port_by_name(dp, devname, &port);\n if (!error && dpif_port) {\n answer_port_query(port, dpif_port);\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n return error;\n }\n@@ -2515,11 +2515,11 @@ mark_to_flow_disassociate(struct dp_netdev_pmd_thread *pmd,\n \n port = netdev_ports_get(in_port, dpif_type_str);\n if (port) {\n- /* Taking a global 'port_mutex' to fulfill thread safety\n+ /* Taking a global 'port_rwlock' to fulfill thread safety\n * restrictions regarding netdev port mapping. 
*/\n- ovs_mutex_lock(&pmd->dp->port_mutex);\n+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);\n ret = netdev_flow_del(port, &flow->mega_ufid, NULL);\n- ovs_mutex_unlock(&pmd->dp->port_mutex);\n+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);\n netdev_close(port);\n }\n \n@@ -2682,14 +2682,14 @@ dp_netdev_flow_offload_put(struct dp_offload_flow_item *offload)\n goto err_free;\n }\n \n- /* Taking a global 'port_mutex' to fulfill thread safety\n+ /* Taking a global 'port_rwlock' to fulfill thread safety\n * restrictions regarding the netdev port mapping. */\n- ovs_mutex_lock(&pmd->dp->port_mutex);\n+ ovs_rwlock_rdlock(&pmd->dp->port_rwlock);\n ret = netdev_flow_put(port, &offload->match,\n CONST_CAST(struct nlattr *, offload->actions),\n offload->actions_len, &flow->mega_ufid, &info,\n NULL);\n- ovs_mutex_unlock(&pmd->dp->port_mutex);\n+ ovs_rwlock_unlock(&pmd->dp->port_rwlock);\n netdev_close(port);\n \n if (ret) {\n@@ -2745,9 +2745,9 @@ dp_offload_flush(struct dp_offload_thread_item *item)\n {\n struct dp_offload_flush_item *flush = &item->data->flush;\n \n- ovs_mutex_lock(&flush->dp->port_mutex);\n+ ovs_rwlock_rdlock(&flush->dp->port_rwlock);\n netdev_flow_flush(flush->netdev);\n- ovs_mutex_unlock(&flush->dp->port_mutex);\n+ ovs_rwlock_unlock(&flush->dp->port_rwlock);\n \n ovs_barrier_block(flush->barrier);\n \n@@ -2992,7 +2992,7 @@ dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,\n * complete its work. As the flush order will only be\n * enqueued after existing offload requests, those previous\n * offload requests must be processed, which requires being\n- * able to lock the 'port_mutex' from the offload thread.\n+ * able to lock the 'port_rwlock' from the offload thread.\n *\n * Flow offload flush is done when a port is being deleted.\n * Right after this call executes, the offload API is disabled\n@@ -3002,7 +3002,7 @@ dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,\n static void\n dp_netdev_offload_flush(struct dp_netdev *dp,\n struct dp_netdev_port *port)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_WRLOCK(dp->port_rwlock)\n {\n /* The flush mutex only serves to protect the static memory barrier.\n * The memory barrier needs to go beyond the function scope as\n@@ -3020,7 +3020,7 @@ dp_netdev_offload_flush(struct dp_netdev *dp,\n return;\n }\n \n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n ovs_mutex_lock(&flush_mutex);\n \n /* This thread and the offload thread. */\n@@ -3038,7 +3038,7 @@ dp_netdev_offload_flush(struct dp_netdev *dp,\n * Some offload provider (e.g. DPDK) keeps a netdev reference with\n * the offload data. If this reference is not closed, the netdev is\n * kept indefinitely. 
*/\n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n \n ovs_barrier_block(&barrier);\n ovs_barrier_destroy(&barrier);\n@@ -3092,7 +3092,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,\n struct hmap_node *node;\n int retval;\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_rdlock(&dp->port_rwlock);\n node = hmap_at_position(&dp->ports, &state->position);\n if (node) {\n struct dp_netdev_port *port;\n@@ -3109,7 +3109,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,\n } else {\n retval = EOF;\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n return retval;\n }\n@@ -3534,24 +3534,24 @@ dpif_netdev_get_flow_offload_status(const struct dp_netdev *dp,\n return false;\n }\n ofpbuf_use_stack(&buf, &act_buf, sizeof act_buf);\n- /* Taking a global 'port_mutex' to fulfill thread safety\n+ /* Taking a global 'port_rwlock' to fulfill thread safety\n * restrictions regarding netdev port mapping.\n *\n * XXX: Main thread will try to pause/stop all revalidators during datapath\n * reconfiguration via datapath purge callback (dp_purge_cb) while\n- * holding 'dp->port_mutex'. So we're not waiting for mutex here.\n- * Otherwise, deadlock is possible, bcause revalidators might sleep\n+ * rw-holding 'dp->port_rwlock'. So we're not waiting for lock here.\n+ * Otherwise, deadlock is possible, because revalidators might sleep\n * waiting for the main thread to release the lock and main thread\n * will wait for them to stop processing.\n * This workaround might make statistics less accurate. Especially\n * for flow deletion case, since there will be no other attempt. */\n- if (!ovs_mutex_trylock(&dp->port_mutex)) {\n+ if (!ovs_rwlock_tryrdlock(&dp->port_rwlock)) {\n ret = netdev_flow_get(netdev, &match, &actions,\n &netdev_flow->mega_ufid, stats, attrs, &buf);\n /* Storing statistics and attributes from the last request for\n * later use on mutex contention. 
*/\n dp_netdev_flow_set_last_stats_attrs(netdev_flow, stats, attrs, ret);\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n } else {\n dp_netdev_flow_get_last_stats_attrs(netdev_flow, stats, attrs, &ret);\n if (!ret && !attrs->dp_layer) {\n@@ -4380,7 +4380,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,\n \n nb_offloads = 0;\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_rdlock(&dp->port_rwlock);\n HMAP_FOR_EACH (port, node, &dp->ports) {\n uint64_t port_nb_offloads = 0;\n \n@@ -4389,7 +4389,7 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,\n nb_offloads += port_nb_offloads;\n }\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n atomic_read_relaxed(&dp_offload_thread.enqueued_item,\n &stats->counters[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED].value);\n@@ -4724,7 +4724,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,\n const char *affinity_list = smap_get(cfg, \"pmd-rxq-affinity\");\n bool emc_enabled = smap_get_bool(cfg, \"emc-enable\", true);\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n error = get_port_by_number(dp, port_no, &port);\n if (error) {\n goto unlock;\n@@ -4778,7 +4778,7 @@ dpif_netdev_port_set_config(struct dpif *dpif, odp_port_t port_no,\n \n dp_netdev_request_reconfigure(dp);\n unlock:\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n return error;\n }\n \n@@ -5288,7 +5288,7 @@ sched_pmd_add_rxq(struct sched_pmd *sched_pmd, struct dp_netdev_rxq *rxq,\n static void\n sched_numa_list_assignments(struct sched_numa_list *numa_list,\n struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct dp_netdev_port *port;\n \n@@ -5540,7 +5540,7 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,\n struct dp_netdev *dp,\n enum sched_assignment_type algo,\n enum vlog_level level)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct dp_netdev_port *port;\n struct dp_netdev_rxq **rxqs = NULL;\n@@ -5701,7 +5701,8 @@ sched_numa_list_schedule(struct sched_numa_list *numa_list,\n }\n \n static void\n-rxq_scheduling(struct dp_netdev *dp) OVS_REQUIRES(dp->port_mutex)\n+rxq_scheduling(struct dp_netdev *dp)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct sched_numa_list numa_list;\n enum sched_assignment_type algo = dp->pmd_rxq_assign_type;\n@@ -5758,7 +5759,7 @@ sched_numa_list_variance(struct sched_numa_list *numa_list)\n \n static bool\n pmd_rebalance_dry_run(struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct sched_numa_list numa_list_cur;\n struct sched_numa_list numa_list_est;\n@@ -5840,7 +5841,7 @@ reload_affected_pmds(struct dp_netdev *dp)\n \n static void\n reconfigure_pmd_threads(struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct dp_netdev_pmd_thread *pmd;\n struct ovs_numa_dump *pmd_cores;\n@@ -5938,7 +5939,7 @@ static void\n pmd_remove_stale_ports(struct dp_netdev *dp,\n struct dp_netdev_pmd_thread *pmd)\n OVS_EXCLUDED(pmd->port_mutex)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct rxq_poll *poll, *poll_next;\n struct tx_port *tx, *tx_next;\n@@ -5968,7 +5969,7 @@ pmd_remove_stale_ports(struct dp_netdev *dp,\n * rxqs and assigns all rxqs/txqs to pmd threads. 
*/\n static void\n reconfigure_datapath(struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct hmapx busy_threads = HMAPX_INITIALIZER(&busy_threads);\n struct dp_netdev_pmd_thread *pmd;\n@@ -6147,7 +6148,7 @@ reconfigure_datapath(struct dp_netdev *dp)\n /* Returns true if one of the netdevs in 'dp' requires a reconfiguration */\n static bool\n ports_require_restart(const struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_RDLOCK(dp->port_rwlock)\n {\n struct dp_netdev_port *port;\n \n@@ -6205,7 +6206,7 @@ dpif_netdev_run(struct dpif *dpif)\n long long int now = time_msec();\n struct dp_netdev_pmd_thread *pmd;\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_rdlock(&dp->port_rwlock);\n non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);\n if (non_pmd) {\n ovs_mutex_lock(&dp->non_pmd_mutex);\n@@ -6280,7 +6281,7 @@ dpif_netdev_run(struct dpif *dpif)\n if (dp_netdev_is_reconf_required(dp) || ports_require_restart(dp)) {\n reconfigure_datapath(dp);\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n \n tnl_neigh_cache_run();\n tnl_port_map_run();\n@@ -6300,7 +6301,7 @@ dpif_netdev_wait(struct dpif *dpif)\n struct dp_netdev *dp = get_dp_netdev(dpif);\n \n ovs_mutex_lock(&dp_netdev_mutex);\n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_rdlock(&dp->port_rwlock);\n HMAP_FOR_EACH (port, node, &dp->ports) {\n netdev_wait_reconf_required(port->netdev);\n if (!netdev_is_pmd(port->netdev)) {\n@@ -6311,7 +6312,7 @@ dpif_netdev_wait(struct dpif *dpif)\n }\n }\n }\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n ovs_mutex_unlock(&dp_netdev_mutex);\n seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);\n }\n@@ -6938,7 +6939,7 @@ dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)\n /* Sets the 'struct dp_netdev_pmd_thread' for non-pmd threads. */\n static void\n dp_netdev_set_nonpmd(struct dp_netdev *dp)\n- OVS_REQUIRES(dp->port_mutex)\n+ OVS_REQ_WRLOCK(dp->port_rwlock)\n {\n struct dp_netdev_pmd_thread *non_pmd;\n \n@@ -9097,7 +9098,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,\n ovs_refcount_ref(&dp->ref_cnt);\n ovs_mutex_unlock(&dp_netdev_mutex);\n \n- ovs_mutex_lock(&dp->port_mutex);\n+ ovs_rwlock_wrlock(&dp->port_rwlock);\n if (get_port_by_name(dp, argv[2], &port)) {\n unixctl_command_reply_error(conn, \"unknown port\");\n goto exit;\n@@ -9126,7 +9127,7 @@ dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,\n unixctl_command_reply(conn, NULL);\n \n exit:\n- ovs_mutex_unlock(&dp->port_mutex);\n+ ovs_rwlock_unlock(&dp->port_rwlock);\n dp_netdev_unref(dp);\n }\n \ndiff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c\nindex 28cb2f96b..ac4739b71 100644\n--- a/lib/netdev-offload-dpdk.c\n+++ b/lib/netdev-offload-dpdk.c\n@@ -46,8 +46,8 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(600, 600);\n * For example, simultaneous call of 'netdev_reconfigure()' for the same\n * 'netdev' is forbidden.\n *\n- * For current implementation all above restrictions could be fulfilled by\n- * taking the datapath 'port_mutex' in lib/dpif-netdev.c. */\n+ * For current implementation all above restrictions are fulfilled by\n+ * read-locking the datapath 'port_rwlock' in lib/dpif-netdev.c. */\n \n /*\n * A mapping from ufid to dpdk rte_flow.\n", "prefixes": [ "ovs-dev", "v5", "25/27" ] }
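The patch above converts dpif-netdev's `port_mutex` into `port_rwlock`: port addition and deletion take the write lock, offload insertion/deletion and datapath reconfiguration take the (recursive) read lock so they can run concurrently, and the offload-status query uses a try-lock so it never sleeps on the lock (the deadlock noted in the XXX comment). Below is a minimal standalone sketch of that locking pattern using plain POSIX rwlocks rather than the OVS `ovs_rwlock` wrappers (which are built on top of them); the `fake_*` function names and bodies are hypothetical and only illustrate the reader/writer split described in the commit message.

```c
/* Standalone illustration of the port_rwlock pattern, not OVS code.
 * Build with: cc rwlock_sketch.c -pthread */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t port_rwlock = PTHREAD_RWLOCK_INITIALIZER;

/* Port addition/deletion mutates the netdev mapping: writer lock. */
static void fake_port_add(void)
{
    pthread_rwlock_wrlock(&port_rwlock);
    /* ... insert the port into the ports hmap ... */
    pthread_rwlock_unlock(&port_rwlock);
}

/* Offload insertion/deletion only reads the port mapping: reader lock,
 * so several offload operations may proceed concurrently. */
static void fake_offload_put(void)
{
    pthread_rwlock_rdlock(&port_rwlock);
    /* ... look up the port and issue the flow offload ... */
    pthread_rwlock_unlock(&port_rwlock);
}

/* The offload-status query must not block on the lock; it try-locks and
 * falls back to cached statistics, mirroring tryrdlock() in the patch. */
static void fake_offload_stats_query(void)
{
    if (pthread_rwlock_tryrdlock(&port_rwlock) == 0) {
        /* ... query current offload statistics ... */
        pthread_rwlock_unlock(&port_rwlock);
    } else {
        /* ... reuse the last stored statistics/attributes ... */
    }
}

int main(void)
{
    fake_port_add();
    fake_offload_put();
    fake_offload_stats_query();
    puts("ok");
    return 0;
}
```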