get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

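For example, the exchange below can be reproduced with a plain GET request; read access needs no authentication. A minimal sketch using Python's requests library (the patch ID and field names are taken from the response shown below):

import requests

# Fetch a single patch as JSON.
resp = requests.get("http://patchwork.ozlabs.org/api/patches/1525728/")
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])
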
GET /api/patches/1525728/
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1525728,
    "url": "http://patchwork.ozlabs.org/api/patches/1525728/",
    "web_url": "http://patchwork.ozlabs.org/project/openvswitch/patch/f422bac1b2a2a68da417877c5828a7a5fb698e6e.1631094144.git.grive@u256.net/",
    "project": {
        "id": 47,
        "url": "http://patchwork.ozlabs.org/api/projects/47/",
        "name": "Open vSwitch",
        "link_name": "openvswitch",
        "list_id": "ovs-dev.openvswitch.org",
        "list_email": "ovs-dev@openvswitch.org",
        "web_url": "http://openvswitch.org/",
        "scm_url": "git@github.com:openvswitch/ovs.git",
        "webscm_url": "https://github.com/openvswitch/ovs",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<f422bac1b2a2a68da417877c5828a7a5fb698e6e.1631094144.git.grive@u256.net>",
    "list_archive_url": null,
    "date": "2021-09-08T09:47:43",
    "name": "[ovs-dev,v5,19/27] dpif-netdev: Execute flush from offload thread",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "2763ad7762dfcac39ce3f77274063be6f7f90aca",
    "submitter": {
        "id": 78795,
        "url": "http://patchwork.ozlabs.org/api/people/78795/",
        "name": "Gaëtan Rivet",
        "email": "grive@u256.net"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/openvswitch/patch/f422bac1b2a2a68da417877c5828a7a5fb698e6e.1631094144.git.grive@u256.net/mbox/",
    "series": [
        {
            "id": 261424,
            "url": "http://patchwork.ozlabs.org/api/series/261424/",
            "web_url": "http://patchwork.ozlabs.org/project/openvswitch/list/?series=261424",
            "date": "2021-09-08T09:47:24",
            "name": "dpif-netdev: Parallel offload processing",
            "version": 5,
            "mbox": "http://patchwork.ozlabs.org/series/261424/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1525728/comments/",
    "check": "success",
    "checks": "http://patchwork.ozlabs.org/api/patches/1525728/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<ovs-dev-bounces@openvswitch.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "ovs-dev@openvswitch.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "ovs-dev@lists.linuxfoundation.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=u256.net header.i=@u256.net header.a=rsa-sha256\n header.s=fm2 header.b=x+PLbEgW;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=messagingengine.com header.i=@messagingengine.com\n header.a=rsa-sha256 header.s=fm3 header.b=FFSMPm7K;\n\tdkim-atps=neutral",
            "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org\n (client-ip=140.211.166.133; helo=smtp2.osuosl.org;\n envelope-from=ovs-dev-bounces@openvswitch.org; receiver=<UNKNOWN>)",
            "smtp4.osuosl.org (amavisd-new);\n dkim=pass (2048-bit key) header.d=u256.net header.b=\"x+PLbEgW\";\n dkim=pass (2048-bit key) header.d=messagingengine.com\n header.b=\"FFSMPm7K\""
        ],
        "Received": [
            "from smtp2.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4H4HRn1mwdz9sW8\n\tfor <incoming@patchwork.ozlabs.org>; Wed,  8 Sep 2021 19:49:57 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby smtp2.osuosl.org (Postfix) with ESMTP id 0C626406CE;\n\tWed,  8 Sep 2021 09:49:55 +0000 (UTC)",
            "from smtp2.osuosl.org ([127.0.0.1])\n\tby localhost (smtp2.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id MMQ7zz6rTu2s; Wed,  8 Sep 2021 09:49:53 +0000 (UTC)",
            "from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])\n\tby smtp2.osuosl.org (Postfix) with ESMTPS id 6E6684078E;\n\tWed,  8 Sep 2021 09:49:52 +0000 (UTC)",
            "from lf-lists.osuosl.org (localhost [127.0.0.1])\n\tby lists.linuxfoundation.org (Postfix) with ESMTP id 41B02C001C;\n\tWed,  8 Sep 2021 09:49:52 +0000 (UTC)",
            "from smtp4.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n by lists.linuxfoundation.org (Postfix) with ESMTP id 10940C0011\n for <ovs-dev@openvswitch.org>; Wed,  8 Sep 2021 09:49:51 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n by smtp4.osuosl.org (Postfix) with ESMTP id 1D289407C4\n for <ovs-dev@openvswitch.org>; Wed,  8 Sep 2021 09:48:33 +0000 (UTC)",
            "from smtp4.osuosl.org ([127.0.0.1])\n by localhost (smtp4.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id tJTo9l1yFGEa for <ovs-dev@openvswitch.org>;\n Wed,  8 Sep 2021 09:48:32 +0000 (UTC)",
            "from wout3-smtp.messagingengine.com (wout3-smtp.messagingengine.com\n [64.147.123.19])\n by smtp4.osuosl.org (Postfix) with ESMTPS id 6BB6F40233\n for <ovs-dev@openvswitch.org>; Wed,  8 Sep 2021 09:48:32 +0000 (UTC)",
            "from compute5.internal (compute5.nyi.internal [10.202.2.45])\n by mailout.west.internal (Postfix) with ESMTP id E97C632009E2;\n Wed,  8 Sep 2021 05:48:31 -0400 (EDT)",
            "from mailfrontend2 ([10.202.2.163])\n by compute5.internal (MEProxy); Wed, 08 Sep 2021 05:48:32 -0400",
            "by mail.messagingengine.com (Postfix) with ESMTPA; Wed,\n 8 Sep 2021 05:48:30 -0400 (EDT)"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "from auto-whitelisted by SQLgrey-1.8.0",
        "DKIM-Signature": [
            "v=1; a=rsa-sha256; c=relaxed/relaxed; d=u256.net; h=from\n :to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding; s=fm2; bh=SjPJ6r3fLVJ+A\n bkF8T3FvIcFrG/wf7A7ANKzFOKPF6A=; b=x+PLbEgWxIJy9LzsYqSYGq4JtNRGf\n e2+/+z2Qn/vrS0xmcL0z4++EE/9exZx4GXlNVEbxaC3Kz7lbFYX1acOY7bFJS0YP\n jdvZ+u5Saj28od+yk+XoTjyoxBcuAjq2xwJzHMjxq6CYQloqqVBGEx/l6qdoN/OW\n 5o7JWwnrivcZ3hRRr5rbn0cdEp2S+dZg9wG9SWiXZg0p4eHovNhhLtXxdyO7cwj7\n 0y6l/VfIH4NX11AROCqM1q8jYd8ydrrvyZBhJMZD73bfC4QQ5ES1/WuunV9sYm1z\n c9Tnp8SiPdgdRQP2hA66pVzkPIs5SqAyG832oIIn+WrzZOcxC5tsNyInQ==",
            "v=1; a=rsa-sha256; c=relaxed/relaxed; d=\n messagingengine.com; h=cc:content-transfer-encoding:date:from\n :in-reply-to:message-id:mime-version:references:subject:to\n :x-me-proxy:x-me-proxy:x-me-sender:x-me-sender:x-sasl-enc; s=\n fm3; bh=SjPJ6r3fLVJ+AbkF8T3FvIcFrG/wf7A7ANKzFOKPF6A=; b=FFSMPm7K\n Ckxdva3EhO1VPGKXd3d4Ptsa5EUkyplPe0QNDMxXq4k1chgcR27EeMq4RtU3/ToG\n 2Vcs5fUWZJ6WIAYFlQ3pHcN/pedlnf5unbP0aPVu4k3ZCcmNucmbY3G+5eUeFE/Q\n +EZ8+m2nGpAO7JiFot0Z5SDVHueZ8JJYG1KgqY2EdHc2a5U9vkqQoa7cchKuuuXl\n WYujIUEeCT0phR1KoKcWKKbcJ5TTUcQ55dFJ3CZjoaP4iWL2eyrb6mpFT3yDs7+P\n eZzBpNBTtWKRSRDs31Y7w/6fYrZ+yxQsLaT4UxPuQXPj8zCIR4Kt1sjOhPW6sfg8\n px0vqOHKmGPvJQ=="
        ],
        "X-ME-Sender": "<xms:74Y4YVKTEJDgLGKyKqOpnA8gy6X5Wx2v8XGZrt_gOwysDi1ybvfR6A>\n <xme:74Y4YRKC-G5r9389NEc8usfGtnf1RYOzKadT3nYg30VjdR5RAnO7bgUtqP9DBhNRr\n ZMweGkc1vymPhHUAFc>",
        "X-ME-Received": "\n <xmr:74Y4YdvlowBKtV072djmTUT__xsOOmXWfnRn7fXKC5xdQvK5GgLsSQVmbjRJYddn0zfH2OssMbho1KaRMR9007ka5g>",
        "X-ME-Proxy-Cause": "\n gggruggvucftvghtrhhoucdtuddrgedvtddrudefjedgudekucetufdoteggodetrfdotf\n fvucfrrhhofhhilhgvmecuhfgrshhtofgrihhlpdfqfgfvpdfurfetoffkrfgpnffqhgen\n uceurghilhhouhhtmecufedttdenucesvcftvggtihhpihgvnhhtshculddquddttddmne\n cujfgurhephffvufffkffojghfggfgsedtkeertdertddtnecuhfhrohhmpefirggvthgr\n nhcutfhivhgvthcuoehgrhhivhgvsehuvdehiedrnhgvtheqnecuggftrfgrthhtvghrnh\n ephefgveffkeetheetfeeifedvheelfeejfeehveduteejhfekuedtkeeiuedvteehnecu\n vehluhhsthgvrhfuihiivgepfeenucfrrghrrghmpehmrghilhhfrhhomhepghhrihhvvg\n esuhdvheeirdhnvght",
        "X-ME-Proxy": "<xmx:74Y4YWYME5tybV3f9rjhSuyZzGhT_E9V7ncMrnub6-f_wXT73va5MA>\n <xmx:74Y4Ycavi0KeSXWPai6aayWi0DJ20K7RLzCzb8-ZTMJ3t6cCkZCWng>\n <xmx:74Y4YaBX1BQzJqFsaVKGA3ZkaRh8oC4jo32Ap9OslJyHrVXdlWMI2w>\n <xmx:74Y4YVDD10Yged-2EXgWNHKnS3zMLwXBo_E2VtCiX-mjEF6_1UW9uw>",
        "From": "Gaetan Rivet <grive@u256.net>",
        "To": "ovs-dev@openvswitch.org",
        "Date": "Wed,  8 Sep 2021 11:47:43 +0200",
        "Message-Id": "\n <f422bac1b2a2a68da417877c5828a7a5fb698e6e.1631094144.git.grive@u256.net>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<cover.1631094144.git.grive@u256.net>",
        "References": "<cover.1631094144.git.grive@u256.net>",
        "MIME-Version": "1.0",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "Subject": "[ovs-dev] [PATCH v5 19/27] dpif-netdev: Execute flush from offload\n\tthread",
        "X-BeenThere": "ovs-dev@openvswitch.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "<ovs-dev.openvswitch.org>",
        "List-Unsubscribe": "<https://mail.openvswitch.org/mailman/options/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=unsubscribe>",
        "List-Archive": "<http://mail.openvswitch.org/pipermail/ovs-dev/>",
        "List-Post": "<mailto:ovs-dev@openvswitch.org>",
        "List-Help": "<mailto:ovs-dev-request@openvswitch.org?subject=help>",
        "List-Subscribe": "<https://mail.openvswitch.org/mailman/listinfo/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "ovs-dev-bounces@openvswitch.org",
        "Sender": "\"dev\" <ovs-dev-bounces@openvswitch.org>"
    },
    "content": "When a port is deleted, its offloads must be flushed.  The operation\nruns in the thread that initiated it.  Offload data is thus accessed\njointly by the port deletion thread(s) and the offload thread, which\ncomplicates the data access model.\n\nTo simplify this model, as a pre-step toward introducing parallel\noffloads, execute the flush operation in the offload thread.\n\nSigned-off-by: Gaetan Rivet <grive@u256.net>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/dpif-netdev.c | 126 ++++++++++++++++++++++++++++++++++++++++++++--\n 1 file changed, 122 insertions(+), 4 deletions(-)",
    "diff": "diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c\nindex e0052a65b..381c959af 100644\n--- a/lib/dpif-netdev.c\n+++ b/lib/dpif-netdev.c\n@@ -335,6 +335,7 @@ enum rxq_cycles_counter_type {\n \n enum dp_offload_type {\n     DP_OFFLOAD_FLOW,\n+    DP_OFFLOAD_FLUSH,\n };\n \n enum {\n@@ -353,8 +354,15 @@ struct dp_offload_flow_item {\n     odp_port_t orig_in_port; /* Originating in_port for tnl flows. */\n };\n \n+struct dp_offload_flush_item {\n+    struct dp_netdev *dp;\n+    struct netdev *netdev;\n+    struct ovs_barrier *barrier;\n+};\n+\n union dp_offload_thread_data {\n     struct dp_offload_flow_item flow;\n+    struct dp_offload_flush_item flush;\n };\n \n struct dp_offload_thread_item {\n@@ -543,6 +551,9 @@ static void dp_netdev_del_bond_tx_from_pmd(struct dp_netdev_pmd_thread *pmd,\n                                            uint32_t bond_id)\n     OVS_EXCLUDED(pmd->bond_mutex);\n \n+static void dp_netdev_offload_flush(struct dp_netdev *dp,\n+                                    struct dp_netdev_port *port);\n+\n static void reconfigure_datapath(struct dp_netdev *dp)\n     OVS_REQUIRES(dp->port_mutex);\n static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);\n@@ -2242,7 +2253,7 @@ static void\n do_del_port(struct dp_netdev *dp, struct dp_netdev_port *port)\n     OVS_REQUIRES(dp->port_mutex)\n {\n-    netdev_flow_flush(port->netdev);\n+    dp_netdev_offload_flush(dp, port);\n     netdev_uninit_flow_api(port->netdev);\n     hmap_remove(&dp->ports, &port->node);\n     seq_change(dp->port_seq);\n@@ -2594,13 +2605,16 @@ dp_netdev_free_offload(struct dp_offload_thread_item *offload)\n     case DP_OFFLOAD_FLOW:\n         dp_netdev_free_flow_offload(offload);\n         break;\n+    case DP_OFFLOAD_FLUSH:\n+        free(offload);\n+        break;\n     default:\n         OVS_NOT_REACHED();\n     };\n }\n \n static void\n-dp_netdev_append_flow_offload(struct dp_offload_thread_item *offload)\n+dp_netdev_append_offload(struct dp_offload_thread_item *offload)\n {\n     ovs_mutex_lock(&dp_offload_thread.mutex);\n     ovs_list_push_back(&dp_offload_thread.list, &offload->node);\n@@ -2734,6 +2748,23 @@ dp_offload_flow(struct dp_offload_thread_item *item)\n              UUID_ARGS((struct uuid *) &flow_offload->flow->mega_ufid));\n }\n \n+static void\n+dp_offload_flush(struct dp_offload_thread_item *item)\n+{\n+    struct dp_offload_flush_item *flush = &item->data->flush;\n+\n+    ovs_mutex_lock(&flush->dp->port_mutex);\n+    netdev_flow_flush(flush->netdev);\n+    ovs_mutex_unlock(&flush->dp->port_mutex);\n+\n+    ovs_barrier_block(flush->barrier);\n+\n+    /* Allow the other thread to take again the port lock, before\n+     * continuing offload operations in this thread.\n+     */\n+    ovs_barrier_block(flush->barrier);\n+}\n+\n #define DP_NETDEV_OFFLOAD_QUIESCE_INTERVAL_US (10 * 1000) /* 10 ms */\n \n static void *\n@@ -2764,6 +2795,9 @@ dp_netdev_flow_offload_main(void *data OVS_UNUSED)\n         case DP_OFFLOAD_FLOW:\n             dp_offload_flow(offload);\n             break;\n+        case DP_OFFLOAD_FLUSH:\n+            dp_offload_flush(offload);\n+            break;\n         default:\n             OVS_NOT_REACHED();\n         }\n@@ -2801,7 +2835,7 @@ queue_netdev_flow_del(struct dp_netdev_pmd_thread *pmd,\n     offload = dp_netdev_alloc_flow_offload(pmd, flow,\n                                            DP_NETDEV_FLOW_OFFLOAD_OP_DEL);\n     offload->timestamp = pmd->ctx.now;\n-    dp_netdev_append_flow_offload(offload);\n+    dp_netdev_append_offload(offload);\n 
}\n \n static void\n@@ -2902,7 +2936,7 @@ queue_netdev_flow_put(struct dp_netdev_pmd_thread *pmd,\n     flow_offload->orig_in_port = orig_in_port;\n \n     item->timestamp = pmd->ctx.now;\n-    dp_netdev_append_flow_offload(item);\n+    dp_netdev_append_offload(item);\n }\n \n static void\n@@ -2926,6 +2960,90 @@ dp_netdev_pmd_remove_flow(struct dp_netdev_pmd_thread *pmd,\n     dp_netdev_flow_unref(flow);\n }\n \n+static void\n+dp_netdev_offload_flush_enqueue(struct dp_netdev *dp,\n+                                struct netdev *netdev,\n+                                struct ovs_barrier *barrier)\n+{\n+    struct dp_offload_thread_item *item;\n+    struct dp_offload_flush_item *flush;\n+\n+    if (ovsthread_once_start(&offload_thread_once)) {\n+        xpthread_cond_init(&dp_offload_thread.cond, NULL);\n+        ovs_thread_create(\"hw_offload\", dp_netdev_flow_offload_main, NULL);\n+        ovsthread_once_done(&offload_thread_once);\n+    }\n+\n+    item = xmalloc(sizeof *item + sizeof *flush);\n+    item->type = DP_OFFLOAD_FLUSH;\n+    item->timestamp = time_usec();\n+\n+    flush = &item->data->flush;\n+    flush->dp = dp;\n+    flush->netdev = netdev;\n+    flush->barrier = barrier;\n+\n+    dp_netdev_append_offload(item);\n+}\n+\n+/* Blocking call that will wait on the offload thread to\n+ * complete its work.  As the flush order will only be\n+ * enqueued after existing offload requests, those previous\n+ * offload requests must be processed, which requires being\n+ * able to lock the 'port_mutex' from the offload thread.\n+ *\n+ * Flow offload flush is done when a port is being deleted.\n+ * Right after this call executes, the offload API is disabled\n+ * for the port. This call must be made blocking until the\n+ * offload provider completed its job.\n+ */\n+static void\n+dp_netdev_offload_flush(struct dp_netdev *dp,\n+                        struct dp_netdev_port *port)\n+    OVS_REQUIRES(dp->port_mutex)\n+{\n+    /* The flush mutex only serves to protect the static memory barrier.\n+     * The memory barrier needs to go beyond the function scope as\n+     * the other thread can resume from blocking after this function\n+     * already finished.\n+     * As the barrier is made static, then it will be shared by\n+     * calls to this function, and it needs to be protected from\n+     * concurrent use.\n+     */\n+    static struct ovs_mutex flush_mutex = OVS_MUTEX_INITIALIZER;\n+    static struct ovs_barrier barrier OVS_GUARDED_BY(flush_mutex);\n+    struct netdev *netdev;\n+\n+    if (!netdev_is_flow_api_enabled()) {\n+        return;\n+    }\n+\n+    ovs_mutex_unlock(&dp->port_mutex);\n+    ovs_mutex_lock(&flush_mutex);\n+\n+    /* This thread and the offload thread. */\n+    ovs_barrier_init(&barrier, 2);\n+\n+    netdev = netdev_ref(port->netdev);\n+    dp_netdev_offload_flush_enqueue(dp, netdev, &barrier);\n+    ovs_barrier_block(&barrier);\n+    netdev_close(netdev);\n+\n+    /* Take back the datapath port lock before allowing the offload\n+     * thread to proceed further. The port deletion must complete first,\n+     * to ensure no further offloads are inserted after the flush.\n+     *\n+     * Some offload provider (e.g. DPDK) keeps a netdev reference with\n+     * the offload data. If this reference is not closed, the netdev is\n+     * kept indefinitely. 
*/\n+    ovs_mutex_lock(&dp->port_mutex);\n+\n+    ovs_barrier_block(&barrier);\n+    ovs_barrier_destroy(&barrier);\n+\n+    ovs_mutex_unlock(&flush_mutex);\n+}\n+\n static void\n dp_netdev_pmd_flow_flush(struct dp_netdev_pmd_thread *pmd)\n {\n",
    "prefixes": [
        "ovs-dev",
        "v5",
        "19/27"
    ]
}
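
Updating a patch (PUT or PATCH) requires an authenticated account with maintainer rights on the project. A minimal sketch of a partial update, again using Python's requests library, assuming a valid API token is exported as PW_TOKEN; the state name "accepted" is illustrative and must match a state configured on the instance:

import os
import requests

# Assumption: an API token created in the Patchwork web UI.
token = os.environ["PW_TOKEN"]

# Partially update the patch: set its state and archive it.
# Both fields appear in the GET response above.
resp = requests.patch(
    "http://patchwork.ozlabs.org/api/patches/1525728/",
    headers={"Authorization": f"Token {token}"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"], resp.json()["archived"])

A PUT request works the same way but replaces the full set of writable fields, so omitted fields are reset rather than left unchanged.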