get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/1523534/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1523534,
    "url": "http://patchwork.ozlabs.org/api/patches/1523534/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/ovn/patch/20210902090734.9353-4-anton.ivanov@cambridgegreys.com/",
    "project": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/projects/68/?format=api",
        "name": "Open Virtual Network development",
        "link_name": "ovn",
        "list_id": "ovs-dev.openvswitch.org",
        "list_email": "ovs-dev@openvswitch.org",
        "web_url": "http://openvswitch.org/",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20210902090734.9353-4-anton.ivanov@cambridgegreys.com>",
    "list_archive_url": null,
    "date": "2021-09-02T09:07:34",
    "name": "[ovs-dev,v5,4/4] northd: Restore parallel build with dp_groups",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "4c283ba9a44d3f4b1984f327ad203545def0bbd2",
    "submitter": {
        "id": 71996,
        "url": "http://patchwork.ozlabs.org/api/people/71996/?format=api",
        "name": "Anton Ivanov",
        "email": "anton.ivanov@cambridgegreys.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/ovn/patch/20210902090734.9353-4-anton.ivanov@cambridgegreys.com/mbox/",
    "series": [
        {
            "id": 260687,
            "url": "http://patchwork.ozlabs.org/api/series/260687/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/ovn/list/?series=260687",
            "date": "2021-09-02T09:07:31",
            "name": "[ovs-dev,v5,1/4] northd: Disable parallel processing for logical_dp_groups",
            "version": 5,
            "mbox": "http://patchwork.ozlabs.org/series/260687/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1523534/comments/",
    "check": "success",
    "checks": "http://patchwork.ozlabs.org/api/patches/1523534/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<ovs-dev-bounces@openvswitch.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "ovs-dev@openvswitch.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "ovs-dev@lists.linuxfoundation.org"
        ],
        "Authentication-Results": "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org\n (client-ip=2605:bc80:3010::136; helo=smtp3.osuosl.org;\n envelope-from=ovs-dev-bounces@openvswitch.org; receiver=<UNKNOWN>)",
        "Received": [
            "from smtp3.osuosl.org (smtp3.osuosl.org [IPv6:2605:bc80:3010::136])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4H0ZpZ0Y3Hz9sCD\n\tfor <incoming@patchwork.ozlabs.org>; Thu,  2 Sep 2021 19:08:22 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby smtp3.osuosl.org (Postfix) with ESMTP id 8588761485;\n\tThu,  2 Sep 2021 09:08:19 +0000 (UTC)",
            "from smtp3.osuosl.org ([127.0.0.1])\n\tby localhost (smtp3.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id J3RnNjKYVNJ9; Thu,  2 Sep 2021 09:08:15 +0000 (UTC)",
            "from lists.linuxfoundation.org (lf-lists.osuosl.org [140.211.9.56])\n\tby smtp3.osuosl.org (Postfix) with ESMTPS id 0D39A61497;\n\tThu,  2 Sep 2021 09:08:13 +0000 (UTC)",
            "from lf-lists.osuosl.org (localhost [127.0.0.1])\n\tby lists.linuxfoundation.org (Postfix) with ESMTP id C30CDC001F;\n\tThu,  2 Sep 2021 09:08:12 +0000 (UTC)",
            "from smtp2.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n by lists.linuxfoundation.org (Postfix) with ESMTP id DF4A5C0028\n for <ovs-dev@openvswitch.org>; Thu,  2 Sep 2021 09:08:10 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n by smtp2.osuosl.org (Postfix) with ESMTP id CB823406F9\n for <ovs-dev@openvswitch.org>; Thu,  2 Sep 2021 09:08:10 +0000 (UTC)",
            "from smtp2.osuosl.org ([127.0.0.1])\n by localhost (smtp2.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id 5rUCS7qj8I6c for <ovs-dev@openvswitch.org>;\n Thu,  2 Sep 2021 09:08:09 +0000 (UTC)",
            "from www.kot-begemot.co.uk (ivanoab7.miniserver.com [37.128.132.42])\n by smtp2.osuosl.org (Postfix) with ESMTPS id 85017406EF\n for <ovs-dev@openvswitch.org>; Thu,  2 Sep 2021 09:08:09 +0000 (UTC)",
            "from tun252.jain.kot-begemot.co.uk ([192.168.18.6]\n helo=jain.kot-begemot.co.uk)\n by www.kot-begemot.co.uk with esmtps\n (TLS1.3:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.92) (envelope-from <anton.ivanov@cambridgegreys.com>)\n id 1mLihX-0003pJ-JX\n for ovs-dev@openvswitch.org; Thu, 02 Sep 2021 09:08:08 +0000",
            "from jain.kot-begemot.co.uk ([192.168.3.3])\n by jain.kot-begemot.co.uk with esmtp (Exim 4.92)\n (envelope-from <anton.ivanov@cambridgegreys.com>)\n id 1mLihO-0002S3-Aw; Thu, 02 Sep 2021 10:07:58 +0100"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "from auto-whitelisted by SQLgrey-1.8.0",
        "From": "anton.ivanov@cambridgegreys.com",
        "To": "ovs-dev@openvswitch.org",
        "Date": "Thu,  2 Sep 2021 10:07:34 +0100",
        "Message-Id": "<20210902090734.9353-4-anton.ivanov@cambridgegreys.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20210902090734.9353-1-anton.ivanov@cambridgegreys.com>",
        "References": "<20210902090734.9353-1-anton.ivanov@cambridgegreys.com>",
        "MIME-Version": "1.0",
        "X-Clacks-Overhead": "GNU Terry Pratchett",
        "Cc": "Anton Ivanov <anton.ivanov@cambridgegreys.com>",
        "Subject": "[ovs-dev] [OVN Patch v5 4/4] northd: Restore parallel build with\n\tdp_groups",
        "X-BeenThere": "ovs-dev@openvswitch.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "<ovs-dev.openvswitch.org>",
        "List-Unsubscribe": "<https://mail.openvswitch.org/mailman/options/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=unsubscribe>",
        "List-Archive": "<http://mail.openvswitch.org/pipermail/ovs-dev/>",
        "List-Post": "<mailto:ovs-dev@openvswitch.org>",
        "List-Help": "<mailto:ovs-dev-request@openvswitch.org?subject=help>",
        "List-Subscribe": "<https://mail.openvswitch.org/mailman/listinfo/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "ovs-dev-bounces@openvswitch.org",
        "Sender": "\"dev\" <ovs-dev-bounces@openvswitch.org>"
    },
    "content": "From: Anton Ivanov <anton.ivanov@cambridgegreys.com>\n\nRestore parallel build with dp groups using rwlock instead\nof per row locking as an underlying mechanism.\n\nThis provides improvement ~ 10% end-to-end on ovn-heater\nunder virutalization despite awakening some qemu gremlin\nwhich makes qemu climb to silly CPU usage. The gain on\nbare metal is likely to be higher.\n\nSigned-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com>\n---\n northd/ovn-northd.c | 183 +++++++++++++++++++++++++++++++++-----------\n 1 file changed, 137 insertions(+), 46 deletions(-)",
    "diff": "diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c\nindex 28af790bc..f5d49143d 100644\n--- a/northd/ovn-northd.c\n+++ b/northd/ovn-northd.c\n@@ -59,6 +59,7 @@\n #include \"unixctl.h\"\n #include \"util.h\"\n #include \"uuid.h\"\n+#include \"ovs-thread.h\"\n #include \"openvswitch/vlog.h\"\n \n VLOG_DEFINE_THIS_MODULE(ovn_northd);\n@@ -4369,7 +4370,26 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,\n static bool use_logical_dp_groups = false;\n static bool use_parallel_build = true;\n \n-static struct hashrow_locks lflow_locks;\n+static struct ovs_rwlock flowtable_lock;\n+\n+static void ovn_make_multi_lflow(struct ovn_lflow *old_lflow,\n+                              struct ovn_datapath *od,\n+                              struct lflow_state *lflow_map,\n+                              uint32_t hash)\n+{\n+    hmapx_add(&old_lflow->od_group, od);\n+    hmap_remove(&lflow_map->single_od, &old_lflow->hmap_node);\n+    if (use_parallel_build) {\n+        hmap_insert_fast(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);\n+    } else {\n+        hmap_insert(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);\n+    }\n+}\n+\n+#ifdef __clang__\n+#pragma clang diagnostic push\n+#pragma clang diagnostic ignored \"-Wthread-safety\"\n+#endif\n \n /* Adds a row with the specified contents to the Logical_Flow table.\n  * Version to use when locking is required.\n@@ -4385,57 +4405,133 @@ do_ovn_lflow_add(struct lflow_state *lflow_map, struct ovn_datapath *od,\n     struct ovn_lflow *old_lflow;\n     struct ovn_lflow *lflow;\n \n+    /* Fast Path.\n+     * See if we can get away without writing - grab a rdlock and check\n+     * if we can get away with as little work as possible.\n+     */\n+\n     if (use_logical_dp_groups) {\n-        old_lflow = do_ovn_lflow_find(&lflow_map->single_od, NULL, stage,\n-                                      priority, match,\n+        if (use_parallel_build) {\n+            
ovs_rwlock_rdlock(&flowtable_lock);\n+        }\n+        old_lflow = do_ovn_lflow_find(&lflow_map->single_od,\n+                                      NULL, stage, priority, match,\n                                       actions, ctrl_meter, hash);\n         if (old_lflow) {\n-            hmapx_add(&old_lflow->od_group, od);\n-            /* Found, different, od count went up. Move to multiple od. */\n-            if (hmapx_count(&old_lflow->od_group) > 1) {\n-                hmap_remove(&lflow_map->single_od, &old_lflow->hmap_node);\n+            if (!hmapx_contains(&old_lflow->od_group, od)) {\n+                /* od not in od_group, we need to add it and move to\n+                 * multiple. */\n                 if (use_parallel_build) {\n-                    hmap_insert_fast(&lflow_map->multiple_od,\n-                                     &old_lflow->hmap_node, hash);\n-                } else {\n-                    hmap_insert(&lflow_map->multiple_od,\n-                                &old_lflow->hmap_node, hash);\n+                    /* Upgrade the lock to write, we are likely to\n+                     * modify data. */\n+                    ovs_rwlock_unlock(&flowtable_lock);\n+                    ovs_rwlock_wrlock(&flowtable_lock);\n+\n+                    /* Check if someone got ahead of us and the flow is already\n+                     * in multiple. */\n+                    if (!hmap_contains(&lflow_map->single_od,\n+                                       &old_lflow->hmap_node)) {\n+                        /* Someone did get ahead of us, add the od to the\n+                         * group. */\n+                        hmapx_add(&old_lflow->od_group, od);\n+                        goto done_update_unlock;\n+                    }\n                 }\n+                ovn_make_multi_lflow(old_lflow, od, lflow_map, hash);\n+                goto done_update_unlock;\n             }\n-        } else {\n-            /* Not found, lookup in multiple od. 
*/\n+        }\n+        if (!old_lflow) {\n+            /* Not found in single, lookup in multiple od. */\n             old_lflow = do_ovn_lflow_find(&lflow_map->multiple_od, NULL,\n                                           stage, priority, match,\n                                           actions, ctrl_meter, hash);\n             if (old_lflow) {\n-                hmapx_add(&old_lflow->od_group, od);\n+                if (!hmapx_contains(&old_lflow->od_group, od)) {\n+                    if (use_parallel_build) {\n+                        /* Upgrade lock to write.*/\n+                        ovs_rwlock_unlock(&flowtable_lock);\n+                        ovs_rwlock_wrlock(&flowtable_lock);\n+                    }\n+                    hmapx_add(&old_lflow->od_group, od);\n+                }\n             }\n         }\n+done_update_unlock:\n+        if (use_parallel_build) {\n+            ovs_rwlock_unlock(&flowtable_lock);\n+        }\n         if (old_lflow) {\n             return;\n         }\n     }\n \n-    lflow = xmalloc(sizeof *lflow);\n-    /* While adding new logical flows we're not setting single datapath, but\n-     * collecting a group.  'od' will be updated later for all flows with only\n-     * one datapath in a group, so it could be hashed correctly. */\n-    ovn_lflow_init(lflow, NULL, stage, priority,\n-                   xstrdup(match), xstrdup(actions),\n-                   io_port ? xstrdup(io_port) : NULL,\n-                   nullable_xstrdup(ctrl_meter),\n-                   ovn_lflow_hint(stage_hint), where);\n-    hmapx_add(&lflow->od_group, od);\n-\n-    /* Insert \"fresh\" lflows into single_od. */\n+    /* Slow Path.\n+     * We could not get away with minimal mostly ro amount of work,\n+     * lock with rw and try to do an insert (may end up repeating\n+     * some of what we do for ro). 
*/\n \n+    if (use_logical_dp_groups && use_parallel_build) {\n+        ovs_rwlock_wrlock(&flowtable_lock);\n+    }\n     if (!use_parallel_build) {\n+        lflow = xmalloc(sizeof *lflow);\n+        /* While adding new logical flows we are not setting single datapath,\n+         * but collecting a group.  'od' will be updated later for all flows\n+         * with only one datapath in a group, so it could be hashed correctly.\n+         */\n+        ovn_lflow_init(lflow, NULL, stage, priority,\n+                       xstrdup(match), xstrdup(actions),\n+                       io_port ? xstrdup(io_port) : NULL,\n+                       nullable_xstrdup(ctrl_meter),\n+                       ovn_lflow_hint(stage_hint), where);\n+        hmapx_add(&lflow->od_group, od);\n         hmap_insert(&lflow_map->single_od, &lflow->hmap_node, hash);\n     } else {\n+        if (use_logical_dp_groups) {\n+            /* Search again in case someone else got here first. */\n+            old_lflow = do_ovn_lflow_find(&lflow_map->single_od,\n+                                          NULL, stage, priority, match,\n+                                          actions, ctrl_meter, hash);\n+            if (old_lflow) {\n+                if (!hmapx_contains(&old_lflow->od_group, od)) {\n+                    ovn_make_multi_lflow(old_lflow, od, lflow_map, hash);\n+                }\n+                goto done_add_unlock;\n+            }\n+            /* Unlikely, but possible, more than one thread got here\n+             * ahead of us while we were wating to acquire a write lock. 
*/\n+            old_lflow = do_ovn_lflow_find(&lflow_map->multiple_od, NULL,\n+                                          stage, priority, match,\n+                                          actions, ctrl_meter, hash);\n+            if (old_lflow) {\n+                hmapx_add(&old_lflow->od_group, od);\n+                goto done_add_unlock;\n+            }\n+        }\n+        lflow = xmalloc(sizeof *lflow);\n+        /* While adding new logical flows we're not setting single datapath,\n+         * but collecting a group.  'od' will be updated later for all\n+         * flows with only * one datapath in a group, so it could be hashed\n+         * correctly. */\n+        ovn_lflow_init(lflow, NULL, stage, priority,\n+                       xstrdup(match), xstrdup(actions),\n+                       io_port ? xstrdup(io_port) : NULL,\n+                       nullable_xstrdup(ctrl_meter),\n+                       ovn_lflow_hint(stage_hint), where);\n+        hmapx_add(&lflow->od_group, od);\n         hmap_insert_fast(&lflow_map->single_od, &lflow->hmap_node, hash);\n     }\n+done_add_unlock:\n+    if (use_logical_dp_groups && use_parallel_build) {\n+        ovs_rwlock_unlock(&flowtable_lock);\n+    }\n }\n \n+#ifdef __clang__\n+#pragma clang diagnostic pop\n+#endif\n+\n /* Adds a row with the specified contents to the Logical_Flow table. 
*/\n static void\n ovn_lflow_add_at(struct lflow_state *lflow_map, struct ovn_datapath *od,\n@@ -4453,15 +4549,8 @@ ovn_lflow_add_at(struct lflow_state *lflow_map, struct ovn_datapath *od,\n                                  priority, match,\n                                  actions);\n \n-    if (use_logical_dp_groups && use_parallel_build) {\n-        lock_hash_row(&lflow_locks, hash);\n-        do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,\n-                         actions, io_port, stage_hint, where, ctrl_meter);\n-        unlock_hash_row(&lflow_locks, hash);\n-    } else {\n-        do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,\n-                         actions, io_port, stage_hint, where, ctrl_meter);\n-    }\n+    do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,\n+                     actions, io_port, stage_hint, where, ctrl_meter);\n }\n \n /* Adds a row with the specified contents to the Logical_Flow table. */\n@@ -4555,15 +4644,9 @@ hmap_safe_remove(struct hmap *hmap, struct hmap_node *node)\n static void\n remove_lflow_from_lflows(struct lflow_state *lflows, struct ovn_lflow *lflow)\n {\n-    if (use_logical_dp_groups && use_parallel_build) {\n-        lock_hash_row(&lflow_locks, lflow->hmap_node.hash);\n-    }\n     if (!hmap_safe_remove(&lflows->multiple_od, &lflow->hmap_node)) {\n         hmap_remove(&lflows->single_od, &lflow->hmap_node);\n     }\n-    if (use_logical_dp_groups && use_parallel_build) {\n-        unlock_hash_row(&lflow_locks, lflow->hmap_node.hash);\n-    }\n }\n \n static void\n@@ -12967,7 +13050,7 @@ build_lswitch_and_lrouter_flows(struct hmap *datapaths, struct hmap *ports,\n         }\n     }\n \n-    if (use_parallel_build && (!use_logical_dp_groups)) {\n+    if (use_parallel_build) {\n         struct lflow_state *lflow_segs;\n         struct lswitch_flow_build_info *lsiv;\n         int index;\n@@ -13197,6 +13280,8 @@ reconcile_lflow(struct ovn_lflow *lflow, struct northd_context 
*ctx,\n     ovn_lflow_destroy(lflows, lflow);\n }\n \n+static bool needs_init = true;\n+\n /* Updates the Logical_Flow and Multicast_Group tables in the OVN_SB database,\n  * constructing their contents based on the OVN_NB database. */\n static void\n@@ -13211,8 +13296,15 @@ build_lflows(struct northd_context *ctx, struct hmap *datapaths,\n \n     fast_hmap_size_for(&lflows.single_od, max_seen_lflow_size);\n     fast_hmap_size_for(&lflows.multiple_od, max_seen_lflow_size);\n-    if (use_parallel_build && use_logical_dp_groups) {\n-        update_hashrow_locks(&lflows.single_od, &lflow_locks);\n+    if (use_parallel_build && use_logical_dp_groups && needs_init) {\n+        ovs_rwlock_init(&flowtable_lock);\n+        /* This happens on first run with parallel+dp_groups.\n+         * db_run will re-read use_parallel_build from config and\n+         * reset it. This way we get correct sizing for\n+         * parallel + dp_groups by doing one single threaded run\n+         * on the first iteration. */\n+        use_parallel_build = false;\n+        needs_init = false;\n     }\n \n \n@@ -15139,7 +15231,6 @@ main(int argc, char *argv[])\n \n     daemonize_complete();\n \n-    init_hash_row_locks(&lflow_locks);\n     use_parallel_build = can_parallelize_hashes(false);\n \n     /* We want to detect (almost) all changes to the ovn-nb db. */\n",
    "prefixes": [
        "ovs-dev",
        "v5",
        "4/4"
    ]
}