get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch (full update).
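
On a public instance, reading a patch needs no authentication: a plain GET on the
patch URL returns the JSON document shown in the transcript below. A minimal
sketch using Python's requests library (an assumption; any HTTP client works),
with the instance URL and patch ID taken from the example response:

    import requests

    # Fetch a single patch from the public Patchwork instance used below.
    # Read-only access needs no token.
    resp = requests.get("http://patchwork.ozlabs.org/api/patches/1526532/")
    resp.raise_for_status()
    patch = resp.json()

    # A few of the fields visible in the response body below.
    print(patch["name"])    # e.g. "[ovs-dev,v7,4/4] northd: Restore parallel build ..."
    print(patch["state"])   # e.g. "superseded"
    print(patch["mbox"])    # URL of the raw mbox for this patch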

GET /api/patches/1526532/
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1526532,
    "url": "http://patchwork.ozlabs.org/api/patches/1526532/",
    "web_url": "http://patchwork.ozlabs.org/project/ovn/patch/20210910141321.14624-4-anton.ivanov@cambridgegreys.com/",
    "project": {
        "id": 68,
        "url": "http://patchwork.ozlabs.org/api/projects/68/",
        "name": "Open Virtual Network development",
        "link_name": "ovn",
        "list_id": "ovs-dev.openvswitch.org",
        "list_email": "ovs-dev@openvswitch.org",
        "web_url": "http://openvswitch.org/",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<20210910141321.14624-4-anton.ivanov@cambridgegreys.com>",
    "list_archive_url": null,
    "date": "2021-09-10T14:13:21",
    "name": "[ovs-dev,v7,4/4] northd: Restore parallel build with dp_groups",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "550a335bf1d3fa5215d1490f530f1fa388910b62",
    "submitter": {
        "id": 71996,
        "url": "http://patchwork.ozlabs.org/api/people/71996/",
        "name": "Anton Ivanov",
        "email": "anton.ivanov@cambridgegreys.com"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/ovn/patch/20210910141321.14624-4-anton.ivanov@cambridgegreys.com/mbox/",
    "series": [
        {
            "id": 261780,
            "url": "http://patchwork.ozlabs.org/api/series/261780/",
            "web_url": "http://patchwork.ozlabs.org/project/ovn/list/?series=261780",
            "date": "2021-09-10T14:13:18",
            "name": "[ovs-dev,v7,1/4] northd: Disable parallel processing for logical_dp_groups",
            "version": 7,
            "mbox": "http://patchwork.ozlabs.org/series/261780/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1526532/comments/",
    "check": "success",
    "checks": "http://patchwork.ozlabs.org/api/patches/1526532/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<ovs-dev-bounces@openvswitch.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "ovs-dev@openvswitch.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "ovs-dev@lists.linuxfoundation.org"
        ],
        "Authentication-Results": "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org\n (client-ip=140.211.166.137; helo=smtp4.osuosl.org;\n envelope-from=ovs-dev-bounces@openvswitch.org; receiver=<UNKNOWN>)",
        "Received": [
            "from smtp4.osuosl.org (smtp4.osuosl.org [140.211.166.137])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4H5dCB16KCz9sX3\n\tfor <incoming@patchwork.ozlabs.org>; Sat, 11 Sep 2021 00:13:42 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby smtp4.osuosl.org (Postfix) with ESMTP id 2D9504018C;\n\tFri, 10 Sep 2021 14:13:40 +0000 (UTC)",
            "from smtp4.osuosl.org ([127.0.0.1])\n\tby localhost (smtp4.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id hnM4Hh-dzoeu; Fri, 10 Sep 2021 14:13:38 +0000 (UTC)",
            "from lists.linuxfoundation.org (lf-lists.osuosl.org\n [IPv6:2605:bc80:3010:104::8cd3:938])\n\tby smtp4.osuosl.org (Postfix) with ESMTPS id EC158401E5;\n\tFri, 10 Sep 2021 14:13:37 +0000 (UTC)",
            "from lf-lists.osuosl.org (localhost [127.0.0.1])\n\tby lists.linuxfoundation.org (Postfix) with ESMTP id C3EC2C0011;\n\tFri, 10 Sep 2021 14:13:37 +0000 (UTC)",
            "from smtp3.osuosl.org (smtp3.osuosl.org [IPv6:2605:bc80:3010::136])\n by lists.linuxfoundation.org (Postfix) with ESMTP id 33BAFC000D\n for <ovs-dev@openvswitch.org>; Fri, 10 Sep 2021 14:13:36 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n by smtp3.osuosl.org (Postfix) with ESMTP id 176A760C15\n for <ovs-dev@openvswitch.org>; Fri, 10 Sep 2021 14:13:36 +0000 (UTC)",
            "from smtp3.osuosl.org ([127.0.0.1])\n by localhost (smtp3.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id 5vpldgxAA9eG for <ovs-dev@openvswitch.org>;\n Fri, 10 Sep 2021 14:13:34 +0000 (UTC)",
            "from www.kot-begemot.co.uk (ivanoab7.miniserver.com [37.128.132.42])\n by smtp3.osuosl.org (Postfix) with ESMTPS id 8867C60603\n for <ovs-dev@openvswitch.org>; Fri, 10 Sep 2021 14:13:34 +0000 (UTC)",
            "from tun252.jain.kot-begemot.co.uk ([192.168.18.6]\n helo=jain.kot-begemot.co.uk)\n by www.kot-begemot.co.uk with esmtps\n (TLS1.3:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.92) (envelope-from <anton.ivanov@cambridgegreys.com>)\n id 1mOhHY-00080D-JI; Fri, 10 Sep 2021 14:13:33 +0000",
            "from jain.kot-begemot.co.uk ([192.168.3.3])\n by jain.kot-begemot.co.uk with esmtp (Exim 4.92)\n (envelope-from <anton.ivanov@cambridgegreys.com>)\n id 1mOhHV-0003oz-0j; Fri, 10 Sep 2021 15:13:31 +0100"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "from auto-whitelisted by SQLgrey-1.8.0",
        "From": "anton.ivanov@cambridgegreys.com",
        "To": "ovs-dev@openvswitch.org",
        "Date": "Fri, 10 Sep 2021 15:13:21 +0100",
        "Message-Id": "<20210910141321.14624-4-anton.ivanov@cambridgegreys.com>",
        "X-Mailer": "git-send-email 2.20.1",
        "In-Reply-To": "<20210910141321.14624-1-anton.ivanov@cambridgegreys.com>",
        "References": "<20210910141321.14624-1-anton.ivanov@cambridgegreys.com>",
        "MIME-Version": "1.0",
        "X-Clacks-Overhead": "GNU Terry Pratchett",
        "Cc": "i.maximets@ovn.org, Anton Ivanov <anton.ivanov@cambridgegreys.com>",
        "Subject": "[ovs-dev] [OVN Patch v7 4/4] northd: Restore parallel build with\n\tdp_groups",
        "X-BeenThere": "ovs-dev@openvswitch.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "<ovs-dev.openvswitch.org>",
        "List-Unsubscribe": "<https://mail.openvswitch.org/mailman/options/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=unsubscribe>",
        "List-Archive": "<http://mail.openvswitch.org/pipermail/ovs-dev/>",
        "List-Post": "<mailto:ovs-dev@openvswitch.org>",
        "List-Help": "<mailto:ovs-dev-request@openvswitch.org?subject=help>",
        "List-Subscribe": "<https://mail.openvswitch.org/mailman/listinfo/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "ovs-dev-bounces@openvswitch.org",
        "Sender": "\"dev\" <ovs-dev-bounces@openvswitch.org>"
    },
    "content": "From: Anton Ivanov <anton.ivanov@cambridgegreys.com>\n\nRestore parallel build with dp groups using rwlock instead\nof per row locking as an underlying mechanism.\n\nThis provides improvement ~ 10% end-to-end on ovn-heater\nunder virutalization despite awakening some qemu gremlin\nwhich makes qemu climb to silly CPU usage. The gain on\nbare metal is likely to be higher.\n\nSigned-off-by: Anton Ivanov <anton.ivanov@cambridgegreys.com>\n---\n northd/ovn-northd.c | 299 ++++++++++++++++++++++++++++++++++----------\n 1 file changed, 234 insertions(+), 65 deletions(-)",
    "diff": "diff --git a/northd/ovn-northd.c b/northd/ovn-northd.c\nindex aee5b9508..16e39ec5e 100644\n--- a/northd/ovn-northd.c\n+++ b/northd/ovn-northd.c\n@@ -59,6 +59,7 @@\n #include \"unixctl.h\"\n #include \"util.h\"\n #include \"uuid.h\"\n+#include \"ovs-thread.h\"\n #include \"openvswitch/vlog.h\"\n \n VLOG_DEFINE_THIS_MODULE(ovn_northd);\n@@ -4372,72 +4373,219 @@ ovn_lflow_init(struct ovn_lflow *lflow, struct ovn_datapath *od,\n static bool use_logical_dp_groups = false;\n static bool use_parallel_build = true;\n \n-static struct hashrow_locks lflow_locks;\n+/* This lock is used to lock the lflow table and all related structures.\n+ * It cannot be a mutex, because most of the accesses are read and there is\n+ * only an occasional write change.\n+ */\n+\n+static struct ovs_rwlock flowtable_lock;\n+\n+static void ovn_make_multi_lflow(struct ovn_lflow *old_lflow,\n+                              struct ovn_datapath *od,\n+                              struct lflow_state *lflow_map,\n+                              uint32_t hash)\n+{\n+    hmapx_add(&old_lflow->od_group, od);\n+    hmap_remove(&lflow_map->single_od, &old_lflow->hmap_node);\n+    if (use_parallel_build) {\n+        hmap_insert_fast(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);\n+    } else {\n+        hmap_insert(&lflow_map->multiple_od, &old_lflow->hmap_node, hash);\n+    }\n+}\n \n /* Adds a row with the specified contents to the Logical_Flow table.\n- * Version to use when locking is required.\n+ *\n+ * Assumptions:\n+ *\n+ * 1. A large proportion of the operations are lookups (reads).\n+ * 2. RW operations are a small proportion of overall adds.\n+ *\n+ * Principles of operation:\n+ * 1. All accesses to the flow table are protected by a rwlock.\n+ * 2. By default, everyone grabs a rd lock so that multiple threads\n+ * can do lookups simultaneously.\n+ * 3. If a change is needed, the rd lock is released and a wr lock\n+ * is acquired instead (the fact that POSIX does not have an \"upgrade\"\n+ * on locks is a major pain, but there is nothing we can do - it's not\n+ * there).\n+ * 4. WR lock operations in rd/wr locking have LOWER priority than RD.\n+ * That is by design and spec. So a request for WR lock may wait for a\n+ * considerable amount of time until it is given a change to lock. That\n+ * means that another thread may get there in the meantime and change\n+ * the data. Hence all wr operations MUST be coded to ensure that they\n+ * are not vulnerable to \"someone pulled this from under my feet\". Re-\n+ * reads, checks for presense, etc.\n+ */\n+\n+/* The code which follows is executed both as single thread and parallel.\n+ * When executed as a single thread locking, re-reading after a lock change\n+ * from rd to wr, etc are not needed and that path does not lock.\n+ * clang thread safety analyzer cannot quite get that idea so we have to\n+ * disable it.\n  */\n+\n static struct ovn_lflow *\n do_ovn_lflow_add(struct lflow_state *lflow_map, struct ovn_datapath *od,\n                  uint32_t hash, enum ovn_stage stage, uint16_t priority,\n                  const char *match, const char *actions, const char *io_port,\n                  const struct ovsdb_idl_row *stage_hint,\n                  const char *where, const char *ctrl_meter)\n+                 OVS_NO_THREAD_SAFETY_ANALYSIS\n {\n \n     struct ovn_lflow *old_lflow;\n     struct ovn_lflow *lflow;\n \n     if (use_logical_dp_groups) {\n-        /* Look up in multiple first. 
*/\n-        old_lflow = do_ovn_lflow_find(&lflow_map->multiple_od, NULL, stage,\n-                                      priority, match,\n+        if (use_parallel_build) {\n+            /* Fast Path. In case we run in parallel, see if we\n+             * can get away without writing - grab a rdlock and check\n+             * if we can get away with as little work as possible.\n+             */\n+            ovs_rwlock_rdlock(&flowtable_lock);\n+        }\n+\n+        /* Look up multiple_od first. That is the more common\n+         * lookup.\n+         */\n+\n+        old_lflow = do_ovn_lflow_find(&lflow_map->multiple_od,\n+                                      NULL, stage, priority, match,\n                                       actions, ctrl_meter, hash);\n+\n         if (old_lflow) {\n-            hmapx_add(&old_lflow->od_group, od);\n+            /* Found, amend od_group. */\n+            if (!use_parallel_build) {\n+                hmapx_add(&old_lflow->od_group, od);\n+            } else {\n+                /* See if we need to add this od before upgrading\n+                 *  the rd lock to a wr.\n+                 */\n+                if (!hmapx_contains(&old_lflow->od_group, od)) {\n+                    if (use_parallel_build) {\n+                        /* Upgrade lock to write.*/\n+                        ovs_rwlock_unlock(&flowtable_lock);\n+                        ovs_rwlock_wrlock(&flowtable_lock);\n+                    }\n+                    /* Add the flow to the group. NOOP if it\n+                     * exists in it. */\n+                    hmapx_add(&old_lflow->od_group, od);\n+                }\n+            }\n         } else {\n             /* Not found, lookup in single od. */\n             old_lflow = do_ovn_lflow_find(&lflow_map->single_od, NULL,\n                                           stage, priority, match,\n                                           actions, ctrl_meter, hash);\n             if (old_lflow) {\n-                hmapx_add(&old_lflow->od_group, od);\n-                /* Found, different, od count went up. Move to multiple od. */\n-                if (hmapx_count(&old_lflow->od_group) > 1) {\n-                    hmap_remove(&lflow_map->single_od, &old_lflow->hmap_node);\n+                /* We found an old single od flow. See if we need to\n+                 * update it.\n+                 */\n+                if (!hmapx_contains(&old_lflow->od_group, od)) {\n+                    /* od not in od_group, we need to add it. The flow\n+                     * becomes a multi-od flow so it must move to\n+                     * multiple_od. */\n                     if (use_parallel_build) {\n-                        hmap_insert_fast(&lflow_map->multiple_od,\n-                                         &old_lflow->hmap_node, hash);\n-                    } else {\n-                        hmap_insert(&lflow_map->multiple_od,\n-                                    &old_lflow->hmap_node, hash);\n+                        /* Upgrade the lock to write, we are likely to\n+                         * modify data. 
Note - this may take a while,\n+                         * so someone may modify the data in the meantime.\n+                         */\n+                        ovs_rwlock_unlock(&flowtable_lock);\n+                        ovs_rwlock_wrlock(&flowtable_lock);\n+\n+                        /* Check if someone modified the data in the meantime.\n+                         */\n+                        if (!hmap_contains(&lflow_map->single_od,\n+                                           &old_lflow->hmap_node)) {\n+                            /* Someone modified the data already. The flow is\n+                             * already in multiple_od. Add the od to\n+                             * the group. If it exists this is a NOOP. */\n+                            hmapx_add(&old_lflow->od_group, od);\n+                            goto done_update_unlock;\n+                        }\n                     }\n+                    ovn_make_multi_lflow(old_lflow, od, lflow_map, hash);\n                 }\n             }\n         }\n+done_update_unlock:\n+        if (use_parallel_build) {\n+            ovs_rwlock_unlock(&flowtable_lock);\n+        }\n         if (old_lflow) {\n             return old_lflow;\n         }\n     }\n \n-    lflow = xmalloc(sizeof *lflow);\n-    /* While adding new logical flows we're not setting single datapath, but\n-     * collecting a group.  'od' will be updated later for all flows with only\n-     * one datapath in a group, so it could be hashed correctly. */\n-    ovn_lflow_init(lflow, NULL, stage, priority,\n-                   xstrdup(match), xstrdup(actions),\n-                   io_port ? xstrdup(io_port) : NULL,\n-                   nullable_xstrdup(ctrl_meter),\n-                   ovn_lflow_hint(stage_hint), where);\n-    hmapx_add(&lflow->od_group, od);\n-\n-    /* Insert \"fresh\" lflows into single_od. */\n-\n+    if (use_logical_dp_groups && use_parallel_build) {\n+        /* Slow Path - insert a new flow.\n+         * We could not get away with minimal mostly ro amount of work.\n+         * We are likely to be modifying data, so we go ahead and\n+         * lock with rw and try to do an insert (may end up repeating\n+         * some of what we do for ro). */\n+        ovs_rwlock_wrlock(&flowtable_lock);\n+    }\n     if (!use_parallel_build) {\n+        lflow = xmalloc(sizeof *lflow);\n+        /* While adding new logical flows we are not setting single datapath,\n+         * but collecting a group.  'od' will be updated later for all flows\n+         * with only one datapath in a group, so it could be hashed correctly.\n+         */\n+        ovn_lflow_init(lflow, NULL, stage, priority,\n+                       xstrdup(match), xstrdup(actions),\n+                       io_port ? xstrdup(io_port) : NULL,\n+                       nullable_xstrdup(ctrl_meter),\n+                       ovn_lflow_hint(stage_hint), where);\n+        hmapx_add(&lflow->od_group, od);\n         hmap_insert(&lflow_map->single_od, &lflow->hmap_node, hash);\n     } else {\n+        if (use_logical_dp_groups) {\n+            /* Search again in case someone added the flow before us. */\n+            lflow = do_ovn_lflow_find(&lflow_map->single_od,\n+                                          NULL, stage, priority, match,\n+                                          actions, ctrl_meter, hash);\n+            if (lflow) {\n+                /* Someone added the flow before us, see if we need to\n+                 * convert it into a multi_od flow. 
*/\n+                if (!hmapx_contains(&lflow->od_group, od)) {\n+                    ovn_make_multi_lflow(lflow, od, lflow_map, hash);\n+                }\n+                goto done_add_unlock;\n+            }\n+            /* Unlikely, but possible, check if than one thread got here\n+             * ahead of us while we were wating to acquire a write lock.\n+             * The flow is not just in the database, but it already has\n+             * more than one od assigned to it.\n+             */\n+            lflow = do_ovn_lflow_find(&lflow_map->multiple_od, NULL,\n+                                          stage, priority, match,\n+                                          actions, ctrl_meter, hash);\n+            if (lflow) {\n+                hmapx_add(&lflow->od_group, od);\n+                goto done_add_unlock;\n+            }\n+        }\n+        /* All race possibilities where a flow has been inserted by\n+         * another thread while we are waiting for a lock have been\n+         * handled, we can go ahead, alloc and insert. */\n+        lflow = xmalloc(sizeof *lflow);\n+        /* While adding new logical flows we're not setting single datapath,\n+         * but collecting a group.  'od' will be updated later for all\n+         * flows with only * one datapath in a group, so it could be hashed\n+         * correctly. */\n+        ovn_lflow_init(lflow, NULL, stage, priority,\n+                       xstrdup(match), xstrdup(actions),\n+                       io_port ? xstrdup(io_port) : NULL,\n+                       nullable_xstrdup(ctrl_meter),\n+                       ovn_lflow_hint(stage_hint), where);\n+        hmapx_add(&lflow->od_group, od);\n         hmap_insert_fast(&lflow_map->single_od, &lflow->hmap_node, hash);\n     }\n+done_add_unlock:\n+    if (use_logical_dp_groups && use_parallel_build) {\n+        ovs_rwlock_unlock(&flowtable_lock);\n+    }\n     return lflow;\n }\n \n@@ -4450,20 +4598,11 @@ ovn_lflow_add_at_with_hash(struct lflow_state *lflow_map,\n                            const struct ovsdb_idl_row *stage_hint,\n                            const char *where, uint32_t hash)\n {\n-    struct ovn_lflow *lflow;\n-\n     ovs_assert(ovn_stage_to_datapath_type(stage) == ovn_datapath_get_type(od));\n-    if (use_logical_dp_groups && use_parallel_build) {\n-        lock_hash_row(&lflow_locks, hash);\n-        lflow = do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,\n+\n+    return do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,\n                                  actions, io_port, stage_hint, where,\n                                  ctrl_meter);\n-        unlock_hash_row(&lflow_locks, hash);\n-    } else {\n-        lflow = do_ovn_lflow_add(lflow_map, od, hash, stage, priority, match,\n-                         actions, io_port, stage_hint, where, ctrl_meter);\n-    }\n-    return lflow;\n }\n \n /* Adds a row with the specified contents to the Logical_Flow table. 
*/\n@@ -4484,22 +4623,16 @@ ovn_lflow_add_at(struct lflow_state *lflow_map, struct ovn_datapath *od,\n                                io_port, ctrl_meter, stage_hint, where, hash);\n }\n \n+\n static bool\n ovn_dp_group_add_with_reference(struct ovn_lflow *lflow_ref,\n-                                struct ovn_datapath *od,\n-                                uint32_t hash)\n+                                struct ovn_datapath *od)\n {\n     if (!use_logical_dp_groups || !lflow_ref) {\n         return false;\n     }\n \n-    if (use_parallel_build) {\n-        lock_hash_row(&lflow_locks, hash);\n-        hmapx_add(&lflow_ref->od_group, od);\n-        unlock_hash_row(&lflow_locks, hash);\n-    } else {\n-        hmapx_add(&lflow_ref->od_group, od);\n-    }\n+    hmapx_add(&lflow_ref->od_group, od);\n \n     return true;\n }\n@@ -4595,9 +4728,6 @@ hmap_safe_remove(struct hmap *hmap, struct hmap_node *node)\n static void\n remove_lflow_from_lflows(struct lflow_state *lflows, struct ovn_lflow *lflow)\n {\n-    if (use_logical_dp_groups && use_parallel_build) {\n-        lock_hash_row(&lflow_locks, lflow->hmap_node.hash);\n-    }\n     if (hmapx_count(&lflow->od_group) > 1) {\n         if (!hmap_safe_remove(&lflows->multiple_od, &lflow->hmap_node)) {\n             hmap_remove(&lflows->single_od, &lflow->hmap_node);\n@@ -4607,9 +4737,6 @@ remove_lflow_from_lflows(struct lflow_state *lflows, struct ovn_lflow *lflow)\n             hmap_remove(&lflows->multiple_od, &lflow->hmap_node);\n         }\n     }\n-    if (use_logical_dp_groups && use_parallel_build) {\n-        unlock_hash_row(&lflow_locks, lflow->hmap_node.hash);\n-    }\n }\n \n static void\n@@ -6518,8 +6645,17 @@ build_lb_rules(struct lflow_state *lflows, struct ovn_northd_lb *lb,\n             if (reject) {\n                 meter = copp_meter_get(COPP_REJECT, od->nbs->copp,\n                                        meter_groups);\n-            } else if (ovn_dp_group_add_with_reference(lflow_ref, od, hash)) {\n-                continue;\n+            } else if (!use_parallel_build) {\n+                /* We can use this shortcut only if running single threaded.\n+                 * If we are running multi-threaded, trying to use it means\n+                 * playing with locks in more than one place and that is a\n+                 * recipe for trouble.\n+                 * If running parallel we let the lflow_add logic sort out\n+                 * any duplicates and od groups.\n+                 */\n+                if (ovn_dp_group_add_with_reference(lflow_ref, od)) {\n+                    continue;\n+                }\n             }\n             lflow_ref = ovn_lflow_add_at_with_hash(lflows, od,\n                     S_SWITCH_IN_STATEFUL, priority,\n@@ -9572,8 +9708,17 @@ build_lrouter_defrag_flows_for_lb(struct ovn_northd_lb *lb,\n                 ds_cstr(match), ds_cstr(&defrag_actions));\n         for (size_t j = 0; j < lb->n_nb_lr; j++) {\n             struct ovn_datapath *od = lb->nb_lr[j];\n-            if (ovn_dp_group_add_with_reference(lflow_ref, od, hash)) {\n-                continue;\n+            if (!use_parallel_build) {\n+                /* We can use this shortcut only if running single threaded.\n+                 * If we are running multi-threaded, trying to use it means\n+                 * playing with locks in more than one place and that is a\n+                 * recipe for trouble.\n+                 * If running parallel we let the lflow_add logic sort out\n+                 * any duplicates and od 
groups.\n+                 */\n+                if (ovn_dp_group_add_with_reference(lflow_ref, od)) {\n+                    continue;\n+                }\n             }\n             lflow_ref = ovn_lflow_add_at_with_hash(lflows, od,\n                                     S_ROUTER_IN_DEFRAG, prio,\n@@ -9636,8 +9781,17 @@ build_lflows_for_unreachable_vips(struct ovn_northd_lb *lb,\n                 continue;\n             }\n \n-            if (ovn_dp_group_add_with_reference(lflow_ref, peer->od, hash)) {\n-                continue;\n+            if (!use_parallel_build) {\n+                /* We can use this shortcut only if running single threaded.\n+                 * If we are running multi-threaded, trying to use it means\n+                 * playing with locks in more than one place and that is a\n+                 * recipe for trouble.\n+                 * If running parallel we let the lflow_add logic sort out\n+                 * any duplicates and od groups.\n+                 */\n+                if (ovn_dp_group_add_with_reference(lflow_ref, peer->od)) {\n+                    continue;\n+                }\n             }\n             lflow_ref = ovn_lflow_add_at_with_hash(lflows, peer->od,\n                                        S_SWITCH_IN_L2_LKUP, 90,\n@@ -13088,7 +13242,7 @@ build_lswitch_and_lrouter_flows(struct hmap *datapaths, struct hmap *ports,\n         }\n     }\n \n-    if (use_parallel_build && (!use_logical_dp_groups)) {\n+    if (use_parallel_build) {\n         struct lflow_state *lflow_segs;\n         struct lswitch_flow_build_info *lsiv;\n         int index;\n@@ -13326,6 +13480,9 @@ reconcile_lflow(struct ovn_lflow *lflow, struct northd_context *ctx,\n     ovn_lflow_destroy(lflows, lflow);\n }\n \n+static bool needs_parallel_init = true;\n+static bool reset_parallel = false;\n+\n /* Updates the Logical_Flow and Multicast_Group tables in the OVN_SB database,\n  * constructing their contents based on the OVN_NB database. 
*/\n static void\n@@ -13340,10 +13497,23 @@ build_lflows(struct northd_context *ctx, struct hmap *datapaths,\n \n     fast_hmap_size_for(&lflows.single_od, max_seen_lflow_size);\n     fast_hmap_size_for(&lflows.multiple_od, max_seen_lflow_size);\n-    if (use_parallel_build && use_logical_dp_groups) {\n-        update_hashrow_locks(&lflows.single_od, &lflow_locks);\n+\n+    if (reset_parallel) {\n+        /* Due to the way parallel is controlled via commands\n+         * it is nearly impossible to trigger, because\n+         * the command  to turn parallel on will nearly always\n+         * come after the first iteration */\n+        use_parallel_build = true;\n+        reset_parallel = false;\n     }\n \n+    if (use_parallel_build && use_logical_dp_groups &&\n+            needs_parallel_init) {\n+        ovs_rwlock_init(&flowtable_lock);\n+        needs_parallel_init = false;\n+        use_parallel_build = false;\n+        reset_parallel = true;\n+    }\n \n     build_lswitch_and_lrouter_flows(datapaths, ports,\n                                     port_groups, &lflows, mcgroups,\n@@ -13386,7 +13556,7 @@ build_lflows(struct northd_context *ctx, struct hmap *datapaths,\n      */\n \n     struct hmap processed_single;\n-    hmap_init(&processed_single);\n+    fast_hmap_size_for(&processed_single, hmap_count(&lflows.single_od));\n \n     uint32_t hash;\n     struct hmapx_node *node;\n@@ -15300,7 +15470,6 @@ main(int argc, char *argv[])\n \n     daemonize_complete();\n \n-    init_hash_row_locks(&lflow_locks);\n     use_parallel_build = can_parallelize_hashes(false);\n \n     /* We want to detect (almost) all changes to the ovn-nb db. */\n",
    "prefixes": [
        "ovs-dev",
        "v7",
        "4/4"
    ]
}
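
Updating a patch goes through PUT or PATCH on the same URL and requires an API
token with sufficient rights on the project (typically a maintainer). A minimal
sketch, again using Python's requests; the token value is a placeholder, and the
exact set of writable fields (state, archived and delegate are the usual ones)
depends on the Patchwork version and your permissions:

    import requests

    # Placeholder token; generate a real one from your Patchwork user profile.
    headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef"}

    # PATCH is a partial update: only the supplied fields are changed.
    resp = requests.patch(
        "http://patchwork.ozlabs.org/api/patches/1526532/",
        headers=headers,
        json={"state": "accepted", "archived": True},
    )
    resp.raise_for_status()
    print(resp.json()["state"])

The mbox, comments and checks URLs embedded in the response above are ordinary
endpoints and can be fetched with plain GETs in the same way.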