Show a patch.

Update a patch.

Update a patch.

GET /api/patches/1524349/
Content-Type: application/json
Vary: Accept

{
    "id": 1524349,
    "url": "",
    "web_url": "",
    "project": {
        "id": 68,
        "url": "",
        "name": "Open Virtual Network development",
        "link_name": "ovn",
        "list_id": "",
        "list_email": "",
        "web_url": "",
        "scm_url": "",
        "webscm_url": "",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<>",
    "list_archive_url": null,
    "date": "2021-09-03T14:28:14",
    "name": "[ovs-dev,v2] Make changes to the parallel processing API to allow pool sizing",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "7d7aae9b6bc15946b435009211b3f434b7e7ce6e",
    "submitter": {
        "id": 71996,
        "url": "",
        "name": "Anton Ivanov",
        "email": ""
    },
    "delegate": null,
    "mbox": "",
    "series": [
        {
            "id": 260912,
            "url": "",
            "web_url": "",
            "date": "2021-09-03T14:28:14",
            "name": "[ovs-dev,v2] Make changes to the parallel processing API to allow pool sizing",
            "version": 2,
            "mbox": ""
        }
    ],
    "comments": "",
    "check": "fail",
    "checks": "",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<>",
        "X-Original-To": [""],
        "Delivered-To": [""],
        "Authentication-Results": ";\n spf=pass (sender SPF authorized)\n (client-ip=;;\n; receiver=<UNKNOWN>)",
        "Received": [
            "from ( [])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby (Postfix) with ESMTPS id 4H1KsW2fJCz9sPf\n\tfor <>; Sat,  4 Sep 2021 00:28:31 +1000 (AEST)",
            "from localhost (localhost [])\n\tby (Postfix) with ESMTP id 094F383B42;\n\tFri,  3 Sep 2021 14:28:28 +0000 (UTC)",
            "from ([])\n\tby localhost ( []) (amavisd-new, port 10024)\n\twith ESMTP id ushUwCZdvmJx; Fri,  3 Sep 2021 14:28:26 +0000 (UTC)",
            "from (\n [IPv6:2605:bc80:3010:104::8cd3:938])\n\tby (Postfix) with ESMTPS id A16D383EC7;\n\tFri,  3 Sep 2021 14:28:25 +0000 (UTC)",
            "from (localhost [])\n\tby (Postfix) with ESMTP id 7A097C0010;\n\tFri,  3 Sep 2021 14:28:25 +0000 (UTC)",
            "from ( [])\n by (Postfix) with ESMTP id 2CD12C0010\n for <>; Fri,  3 Sep 2021 14:28:24 +0000 (UTC)",
            "from localhost (localhost [])\n by (Postfix) with ESMTP id AB2D7407F6\n for <>; Fri,  3 Sep 2021 14:28:23 +0000 (UTC)",
            "from ([])\n by localhost ( []) (amavisd-new, port 10024)\n with ESMTP id U-3eMoJUsBJU for <>;\n Fri,  3 Sep 2021 14:28:22 +0000 (UTC)",
            "from ( [])\n by (Postfix) with ESMTPS id 1E6E5407FE\n for <>; Fri,  3 Sep 2021 14:28:21 +0000 (UTC)",
            "from ([]\n\n by with esmtps\n (TLS1.3:ECDHE_RSA_AES_256_GCM_SHA384:256)\n (Exim 4.92) (envelope-from <>)\n id 1mMAB1-0001iY-Uw\n for; Fri, 03 Sep 2021 14:28:20 +0000",
            "from ([])\n by with esmtp (Exim 4.92)\n (envelope-from <>)\n id 1mMAAx-0006fM-NK; Fri, 03 Sep 2021 15:28:17 +0100"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at",
            "amavisd-new at"
        ],
        "X-Greylist": "from auto-whitelisted by SQLgrey-1.8.0",
        "From": "",
        "To": "",
        "Date": "Fri,  3 Sep 2021 15:28:14 +0100",
        "Message-Id": "<>",
        "X-Mailer": "git-send-email 2.20.1",
        "MIME-Version": "1.0",
        "X-Clacks-Overhead": "GNU Terry Pratchett",
        "Cc": "Anton Ivanov <>",
        "Subject": "[ovs-dev] [OVN Patch v2] Make changes to the parallel processing\n\tAPI to allow pool sizing",
        "X-BeenThere": "",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "<>",
        "List-Unsubscribe": "<>,\n <>",
        "List-Archive": "<>",
        "List-Post": "<>",
        "List-Help": "<>",
        "List-Subscribe": "<>,\n <>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "",
        "Sender": "\"dev\" <>"
    },
    "content": "From: Anton Ivanov <>\n\n1. Make pool size user defineable.\n2. Expose pool destruction.\n3. Make pools resizeable at runtime.\n\nSigned-off-by: Anton Ivanov <>\n---\n lib/ovn-parallel-hmap.c | 202 ++++++++++++++++++++++++++++++----------\n lib/ovn-parallel-hmap.h |  23 ++++-\n northd/ovn-northd.c     |  58 +++++-------\n ovs                     |   2 +-\n 4 files changed, 194 insertions(+), 91 deletions(-)",
    "diff": "diff --git a/lib/ovn-parallel-hmap.c b/lib/ovn-parallel-hmap.c\nindex b8c7ac786..30de457b5 100644\n--- a/lib/ovn-parallel-hmap.c\n+++ b/lib/ovn-parallel-hmap.c\n@@ -51,7 +51,6 @@ static bool can_parallelize = false;\n  * accompanied by a fence. It does not need to be atomic or be\n  * accessed under a lock.\n  */\n-static bool workers_must_exit = false;\n \n static struct ovs_list worker_pools = OVS_LIST_INITIALIZER(&worker_pools);\n \n@@ -70,10 +69,20 @@ static void merge_hash_results(struct worker_pool *pool OVS_UNUSED,\n                                void *fin_result, void *result_frags,\n                                int index);\n \n+\n+static bool init_control(struct worker_control *control, int id,\n+                         struct worker_pool *pool);\n+\n+static void cleanup_control(struct worker_pool *pool, int id);\n+\n+static void free_controls(struct worker_pool *pool);\n+\n+static struct worker_control *alloc_controls(int size);\n+\n bool\n-ovn_stop_parallel_processing(void)\n+ovn_stop_parallel_processing(struct worker_pool *pool)\n {\n-    return workers_must_exit;\n+    return pool->workers_must_exit;\n }\n \n bool\n@@ -92,11 +101,67 @@ ovn_can_parallelize_hashes(bool force_parallel)\n     return can_parallelize;\n }\n \n+\n+void\n+destroy_pool(struct worker_pool *pool) {\n+    char sem_name[256];\n+\n+    free_controls(pool);\n+    sem_close(pool->done);\n+    sprintf(sem_name, MAIN_SEM_NAME, sembase, pool);\n+    sem_unlink(sem_name);\n+    free(pool);\n+}\n+\n+bool\n+ovn_resize_pool(struct worker_pool *pool, int size)\n+{\n+    int i;\n+\n+    ovs_assert(pool != NULL);\n+\n+    if (!size) {\n+        size = pool_size;\n+    }\n+\n+    ovs_mutex_lock(&init_mutex);\n+\n+    if (can_parallelize) {\n+        free_controls(pool);\n+        pool->size = size;\n+\n+        /* Allocate new control structures. 
*/\n+\n+        pool->controls = alloc_controls(size);\n+        pool->workers_must_exit = false;\n+\n+        for (i = 0; i < pool->size; i++) {\n+            if (! init_control(&pool->controls[i], i, pool)) {\n+                goto cleanup;\n+            }\n+        }\n+    }\n+    ovs_mutex_unlock(&init_mutex);\n+    return true;\n+cleanup:\n+\n+    /* Something went wrong when opening semaphores. In this case\n+     * it is better to shut off parallel procesing altogether\n+     */\n+\n+    VLOG_INFO(\"Failed to initialize parallel processing, error %d\", errno);\n+    can_parallelize = false;\n+    free_controls(pool);\n+\n+    ovs_mutex_unlock(&init_mutex);\n+    return false;\n+}\n+\n+\n struct worker_pool *\n-ovn_add_worker_pool(void *(*start)(void *))\n+ovn_add_worker_pool(void *(*start)(void *), int size)\n {\n     struct worker_pool *new_pool = NULL;\n-    struct worker_control *new_control;\n     bool test = false;\n     int i;\n     char sem_name[256];\n@@ -113,38 +178,29 @@ ovn_add_worker_pool(void *(*start)(void *))\n         ovs_mutex_unlock(&init_mutex);\n     }\n \n+    if (!size) {\n+        size = pool_size;\n+    }\n+\n     ovs_mutex_lock(&init_mutex);\n     if (can_parallelize) {\n         new_pool = xmalloc(sizeof(struct worker_pool));\n-        new_pool->size = pool_size;\n-        new_pool->controls = NULL;\n+        new_pool->size = size;\n+        new_pool->start = start;\n         sprintf(sem_name, MAIN_SEM_NAME, sembase, new_pool);\n         new_pool->done = sem_open(sem_name, O_CREAT, S_IRWXU, 0);\n         if (new_pool->done == SEM_FAILED) {\n             goto cleanup;\n         }\n \n-        new_pool->controls =\n-            xmalloc(sizeof(struct worker_control) * new_pool->size);\n+        new_pool->controls = alloc_controls(size);\n+        new_pool->workers_must_exit = false;\n \n         for (i = 0; i < new_pool->size; i++) {\n-            new_control = &new_pool->controls[i];\n-            new_control->id = i;\n-            
new_control->done = new_pool->done;\n-            new_control->data = NULL;\n-            ovs_mutex_init(&new_control->mutex);\n-            new_control->finished = ATOMIC_VAR_INIT(false);\n-            sprintf(sem_name, WORKER_SEM_NAME, sembase, new_pool, i);\n-            new_control->fire = sem_open(sem_name, O_CREAT, S_IRWXU, 0);\n-            if (new_control->fire == SEM_FAILED) {\n+            if (!init_control(&new_pool->controls[i], i, new_pool)) {\n                 goto cleanup;\n             }\n         }\n-\n-        for (i = 0; i < pool_size; i++) {\n-            new_pool->controls[i].worker =\n-                ovs_thread_create(\"worker pool helper\", start, &new_pool->controls[i]);\n-        }\n         ovs_list_push_back(&worker_pools, &new_pool->list_node);\n     }\n     ovs_mutex_unlock(&init_mutex);\n@@ -157,16 +213,7 @@ cleanup:\n \n     VLOG_INFO(\"Failed to initialize parallel processing, error %d\", errno);\n     can_parallelize = false;\n-    if (new_pool->controls) {\n-        for (i = 0; i < new_pool->size; i++) {\n-            if (new_pool->controls[i].fire != SEM_FAILED) {\n-                sem_close(new_pool->controls[i].fire);\n-                sprintf(sem_name, WORKER_SEM_NAME, sembase, new_pool, i);\n-                sem_unlink(sem_name);\n-                break; /* semaphores past this one are uninitialized */\n-            }\n-        }\n-    }\n+    free_controls(new_pool);\n     if (new_pool->done != SEM_FAILED) {\n         sem_close(new_pool->done);\n         sprintf(sem_name, MAIN_SEM_NAME, sembase, new_pool);\n@@ -176,7 +223,6 @@ cleanup:\n     return NULL;\n }\n \n-\n /* Initializes 'hmap' as an empty hash table with mask N. 
*/\n void\n ovn_fast_hmap_init(struct hmap *hmap, ssize_t mask)\n@@ -365,14 +411,84 @@ ovn_update_hashrow_locks(struct hmap *lflows, struct hashrow_locks *hrl)\n     }\n }\n \n+static bool\n+init_control(struct worker_control *control, int id,\n+             struct worker_pool *pool)\n+{\n+    char sem_name[256];\n+    control->id = id;\n+    control->done = pool->done;\n+    control->data = NULL;\n+    ovs_mutex_init(&control->mutex);\n+    control->finished = ATOMIC_VAR_INIT(false);\n+    sprintf(sem_name, WORKER_SEM_NAME, sembase, pool, id);\n+    control->fire = sem_open(sem_name, O_CREAT, S_IRWXU, 0);\n+    control->pool = pool;\n+    control->worker = 0;\n+    if (control->fire == SEM_FAILED) {\n+        return false;\n+    }\n+    control->worker =\n+        ovs_thread_create(\"worker pool helper\", pool->start, control);\n+    return true;\n+}\n+\n static void\n-worker_pool_hook(void *aux OVS_UNUSED) {\n+cleanup_control(struct worker_pool *pool, int id)\n+{\n+    char sem_name[256];\n+    struct worker_control *control = &pool->controls[id];\n+\n+    if (control->fire != SEM_FAILED) {\n+        sem_close(control->fire);\n+        sprintf(sem_name, WORKER_SEM_NAME, sembase, pool, id);\n+        sem_unlink(sem_name);\n+    }\n+}\n+\n+static void\n+free_controls(struct worker_pool *pool)\n+{\n     int i;\n+    if (pool->controls) {\n+        pool->workers_must_exit = true;\n+        for (i = 0; i < pool->size ; i++) {\n+            if (pool->controls[i].fire != SEM_FAILED) {\n+                sem_post(pool->controls[i].fire);\n+            }\n+        }\n+        for (i = 0; i < pool->size ; i++) {\n+            if (pool->controls[i].worker) {\n+                pthread_join(pool->controls[i].worker, NULL);\n+                pool->controls[i].worker = 0;\n+            }\n+        }\n+        for (i = 0; i < pool->size; i++) {\n+                cleanup_control(pool, i);\n+            }\n+        free(pool->controls);\n+        pool->controls = NULL;\n+        
pool->workers_must_exit = false;\n+    }\n+}\n+\n+static struct worker_control *alloc_controls(int size)\n+{\n+    int i;\n+    struct worker_control *controls =\n+        xcalloc(sizeof(struct worker_control), size);\n+\n+    for (i = 0; i < size ; i++) {\n+        controls[i].fire = SEM_FAILED;\n+    }\n+    return controls;\n+}\n+\n+static void\n+worker_pool_hook(void *aux OVS_UNUSED) {\n     static struct worker_pool *pool;\n     char sem_name[256];\n \n-    workers_must_exit = true;\n-\n     /* All workers must honour the must_exit flag and check for it regularly.\n      * We can make it atomic and check it via atomics in workers, but that\n      * is not really necessary as it is set just once - when the program\n@@ -383,17 +499,7 @@ worker_pool_hook(void *aux OVS_UNUSED) {\n     /* Wake up the workers after the must_exit flag has been set */\n \n     LIST_FOR_EACH (pool, list_node, &worker_pools) {\n-        for (i = 0; i < pool->size ; i++) {\n-            sem_post(pool->controls[i].fire);\n-        }\n-        for (i = 0; i < pool->size ; i++) {\n-            pthread_join(pool->controls[i].worker, NULL);\n-        }\n-        for (i = 0; i < pool->size ; i++) {\n-            sem_close(pool->controls[i].fire);\n-            sprintf(sem_name, WORKER_SEM_NAME, sembase, pool, i);\n-            sem_unlink(sem_name);\n-        }\n+        free_controls(pool);\n         sem_close(pool->done);\n         sprintf(sem_name, MAIN_SEM_NAME, sembase, pool);\n         sem_unlink(sem_name);\ndiff --git a/lib/ovn-parallel-hmap.h b/lib/ovn-parallel-hmap.h\nindex 2df132ea8..4708f41f2 100644\n--- a/lib/ovn-parallel-hmap.h\n+++ b/lib/ovn-parallel-hmap.h\n@@ -83,6 +83,7 @@ struct worker_control {\n     void *data; /* Pointer to data to be processed. */\n     void *workload; /* back-pointer to the worker pool structure. 
*/\n     pthread_t worker;\n+    struct worker_pool *pool;\n };\n \n struct worker_pool {\n@@ -90,16 +91,21 @@ struct worker_pool {\n     struct ovs_list list_node; /* List of pools - used in cleanup/exit. */\n     struct worker_control *controls; /* \"Handles\" in this pool. */\n     sem_t *done; /* Work completion semaphorew. */\n+    void *(*start)(void *); /* Work function. */\n+    bool workers_must_exit; /* Pool to be destroyed flag. */\n };\n \n /* Add a worker pool for thread function start() which expects a pointer to\n- * a worker_control structure as an argument. */\n+ * a worker_control structure as an argument.\n+ * If size is non-zero, it is used for pool sizing. If size is zero, pool\n+ * size uses system defaults.\n+ */\n \n-struct worker_pool *ovn_add_worker_pool(void *(*start)(void *));\n+struct worker_pool *ovn_add_worker_pool(void *(*start)(void *), int size);\n \n /* Setting this to true will make all processing threads exit */\n \n-bool ovn_stop_parallel_processing(void);\n+bool ovn_stop_parallel_processing(struct worker_pool *pool);\n \n /* Build a hmap pre-sized for size elements */\n \n@@ -253,6 +259,10 @@ static inline void init_hash_row_locks(struct hashrow_locks *hrl)\n \n bool ovn_can_parallelize_hashes(bool force_parallel);\n \n+void ovn_destroy_pool(struct worker_pool *pool);\n+\n+bool ovn_resize_pool(struct worker_pool *pool, int size);\n+\n /* Use the OVN library functions for stuff which OVS has not defined\n  * If OVS has defined these, they will still compile using the OVN\n  * local names, but will be dropped by the linker in favour of the OVS\n@@ -263,9 +273,9 @@ bool ovn_can_parallelize_hashes(bool force_parallel);\n \n #define can_parallelize_hashes(force) ovn_can_parallelize_hashes(force)\n \n-#define stop_parallel_processing() ovn_stop_parallel_processing()\n+#define stop_parallel_processing(pool) ovn_stop_parallel_processing(pool)\n \n-#define add_worker_pool(start) ovn_add_worker_pool(start)\n+#define 
add_worker_pool(start, size) ovn_add_worker_pool(start, size)\n \n #define fast_hmap_size_for(hmap, size) ovn_fast_hmap_size_for(hmap, size)\n \n@@ -286,6 +296,9 @@ bool ovn_can_parallelize_hashes(bool force_parallel);\n #define run_pool_callback(pool, fin_result, result_frags, helper_func) \\\n     ovn_run_pool_callback(pool, fin_result, result_frags, helper_func)\n \n+#define destroy_pool(pool) ovn_destroy_pool(pool)\n+\n+#define resize_pool(pool, size) ovn_resize_pool(pool, size)\n \n \n #ifdef __clang__\ndiff --git a/northd/ovn-northd.c b/northd/ovn-northd.c\nindex ee761cef0..324800c32 100644\n--- a/northd/ovn-northd.c\n+++ b/northd/ovn-northd.c\n@@ -12828,16 +12828,10 @@ build_lswitch_and_lrouter_iterate_by_op(struct ovn_port *op,\n                                       &lsi->actions);\n }\n \n-struct lflows_thread_pool {\n-    struct worker_pool *pool;\n-};\n-\n-\n static void *\n build_lflows_thread(void *arg)\n {\n     struct worker_control *control = (struct worker_control *) arg;\n-    struct lflows_thread_pool *workload;\n     struct lswitch_flow_build_info *lsi;\n \n     struct ovn_datapath *od;\n@@ -12846,21 +12840,21 @@ build_lflows_thread(void *arg)\n     struct ovn_igmp_group *igmp_group;\n     int bnum;\n \n-    while (!stop_parallel_processing()) {\n+\n+    while (!stop_parallel_processing(control->pool)) {\n         wait_for_work(control);\n-        workload = (struct lflows_thread_pool *) control->workload;\n         lsi = (struct lswitch_flow_build_info *) control->data;\n-        if (stop_parallel_processing()) {\n+        if (stop_parallel_processing(control->pool)) {\n             return NULL;\n         }\n-        if (lsi && workload) {\n+        if (lsi) {\n             /* Iterate over bucket ThreadID, ThreadID+size, ... 
*/\n             for (bnum = control->id;\n                     bnum <= lsi->datapaths->mask;\n-                    bnum += workload->pool->size)\n+                    bnum += control->pool->size)\n             {\n                 HMAP_FOR_EACH_IN_PARALLEL (od, key_node, bnum, lsi->datapaths) {\n-                    if (stop_parallel_processing()) {\n+                    if (stop_parallel_processing(control->pool)) {\n                         return NULL;\n                     }\n                     build_lswitch_and_lrouter_iterate_by_od(od, lsi);\n@@ -12868,10 +12862,10 @@ build_lflows_thread(void *arg)\n             }\n             for (bnum = control->id;\n                     bnum <= lsi->ports->mask;\n-                    bnum += workload->pool->size)\n+                    bnum += control->pool->size)\n             {\n                 HMAP_FOR_EACH_IN_PARALLEL (op, key_node, bnum, lsi->ports) {\n-                    if (stop_parallel_processing()) {\n+                    if (stop_parallel_processing(control->pool)) {\n                         return NULL;\n                     }\n                     build_lswitch_and_lrouter_iterate_by_op(op, lsi);\n@@ -12879,10 +12873,10 @@ build_lflows_thread(void *arg)\n             }\n             for (bnum = control->id;\n                     bnum <= lsi->lbs->mask;\n-                    bnum += workload->pool->size)\n+                    bnum += control->pool->size)\n             {\n                 HMAP_FOR_EACH_IN_PARALLEL (lb, hmap_node, bnum, lsi->lbs) {\n-                    if (stop_parallel_processing()) {\n+                    if (stop_parallel_processing(control->pool)) {\n                         return NULL;\n                     }\n                     build_lswitch_arp_nd_service_monitor(lb, lsi->lflows,\n@@ -12900,11 +12894,11 @@ build_lflows_thread(void *arg)\n             }\n             for (bnum = control->id;\n                     bnum <= lsi->igmp_groups->mask;\n-                    bnum += 
workload->pool->size)\n+                    bnum += control->pool->size)\n             {\n                 HMAP_FOR_EACH_IN_PARALLEL (\n                         igmp_group, hmap_node, bnum, lsi->igmp_groups) {\n-                    if (stop_parallel_processing()) {\n+                    if (stop_parallel_processing(control->pool)) {\n                         return NULL;\n                     }\n                     build_lswitch_ip_mcast_igmp_mld(igmp_group, lsi->lflows,\n@@ -12919,24 +12913,14 @@ build_lflows_thread(void *arg)\n }\n \n static bool pool_init_done = false;\n-static struct lflows_thread_pool *build_lflows_pool = NULL;\n+static struct worker_pool *build_lflows_pool = NULL;\n \n static void\n init_lflows_thread_pool(void)\n {\n-    int index;\n-\n     if (!pool_init_done) {\n-        struct worker_pool *pool = add_worker_pool(build_lflows_thread);\n+        build_lflows_pool = add_worker_pool(build_lflows_thread, 0);\n         pool_init_done = true;\n-        if (pool) {\n-            build_lflows_pool = xmalloc(sizeof(*build_lflows_pool));\n-            build_lflows_pool->pool = pool;\n-            for (index = 0; index < build_lflows_pool->pool->size; index++) {\n-                build_lflows_pool->pool->controls[index].workload =\n-                    build_lflows_pool;\n-            }\n-        }\n     }\n }\n \n@@ -12979,16 +12963,16 @@ build_lswitch_and_lrouter_flows(struct hmap *datapaths, struct hmap *ports,\n         struct lswitch_flow_build_info *lsiv;\n         int index;\n \n-        lsiv = xcalloc(sizeof(*lsiv), build_lflows_pool->pool->size);\n+        lsiv = xcalloc(sizeof(*lsiv), build_lflows_pool->size);\n         if (use_logical_dp_groups) {\n             lflow_segs = NULL;\n         } else {\n-            lflow_segs = xcalloc(sizeof(*lflow_segs), build_lflows_pool->pool->size);\n+            lflow_segs = xcalloc(sizeof(*lflow_segs), build_lflows_pool->size);\n         }\n \n         /* Set up \"work chunks\" for each thread to work 
on. */\n \n-        for (index = 0; index < build_lflows_pool->pool->size; index++) {\n+        for (index = 0; index < build_lflows_pool->size; index++) {\n             if (use_logical_dp_groups) {\n                 /* if dp_groups are in use we lock a shared lflows hash\n                  * on a per-bucket level instead of merging hash frags */\n@@ -13010,17 +12994,17 @@ build_lswitch_and_lrouter_flows(struct hmap *datapaths, struct hmap *ports,\n             ds_init(&lsiv[index].match);\n             ds_init(&lsiv[index].actions);\n \n-            build_lflows_pool->pool->controls[index].data = &lsiv[index];\n+            build_lflows_pool->controls[index].data = &lsiv[index];\n         }\n \n         /* Run thread pool. */\n         if (use_logical_dp_groups) {\n-            run_pool_callback(build_lflows_pool->pool, NULL, NULL, noop_callback);\n+            run_pool_callback(build_lflows_pool, NULL, NULL, noop_callback);\n         } else {\n-            run_pool_hash(build_lflows_pool->pool, lflows, lflow_segs);\n+            run_pool_hash(build_lflows_pool, lflows, lflow_segs);\n         }\n \n-        for (index = 0; index < build_lflows_pool->pool->size; index++) {\n+        for (index = 0; index < build_lflows_pool->size; index++) {\n             ds_destroy(&lsiv[index].match);\n             ds_destroy(&lsiv[index].actions);\n         }\ndiff --git a/ovs b/ovs\nindex 748010ff3..50e5523b9 160000\n--- a/ovs\n+++ b/ovs\n@@ -1 +1 @@\n-Subproject commit 748010ff304b7cd2c43f4eb98a554433f0df07f9\n+Subproject commit 50e5523b9b2b154e5fafc5acdcdec85e9cc5a330\n",
    "prefixes": [