get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Update a patch (a full update; all writable fields are replaced).

GET /api/patches/1525722/
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 1525722,
    "url": "http://patchwork.ozlabs.org/api/patches/1525722/",
    "web_url": "http://patchwork.ozlabs.org/project/openvswitch/patch/98ff0b38dcdd65f3a90ee84b9640a39a33fe0a58.1631094144.git.grive@u256.net/",
    "project": {
        "id": 47,
        "url": "http://patchwork.ozlabs.org/api/projects/47/",
        "name": "Open vSwitch",
        "link_name": "openvswitch",
        "list_id": "ovs-dev.openvswitch.org",
        "list_email": "ovs-dev@openvswitch.org",
        "web_url": "http://openvswitch.org/",
        "scm_url": "git@github.com:openvswitch/ovs.git",
        "webscm_url": "https://github.com/openvswitch/ovs",
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<98ff0b38dcdd65f3a90ee84b9640a39a33fe0a58.1631094144.git.grive@u256.net>",
    "list_archive_url": null,
    "date": "2021-09-08T09:47:37",
    "name": "[ovs-dev,v5,13/27] id-fpool: Module for fast ID generation",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "6f278641011efa54ef71335c7cb774995c0f9540",
    "submitter": {
        "id": 78795,
        "url": "http://patchwork.ozlabs.org/api/people/78795/",
        "name": "Gaëtan Rivet",
        "email": "grive@u256.net"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/openvswitch/patch/98ff0b38dcdd65f3a90ee84b9640a39a33fe0a58.1631094144.git.grive@u256.net/mbox/",
    "series": [
        {
            "id": 261424,
            "url": "http://patchwork.ozlabs.org/api/series/261424/",
            "web_url": "http://patchwork.ozlabs.org/project/openvswitch/list/?series=261424",
            "date": "2021-09-08T09:47:24",
            "name": "dpif-netdev: Parallel offload processing",
            "version": 5,
            "mbox": "http://patchwork.ozlabs.org/series/261424/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/1525722/comments/",
    "check": "success",
    "checks": "http://patchwork.ozlabs.org/api/patches/1525722/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<ovs-dev-bounces@openvswitch.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "ovs-dev@openvswitch.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@bilbo.ozlabs.org",
            "ovs-dev@lists.linuxfoundation.org"
        ],
        "Authentication-Results": [
            "ozlabs.org;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=u256.net header.i=@u256.net header.a=rsa-sha256\n header.s=fm2 header.b=b7F4Ok6y;\n\tdkim=fail reason=\"signature verification failed\" (2048-bit key;\n unprotected) header.d=messagingengine.com header.i=@messagingengine.com\n header.a=rsa-sha256 header.s=fm3 header.b=eYANWF2G;\n\tdkim-atps=neutral",
            "ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=openvswitch.org\n (client-ip=140.211.166.133; helo=smtp2.osuosl.org;\n envelope-from=ovs-dev-bounces@openvswitch.org; receiver=<UNKNOWN>)",
            "smtp3.osuosl.org (amavisd-new);\n dkim=pass (2048-bit key) header.d=u256.net header.b=\"b7F4Ok6y\";\n dkim=pass (2048-bit key) header.d=messagingengine.com\n header.b=\"eYANWF2G\""
        ],
        "Received": [
            "from smtp2.osuosl.org (smtp2.osuosl.org [140.211.166.133])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest\n SHA256)\n\t(No client certificate requested)\n\tby ozlabs.org (Postfix) with ESMTPS id 4H4HQw65s6z9sW8\n\tfor <incoming@patchwork.ozlabs.org>; Wed,  8 Sep 2021 19:49:12 +1000 (AEST)",
            "from localhost (localhost [127.0.0.1])\n\tby smtp2.osuosl.org (Postfix) with ESMTP id EACE44078B;\n\tWed,  8 Sep 2021 09:49:10 +0000 (UTC)",
            "from smtp2.osuosl.org ([127.0.0.1])\n\tby localhost (smtp2.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n\twith ESMTP id wTOe6VG2JMee; Wed,  8 Sep 2021 09:49:06 +0000 (UTC)",
            "from lists.linuxfoundation.org (lf-lists.osuosl.org\n [IPv6:2605:bc80:3010:104::8cd3:938])\n\tby smtp2.osuosl.org (Postfix) with ESMTPS id 9F8FB406E6;\n\tWed,  8 Sep 2021 09:49:05 +0000 (UTC)",
            "from lf-lists.osuosl.org (localhost [127.0.0.1])\n\tby lists.linuxfoundation.org (Postfix) with ESMTP id EC0ECC0020;\n\tWed,  8 Sep 2021 09:49:03 +0000 (UTC)",
            "from smtp3.osuosl.org (smtp3.osuosl.org [IPv6:2605:bc80:3010::136])\n by lists.linuxfoundation.org (Postfix) with ESMTP id 1DB9DC001C\n for <ovs-dev@openvswitch.org>; Wed,  8 Sep 2021 09:49:02 +0000 (UTC)",
            "from localhost (localhost [127.0.0.1])\n by smtp3.osuosl.org (Postfix) with ESMTP id 59A8060776\n for <ovs-dev@openvswitch.org>; Wed,  8 Sep 2021 09:48:26 +0000 (UTC)",
            "from smtp3.osuosl.org ([127.0.0.1])\n by localhost (smtp3.osuosl.org [127.0.0.1]) (amavisd-new, port 10024)\n with ESMTP id le4KbUakIVe3 for <ovs-dev@openvswitch.org>;\n Wed,  8 Sep 2021 09:48:22 +0000 (UTC)",
            "from wout3-smtp.messagingengine.com (wout3-smtp.messagingengine.com\n [64.147.123.19])\n by smtp3.osuosl.org (Postfix) with ESMTPS id E0E0F6138B\n for <ovs-dev@openvswitch.org>; Wed,  8 Sep 2021 09:48:22 +0000 (UTC)",
            "from compute3.internal (compute3.nyi.internal [10.202.2.43])\n by mailout.west.internal (Postfix) with ESMTP id 4E7CF32009CE;\n Wed,  8 Sep 2021 05:48:22 -0400 (EDT)",
            "from mailfrontend2 ([10.202.2.163])\n by compute3.internal (MEProxy); Wed, 08 Sep 2021 05:48:22 -0400",
            "by mail.messagingengine.com (Postfix) with ESMTPA; Wed,\n 8 Sep 2021 05:48:21 -0400 (EDT)"
        ],
        "X-Virus-Scanned": [
            "amavisd-new at osuosl.org",
            "amavisd-new at osuosl.org"
        ],
        "X-Greylist": "from auto-whitelisted by SQLgrey-1.8.0",
        "DKIM-Signature": [
            "v=1; a=rsa-sha256; c=relaxed/relaxed; d=u256.net; h=from\n :to:cc:subject:date:message-id:in-reply-to:references\n :mime-version:content-transfer-encoding; s=fm2; bh=4gVzHNBX6DyGN\n nH9+ghKia4iopm2nR8XrIK3gjcevtY=; b=b7F4Ok6yKqYI9PVn9y7kiJOWowsQb\n 9vQHBOD6TzSZuf6p6TvO8ODK6Y0IUerChnCwyzyhhslvdEIiTciZnVcC2GDrPH3N\n Ap2MPjufxO1lNhoMZGAKPeIqzSam/LZe8FVAbL5YUAbrtbSdyXVqaTRK0FQb7ke0\n a4ju8Yi2O0kEBM9+bX7Y2mTqFv6n5E9PV3eOYIPSQ7qKqf258olWTeAPD1aiWP3J\n mvHmOO8y9HU0W1W2xPyIYeP61Wm6ppntzbcsOaBFQjA+0XtFluf1ydD/4bLkFj/F\n gLBD8vJGgP3yekc7NVKif65R+c7kRwX83kBtb8nEDqj5ue5559KJVy2VA==",
            "v=1; a=rsa-sha256; c=relaxed/relaxed; d=\n messagingengine.com; h=cc:content-transfer-encoding:date:from\n :in-reply-to:message-id:mime-version:references:subject:to\n :x-me-proxy:x-me-proxy:x-me-sender:x-me-sender:x-sasl-enc; s=\n fm3; bh=4gVzHNBX6DyGNnH9+ghKia4iopm2nR8XrIK3gjcevtY=; b=eYANWF2G\n 8eZN1Evp8JcGsvMHvQ8NZ5sjN9xXA+1kNxusZkAeNm+8AH0BFke0/RpZ5yrSE37T\n 7PoNkQKry2yVSqqCwCjQGm+r44MC0UZu4zKHy2497Ta3NQvRpfWlg3SvBfDXMC60\n mqGTD2Wy9lv4DKik50ISovLdt3Nh1+iPNYoaC8gtRAzXvkpyisYDja+/AQ9n8FaE\n KaVPkfTqdR0Eihdar99GaJziBmrzfZS/IwvWknpWu/tlVefOLLi6GEFTY6Iacf6R\n WHCqt3fDn7HkBj5iIzqwE2mqGkhRs3blxgCz5jgy5fkV41v8/hcsEDoT8n7uJUU9\n H+9MBbhHaJPiJA=="
        ],
        "X-ME-Sender": "<xms:5YY4YV4HtLg9XvQxMwAYr80GqoOVNKwX8xEC_mwx-Zcx57WZaKTW9Q>\n <xme:5YY4YS6z4_nYN20hY5f5bBlmgXCVfb4HfjqMGpQksRVYKMKrhV4M6B136y8Pd_vWX\n R3vrZH2_lYuafsEOcM>",
        "X-ME-Received": "\n <xmr:5YY4Ycf2tgIFx9XgfytAsPREy6-foRwCobo9rf3MPYmjYa4TYWDKAsn8NiM1Yyl-ofDZ9bF4v4XK71d1HbSh3xPnxQ>",
        "X-ME-Proxy-Cause": "\n gggruggvucftvghtrhhoucdtuddrgedvtddrudefjedgudekucetufdoteggodetrfdotf\n fvucfrrhhofhhilhgvmecuhfgrshhtofgrihhlpdfqfgfvpdfurfetoffkrfgpnffqhgen\n uceurghilhhouhhtmecufedttdenucesvcftvggtihhpihgvnhhtshculddquddttddmne\n cujfgurhephffvufffkffojghfggfgsedtkeertdertddtnecuhfhrohhmpefirggvthgr\n nhcutfhivhgvthcuoehgrhhivhgvsehuvdehiedrnhgvtheqnecuggftrfgrthhtvghrnh\n epudfhfeeikeeiiedutefgheekledukedtkeejheduheffteeiudehiefgkedvleefnecu\n ffhomhgrihhnpegrphgrtghhvgdrohhrghenucevlhhushhtvghrufhiiigvpedtnecurf\n grrhgrmhepmhgrihhlfhhrohhmpehgrhhivhgvsehuvdehiedrnhgvth",
        "X-ME-Proxy": "<xmx:5YY4YeJI9d8Ii-f7Qw0jpc49lpPnZeGOyWlSQ0uYnJZpjNvzT0JRsg>\n <xmx:5YY4YZLYhiNcVQ9Auv12r8ICWmh7_-DpHANTam8CvzqUYXDUkSVrCQ>\n <xmx:5YY4YXz-ewrlC9iJNvgl3dAGzGQ2QAU1aHfRalc8LpJCb08TrdAZbw>\n <xmx:5YY4YbyF62imiQfBta6JI9Es-H7HxQh7XZ2phQg5qw-aRE3wjVfQrg>",
        "From": "Gaetan Rivet <grive@u256.net>",
        "To": "ovs-dev@openvswitch.org",
        "Date": "Wed,  8 Sep 2021 11:47:37 +0200",
        "Message-Id": "\n <98ff0b38dcdd65f3a90ee84b9640a39a33fe0a58.1631094144.git.grive@u256.net>",
        "X-Mailer": "git-send-email 2.31.1",
        "In-Reply-To": "<cover.1631094144.git.grive@u256.net>",
        "References": "<cover.1631094144.git.grive@u256.net>",
        "MIME-Version": "1.0",
        "Cc": "Maxime Coquelin <maxime.coquelin@redhat.com>",
        "Subject": "[ovs-dev] [PATCH v5 13/27] id-fpool: Module for fast ID generation",
        "X-BeenThere": "ovs-dev@openvswitch.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "<ovs-dev.openvswitch.org>",
        "List-Unsubscribe": "<https://mail.openvswitch.org/mailman/options/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=unsubscribe>",
        "List-Archive": "<http://mail.openvswitch.org/pipermail/ovs-dev/>",
        "List-Post": "<mailto:ovs-dev@openvswitch.org>",
        "List-Help": "<mailto:ovs-dev-request@openvswitch.org?subject=help>",
        "List-Subscribe": "<https://mail.openvswitch.org/mailman/listinfo/ovs-dev>,\n <mailto:ovs-dev-request@openvswitch.org?subject=subscribe>",
        "Content-Type": "text/plain; charset=\"us-ascii\"",
        "Content-Transfer-Encoding": "7bit",
        "Errors-To": "ovs-dev-bounces@openvswitch.org",
        "Sender": "\"dev\" <ovs-dev-bounces@openvswitch.org>"
    },
    "content": "The current id-pool module is slow to allocate the\nnext valid ID, and can be optimized when restricting\nsome properties of the pool.\n\nThose restrictions are:\n\n  * No ability to add a random ID to the pool.\n\n  * A new ID is no more the smallest possible ID.\n    It is however guaranteed to be in the range of\n\n       [floor, last_alloc + nb_user * cache_size + 1].\n\n    where 'cache_size' is the number of ID in each per-user\n    cache.  It is defined as 'ID_FPOOL_CACHE_SIZE' to 64.\n\n  * A user should never free an ID that is not allocated.\n    No checks are done and doing so will duplicate the spurious\n    ID.  Refcounting or other memory management scheme should\n    be used to ensure an object and its ID are only freed once.\n\nThis allocator is designed to scale reasonably well in multithread\nsetup.  As it is aimed at being a faster replacement to the current\nid-pool, a benchmark has been implemented alongside unit tests.\n\nThe benchmark is composed of 4 rounds: 'new', 'del', 'mix', and 'rnd'.\nRespectively\n\n  + 'new': only allocate IDs\n  + 'del': only free IDs\n  + 'mix': allocate, sequential free, then allocate ID.\n  + 'rnd': allocate, random free, allocate ID.\n\nRandomized freeing is done by swapping the latest allocated ID with any\nfrom the range of currently allocated ID, which is reminiscent of the\nFisher-Yates shuffle.  
This evaluates freeing non-sequential IDs,\nwhich is the more natural use-case.\n\nFor this specific round, the id-pool performance is such that a timeout\nof 10 seconds is added to the benchmark:\n\n   $ ./tests/ovstest test-id-fpool benchmark 10000 1\n   Benchmarking n=10000 on 1 thread.\n    type\\thread:       1    Avg\n   id-fpool new:       1      1 ms\n   id-fpool del:       1      1 ms\n   id-fpool mix:       2      2 ms\n   id-fpool rnd:       2      2 ms\n    id-pool new:       4      4 ms\n    id-pool del:       2      2 ms\n    id-pool mix:       6      6 ms\n    id-pool rnd:     431    431 ms\n\n   $ ./tests/ovstest test-id-fpool benchmark 100000 1\n   Benchmarking n=100000 on 1 thread.\n    type\\thread:       1    Avg\n   id-fpool new:       2      2 ms\n   id-fpool del:       2      2 ms\n   id-fpool mix:       3      3 ms\n   id-fpool rnd:       4      4 ms\n    id-pool new:      12     12 ms\n    id-pool del:       5      5 ms\n    id-pool mix:      16     16 ms\n    id-pool rnd:  10000+     -1 ms\n\n   $ ./tests/ovstest test-id-fpool benchmark 1000000 1\n   Benchmarking n=1000000 on 1 thread.\n    type\\thread:       1    Avg\n   id-fpool new:      15     15 ms\n   id-fpool del:      12     12 ms\n   id-fpool mix:      34     34 ms\n   id-fpool rnd:      48     48 ms\n    id-pool new:     276    276 ms\n    id-pool del:     286    286 ms\n    id-pool mix:     448    448 ms\n    id-pool rnd:  10000+     -1 ms\n\nRunning only a performance test on the fast pool:\n\n   $ ./tests/ovstest test-id-fpool perf 1000000 1\n   Benchmarking n=1000000 on 1 thread.\n    type\\thread:       1    Avg\n   id-fpool new:      15     15 ms\n   id-fpool del:      12     12 ms\n   id-fpool mix:      34     34 ms\n   id-fpool rnd:      47     47 ms\n\n   $ ./tests/ovstest test-id-fpool perf 1000000 2\n   Benchmarking n=1000000 on 2 threads.\n    type\\thread:       1      2    Avg\n   id-fpool new:      11     11     11 ms\n   id-fpool del:      10     10     10 ms\n   
id-fpool mix:      24     24     24 ms\n   id-fpool rnd:      30     30     30 ms\n\n   $ ./tests/ovstest test-id-fpool perf 1000000 4\n   Benchmarking n=1000000 on 4 threads.\n    type\\thread:       1      2      3      4    Avg\n   id-fpool new:       9     11     11     10     10 ms\n   id-fpool del:       5      6      6      5      5 ms\n   id-fpool mix:      16     16     16     16     16 ms\n   id-fpool rnd:      20     20     20     20     20 ms\n\nSigned-off-by: Gaetan Rivet <grive@u256.net>\nReviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>\n---\n lib/automake.mk       |   2 +\n lib/id-fpool.c        | 279 +++++++++++++++++++\n lib/id-fpool.h        |  66 +++++\n tests/automake.mk     |   1 +\n tests/library.at      |   4 +\n tests/test-id-fpool.c | 615 ++++++++++++++++++++++++++++++++++++++++++\n 6 files changed, 967 insertions(+)\n create mode 100644 lib/id-fpool.c\n create mode 100644 lib/id-fpool.h\n create mode 100644 tests/test-id-fpool.c",
    "diff": "diff --git a/lib/automake.mk b/lib/automake.mk\nindex 098337078..ec1306b49 100644\n--- a/lib/automake.mk\n+++ b/lib/automake.mk\n@@ -151,6 +151,8 @@ lib_libopenvswitch_la_SOURCES = \\\n \tlib/hmap.c \\\n \tlib/hmapx.c \\\n \tlib/hmapx.h \\\n+\tlib/id-fpool.c \\\n+\tlib/id-fpool.h \\\n \tlib/id-pool.c \\\n \tlib/id-pool.h \\\n \tlib/if-notifier-manual.c \\\ndiff --git a/lib/id-fpool.c b/lib/id-fpool.c\nnew file mode 100644\nindex 000000000..15cef5d00\n--- /dev/null\n+++ b/lib/id-fpool.c\n@@ -0,0 +1,279 @@\n+/*\n+ * Copyright (c) 2021 NVIDIA Corporation.\n+ *\n+ * Licensed under the Apache License, Version 2.0 (the \"License\");\n+ * you may not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at:\n+ *\n+ *     http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing, software\n+ * distributed under the License is distributed on an \"AS IS\" BASIS,\n+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+ * See the License for the specific language governing permissions and\n+ * limitations under the License.\n+ */\n+\n+#include <config.h>\n+\n+#include \"openvswitch/list.h\"\n+#include \"openvswitch/thread.h\"\n+#include \"openvswitch/util.h\"\n+#include \"ovs-atomic.h\"\n+#include \"id-fpool.h\"\n+\n+#ifdef HAVE_PTHREAD_SPIN_LOCK\n+#define id_fpool_lock_type ovs_spin\n+#define id_fpool_lock_init(l) do { ovs_spin_init(l); } while (0)\n+#define id_fpool_lock_destroy(l) do { ovs_spin_destroy(l); } while (0)\n+#define id_fpool_lock(l) do { ovs_spin_lock(l); } while (0)\n+#define id_fpool_unlock(l) do { ovs_spin_unlock(l); } while (0)\n+#else\n+#define id_fpool_lock_type ovs_mutex\n+#define id_fpool_lock_init(l) do { ovs_mutex_init(l); } while (0)\n+#define id_fpool_lock_destroy(l) do { ovs_mutex_destroy(l); } while (0)\n+#define id_fpool_lock(l) do { ovs_mutex_lock(l); } while (0)\n+#define id_fpool_unlock(l) do { ovs_mutex_unlock(l); 
} while (0)\n+#endif\n+\n+struct id_slab {\n+    struct ovs_list node;\n+    uint32_t pos;\n+    uint32_t ids[ID_FPOOL_CACHE_SIZE];\n+};\n+\n+struct per_user {\n+PADDED_MEMBERS(CACHE_LINE_SIZE,\n+    struct id_fpool_lock_type user_lock;\n+    struct id_slab *slab;\n+);};\n+\n+struct id_fpool {\n+    /* Constants */\n+    uint32_t floor; /* IDs are in the range of [floor, ceiling). */\n+    uint32_t ceiling;\n+    size_t nb_user; /* Number of concurrent users. */\n+\n+    /* Shared mutable data protected by global lock. */\n+    struct id_fpool_lock_type pool_lock;\n+    struct ovs_list free_slabs;\n+    uint32_t next_id;\n+\n+    /* Per-user mutable data protected by user locks. */\n+    struct per_user per_users[0];\n+};\n+\n+/* Lock precedence is\n+ * 1: per_users.user_lock\n+ * 2: pool_lock\n+ */\n+\n+static struct id_slab *\n+id_slab_create(uint32_t *next_id, uint32_t max)\n+{\n+    struct id_slab *slab;\n+    size_t n_ids;\n+    size_t pos;\n+\n+    if (next_id[0] == max) {\n+        return NULL;\n+    }\n+\n+    n_ids = max - next_id[0];\n+    slab = xmalloc(sizeof *slab);\n+    ovs_list_init(&slab->node);\n+    slab->pos = 0;\n+\n+    for (pos = MIN(n_ids, ARRAY_SIZE(slab->ids)); pos > 0; pos--) {\n+        slab->ids[pos - 1] = next_id[0];\n+        next_id[0]++;\n+        slab->pos++;\n+    }\n+\n+    return slab;\n+}\n+\n+static bool\n+id_slab_insert(struct id_slab *slab, uint32_t id)\n+{\n+    if (slab == NULL) {\n+        return false;\n+    }\n+    if (slab->pos >= ARRAY_SIZE(slab->ids)) {\n+        return false;\n+    }\n+    slab->ids[slab->pos++] = id;\n+    return true;\n+}\n+\n+static bool\n+id_slab_remove(struct id_slab *slab, uint32_t *id)\n+{\n+    if (slab == NULL) {\n+        return false;\n+    }\n+    if (slab->pos == 0) {\n+        return false;\n+    }\n+    *id = slab->ids[--slab->pos];\n+    return true;\n+}\n+\n+static void\n+per_user_init(struct per_user *pu, uint32_t *next_id, uint32_t max)\n+{\n+    
id_fpool_lock_init(&pu->user_lock);\n+    pu->slab = id_slab_create(next_id, max);\n+}\n+\n+static void\n+per_user_destroy(struct per_user *pu)\n+{\n+    id_fpool_lock(&pu->user_lock);\n+    free(pu->slab);\n+    pu->slab = NULL;\n+    id_fpool_unlock(&pu->user_lock);\n+    id_fpool_lock_destroy(&pu->user_lock);\n+}\n+\n+struct id_fpool *\n+id_fpool_create(unsigned int nb_user, uint32_t floor, uint32_t n_ids)\n+{\n+    struct id_fpool *pool;\n+    size_t i;\n+\n+    ovs_assert(nb_user != 0);\n+    ovs_assert(floor <= UINT32_MAX - n_ids);\n+\n+    pool = xmalloc(sizeof *pool + nb_user * sizeof(struct per_user));\n+    pool->next_id = floor;\n+    pool->floor = floor;\n+    pool->ceiling = floor + n_ids;\n+\n+    for (i = 0; i < nb_user; i++) {\n+        per_user_init(&pool->per_users[i],\n+                      &pool->next_id, pool->ceiling);\n+    }\n+    pool->nb_user = nb_user;\n+\n+    id_fpool_lock_init(&pool->pool_lock);\n+    ovs_list_init(&pool->free_slabs);\n+\n+    return pool;\n+}\n+\n+void\n+id_fpool_destroy(struct id_fpool *pool)\n+{\n+    struct id_slab *slab;\n+    struct id_slab *next;\n+    size_t i;\n+\n+    id_fpool_lock(&pool->pool_lock);\n+    LIST_FOR_EACH_SAFE (slab, next, node, &pool->free_slabs) {\n+        free(slab);\n+    }\n+    ovs_list_poison(&pool->free_slabs);\n+    id_fpool_unlock(&pool->pool_lock);\n+    id_fpool_lock_destroy(&pool->pool_lock);\n+\n+    for (i = 0; i < pool->nb_user; i++) {\n+        per_user_destroy(&pool->per_users[i]);\n+    }\n+    free(pool);\n+}\n+\n+bool\n+id_fpool_new_id(struct id_fpool *pool, unsigned int uid, uint32_t *id)\n+{\n+    struct per_user *pu;\n+    unsigned int uid2;\n+    bool res = false;\n+\n+    ovs_assert(uid < pool->nb_user);\n+    pu = &pool->per_users[uid];\n+\n+    id_fpool_lock(&pu->user_lock);\n+\n+    if (id_slab_remove(pu->slab, id)) {\n+        res = true;\n+        goto unlock_and_ret;\n+    }\n+    free(pu->slab);\n+\n+    id_fpool_lock(&pool->pool_lock);\n+    if 
(!ovs_list_is_empty(&pool->free_slabs)) {\n+        pu->slab = CONTAINER_OF(ovs_list_pop_front(&pool->free_slabs),\n+                                struct id_slab, node);\n+    } else {\n+        pu->slab = id_slab_create(&pool->next_id, pool->ceiling);\n+    }\n+    id_fpool_unlock(&pool->pool_lock);\n+\n+    if (pu->slab != NULL) {\n+        res = id_slab_remove(pu->slab, id);\n+        goto unlock_and_ret;\n+    }\n+\n+    id_fpool_unlock(&pu->user_lock);\n+\n+    /* No ID available in local slab, no slab available in shared list.\n+     * The shared counter is maxed out. Attempt to steal an ID from another\n+     * user slab. */\n+\n+    for (uid2 = 0; uid2 < pool->nb_user; uid2++) {\n+        struct per_user *pu2 = &pool->per_users[uid2];\n+\n+        if (uid == uid2) {\n+            continue;\n+        }\n+        id_fpool_lock(&pu2->user_lock);;\n+        res = id_slab_remove(pu2->slab, id);\n+        id_fpool_unlock(&pu2->user_lock);;\n+        if (res) {\n+            break;\n+        }\n+    }\n+\n+    goto out;\n+\n+unlock_and_ret:\n+    id_fpool_unlock(&pu->user_lock);\n+out:\n+    return res;\n+}\n+\n+void\n+id_fpool_free_id(struct id_fpool *pool, unsigned int uid, uint32_t id)\n+{\n+    struct per_user *pu;\n+\n+    if (id < pool->floor || id >= pool->ceiling) {\n+        return;\n+    }\n+\n+    ovs_assert(uid < pool->nb_user);\n+    pu = &pool->per_users[uid];\n+\n+    id_fpool_lock(&pu->user_lock);\n+\n+    if (pu->slab == NULL) {\n+        /* Create local slab with a single ID. */\n+        pu->slab = id_slab_create(&id, id + 1);\n+        goto unlock;\n+    }\n+\n+    if (id_slab_insert(pu->slab, id)) {\n+        goto unlock;\n+    }\n+\n+    id_fpool_lock(&pool->pool_lock);\n+    ovs_list_push_back(&pool->free_slabs, &pu->slab->node);\n+    id_fpool_unlock(&pool->pool_lock);\n+\n+    /* Create local slab with a single ID. 
*/\n+    pu->slab = id_slab_create(&id, id + 1);\n+\n+unlock:\n+    id_fpool_unlock(&pu->user_lock);\n+}\ndiff --git a/lib/id-fpool.h b/lib/id-fpool.h\nnew file mode 100644\nindex 000000000..f8d855938\n--- /dev/null\n+++ b/lib/id-fpool.h\n@@ -0,0 +1,66 @@\n+/*\n+ * Copyright (c) 2021 NVIDIA Corporation.\n+ *\n+ * Licensed under the Apache License, Version 2.0 (the \"License\");\n+ * you may not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at:\n+ *\n+ *     http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing, software\n+ * distributed under the License is distributed on an \"AS IS\" BASIS,\n+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+ * See the License for the specific language governing permissions and\n+ * limitations under the License.\n+ */\n+\n+#ifndef ID_FPOOL_H\n+#define ID_FPOOL_H\n+\n+#include <stdbool.h>\n+#include <stddef.h>\n+#include <stdint.h>\n+\n+/*\n+ * Fast ID pool.\n+ * =============\n+ *\n+ * Unordered pool of unique 32 bits IDs.\n+ *\n+ * Multiple users are registered at initialization.  Each one gets a cache\n+ * of ID.  When each thread allocates from the pool using its own user ID,\n+ * the pool scales for concurrent allocation.\n+ *\n+ * New IDs are always in the range of '[floor, next_id]', where 'next_id' is\n+ * in the range of '[last_allocated_ID + nb_user * cache_size + 1]'.\n+ * This means that a new ID is not always the smallest available ID, but it is\n+ * still from a limited range.\n+ *\n+ * Users should ensure that an ID is *never* freed twice.  
Not doing so will\n+ * have the effect of double-allocating such ID afterward.\n+ *\n+ * Thread-safety\n+ * =============\n+ *\n+ * APIs are thread safe.\n+ * Multiple threads can share the same user ID if necessary.\n+ */\n+\n+#define ID_FPOOL_CACHE_SIZE 64\n+\n+struct id_fpool;\n+\n+/* nb_user is the number of expected users of the pool,\n+ * in terms of execution threads. */\n+struct id_fpool *id_fpool_create(unsigned int nb_user,\n+                                 uint32_t base, uint32_t n_ids);\n+void id_fpool_destroy(struct id_fpool *pool);\n+\n+/* uid is the thread user-id. It should be within '[0, nb_user)'. */\n+bool id_fpool_new_id(struct id_fpool *pool, unsigned int uid, uint32_t *id);\n+\n+/* uid is the thread user-id. It should be within '[0, nb_user)'.\n+ * An allocated ID must never be freed twice. */\n+void id_fpool_free_id(struct id_fpool *pool, unsigned int uid, uint32_t id);\n+\n+#endif  /* ID_FPOOL_H */\ndiff --git a/tests/automake.mk b/tests/automake.mk\nindex 8fdec27ef..8a9151f81 100644\n--- a/tests/automake.mk\n+++ b/tests/automake.mk\n@@ -467,6 +467,7 @@ tests_ovstest_SOURCES = \\\n \ttests/test-heap.c \\\n \ttests/test-hindex.c \\\n \ttests/test-hmap.c \\\n+\ttests/test-id-fpool.c \\\n \ttests/test-json.c \\\n \ttests/test-jsonrpc.c \\\n \ttests/test-list.c \\\ndiff --git a/tests/library.at b/tests/library.at\nindex 661e95727..db4997d8f 100644\n--- a/tests/library.at\n+++ b/tests/library.at\n@@ -270,3 +270,7 @@ AT_SETUP([mpsc-queue module])\n AT_CHECK([ovstest test-mpsc-queue check], [0], [....\n ])\n AT_CLEANUP\n+\n+AT_SETUP([id-fpool module])\n+AT_CHECK([ovstest test-id-fpool check], [0], [])\n+AT_CLEANUP\ndiff --git a/tests/test-id-fpool.c b/tests/test-id-fpool.c\nnew file mode 100644\nindex 000000000..25275d9ae\n--- /dev/null\n+++ b/tests/test-id-fpool.c\n@@ -0,0 +1,615 @@\n+/*\n+ * Copyright (c) 2021 NVIDIA Corporation.\n+ *\n+ * Licensed under the Apache License, Version 2.0 (the \"License\");\n+ * you may not use this file except in 
compliance with the License.\n+ * You may obtain a copy of the License at:\n+ *\n+ *     http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing, software\n+ * distributed under the License is distributed on an \"AS IS\" BASIS,\n+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+ * See the License for the specific language governing permissions and\n+ * limitations under the License.\n+ */\n+\n+#undef NDEBUG\n+#include <assert.h>\n+#include <getopt.h>\n+#include <string.h>\n+\n+#include <config.h>\n+\n+#include \"command-line.h\"\n+#include \"id-fpool.h\"\n+#include \"id-pool.h\"\n+#include \"openvswitch/vlog.h\"\n+#include \"openvswitch/util.h\"\n+#include \"ovs-thread.h\"\n+#include \"ovs-rcu.h\"\n+#include \"ovs-numa.h\"\n+#include \"ovstest.h\"\n+#include \"random.h\"\n+#include \"timeval.h\"\n+#include \"util.h\"\n+\n+static void\n+test_id_fpool_alloc(void)\n+{\n+    const uint32_t base = 0;\n+    const uint32_t n_id = 10;\n+    struct id_fpool *pool = id_fpool_create(1, base, n_id);\n+    uint32_t ids[10];\n+    size_t i;\n+\n+    /* Can do n_id allocs. */\n+    for (i = 0; i < n_id; i++) {\n+        ovs_assert(id_fpool_new_id(pool, 0, &ids[i]));\n+        ovs_assert(ids[i] >= base);\n+        ovs_assert(ids[i] < base + n_id);\n+    }\n+    /* Only n_id successful allocations. */\n+    ovs_assert(id_fpool_new_id(pool, 0, NULL) == false);\n+\n+    /* Monotonic alloc. */\n+    for (i = 0; i < n_id - 1; i++) {\n+        ovs_assert(ids[i] < ids[i + 1]);\n+    }\n+\n+    for (i = 0; i < n_id; i++) {\n+        id_fpool_free_id(pool, 0, ids[i]);\n+    }\n+\n+    /* Can do n_id new allocs. */\n+    for (i = 0; i < n_id; i++) {\n+        ovs_assert(id_fpool_new_id(pool, 0, &ids[i]));\n+        ovs_assert(ids[i] >= base);\n+        ovs_assert(ids[i] < base + n_id);\n+    }\n+    /* Only n_id successful allocations. 
*/\n+    ovs_assert(id_fpool_new_id(pool, 0, NULL) == false);\n+\n+    for (i = 0; i < n_id; i++) {\n+        id_fpool_free_id(pool, 0, ids[i]);\n+    }\n+\n+    id_fpool_destroy(pool);\n+}\n+\n+static void\n+test_id_fpool_alloc_range(void)\n+{\n+    const uint32_t base = 200;\n+    const uint32_t n_id = 100;\n+    const uint32_t ceil = base + n_id;\n+    struct id_fpool *pool = id_fpool_create(1, base, n_id);\n+    bool id_allocated[100];\n+    size_t i;\n+\n+    memset(id_allocated, 0, sizeof id_allocated);\n+\n+    /* Allocate all IDs only once. */\n+    for (i = 0; i < n_id; i++) {\n+        uint32_t id;\n+\n+        ovs_assert(id_fpool_new_id(pool, 0, &id));\n+        ovs_assert(id >= base);\n+        ovs_assert(id < ceil);\n+\n+        ovs_assert(id_allocated[id - base] == false);\n+        id_allocated[id - base] = true;\n+    }\n+    /* Only n_id successful allocations. */\n+    ovs_assert(id_fpool_new_id(pool, 0, NULL) == false);\n+\n+    for (i = 0; i < n_id; i++) {\n+        ovs_assert(id_allocated[i]);\n+        id_fpool_free_id(pool, 0, base + i);\n+        id_allocated[i] = false;\n+    }\n+\n+    /* The full range is again fully available. */\n+    for (i = 0; i < n_id; i++) {\n+        uint32_t id;\n+\n+        ovs_assert(id_fpool_new_id(pool, 0, &id));\n+        ovs_assert(id >= base);\n+        ovs_assert(id < ceil);\n+\n+        ovs_assert(id_allocated[id - base] == false);\n+        id_allocated[id - base] = true;\n+    }\n+\n+    id_fpool_destroy(pool);\n+}\n+\n+static void\n+test_id_fpool_alloc_steal(void)\n+{\n+    /* N must be less than a slab size to force the second user\n+     * to steal from the first.\n+     */\n+#define N (ID_FPOOL_CACHE_SIZE / 2)\n+    bool ids[N];\n+    struct id_fpool *pool;\n+    uint32_t id;\n+    size_t i;\n+\n+    memset(ids, 0, sizeof ids);\n+    pool = id_fpool_create(2, 0, N);\n+\n+    /* Fill up user 0 cache. 
*/\n+    ovs_assert(id_fpool_new_id(pool, 0, &id));\n+    for (i = 0; i < N - 1; i++) {\n+        /* Check that user 1 can still alloc from user 0 cache. */\n+        ovs_assert(id_fpool_new_id(pool, 1, &id));\n+    }\n+\n+    id_fpool_destroy(pool);\n+}\n+\n+static void\n+test_id_fpool_alloc_under_limit(void)\n+{\n+    const size_t n_id = 100;\n+    uint32_t ids[100];\n+    unsigned int limit;\n+    struct id_fpool *pool;\n+    size_t i;\n+\n+    memset(ids, 0, sizeof ids);\n+    pool = id_fpool_create(1, 0, n_id);\n+\n+    for (limit = 1; limit < n_id; limit++) {\n+        /* Allocate until arbitrary limit then free allocated ids. */\n+        for (i = 0; i < limit; i++) {\n+            ovs_assert(id_fpool_new_id(pool, 0, &ids[i]));\n+        }\n+        for (i = 0; i < limit; i++) {\n+            id_fpool_free_id(pool, 0, ids[i]);\n+        }\n+        /* Verify that the N='limit' next allocations are under limit. */\n+        for (i = 0; i < limit; i++) {\n+            ovs_assert(id_fpool_new_id(pool, 0, &ids[i]));\n+            ovs_assert(ids[i] < limit + ID_FPOOL_CACHE_SIZE);\n+        }\n+        for (i = 0; i < limit; i++) {\n+            id_fpool_free_id(pool, 0, ids[i]);\n+        }\n+    }\n+\n+    id_fpool_destroy(pool);\n+}\n+\n+static void\n+run_tests(struct ovs_cmdl_context *ctx OVS_UNUSED)\n+{\n+    test_id_fpool_alloc();\n+    test_id_fpool_alloc_range();\n+    test_id_fpool_alloc_steal();\n+    test_id_fpool_alloc_under_limit();\n+}\n+\n+static uint32_t *ids;\n+static uint64_t *thread_working_ms; /* Measured work time. 
*/\n+\n+static unsigned int n_threads;\n+static unsigned int n_ids;\n+\n+static struct ovs_barrier barrier;\n+\n+#define TIMEOUT_MS (10 * 1000) /* 10 sec timeout */\n+static int running_time_ms;\n+static volatile bool stop = false;\n+\n+static int\n+elapsed(int *start)\n+{\n+    return running_time_ms - *start;\n+}\n+\n+static void\n+swap_u32(uint32_t *a, uint32_t *b)\n+{\n+    uint32_t t;\n+    t = *a;\n+    *a = *b;\n+    *b = t;\n+}\n+\n+static void\n+shuffle(uint32_t *p, size_t n)\n+{\n+    for (; n > 1; n--, p++) {\n+        uint32_t *q = &p[random_range(n)];\n+        swap_u32(p, q);\n+    }\n+}\n+\n+static void\n+print_result(const char *prefix)\n+{\n+    uint64_t avg;\n+    size_t i;\n+\n+    avg = 0;\n+    for (i = 0; i < n_threads; i++) {\n+        avg += thread_working_ms[i];\n+    }\n+    avg /= n_threads;\n+    printf(\"%s: \", prefix);\n+    for (i = 0; i < n_threads; i++) {\n+        if (thread_working_ms[i] >= TIMEOUT_MS) {\n+            printf(\" %5\" PRIu64 \"+\", thread_working_ms[i]);\n+        } else {\n+            printf(\" %6\" PRIu64, thread_working_ms[i]);\n+        }\n+    }\n+    if (avg >= TIMEOUT_MS) {\n+        printf(\"     -1 ms\\n\");\n+    } else {\n+        printf(\" %6\" PRIu64 \" ms\\n\", avg);\n+    }\n+}\n+\n+struct id_fpool_aux {\n+    struct id_fpool *pool;\n+    atomic_uint thread_id;\n+};\n+\n+static void *\n+id_fpool_thread(void *aux_)\n+{\n+    unsigned int n_ids_per_thread;\n+    struct id_fpool_aux *aux = aux_;\n+    uint32_t *th_ids;\n+    unsigned int tid;\n+    int start;\n+    size_t i;\n+\n+    atomic_add(&aux->thread_id, 1u, &tid);\n+    n_ids_per_thread = n_ids / n_threads;\n+    th_ids = &ids[tid * n_ids_per_thread];\n+\n+    /* NEW / ALLOC */\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        ignore(id_fpool_new_id(aux->pool, tid, &th_ids[i]));\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* DEL */\n+\n+    
shuffle(th_ids, n_ids_per_thread);\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        id_fpool_free_id(aux->pool, tid, th_ids[i]);\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* MIX */\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        ignore(id_fpool_new_id(aux->pool, tid, &th_ids[i]));\n+        id_fpool_free_id(aux->pool, tid, th_ids[i]);\n+        ignore(id_fpool_new_id(aux->pool, tid, &th_ids[i]));\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* Do not interfere with other threads still in 'MIX' phase. */\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        id_fpool_free_id(aux->pool, tid, th_ids[i]);\n+    }\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* MIX SHUFFLED */\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        if (elapsed(&start) >= TIMEOUT_MS) {\n+            break;\n+        }\n+        ignore(id_fpool_new_id(aux->pool, tid, &th_ids[i]));\n+        swap_u32(&th_ids[i], &th_ids[random_range(i + 1)]);\n+        id_fpool_free_id(aux->pool, tid, th_ids[i]);\n+        ignore(id_fpool_new_id(aux->pool, tid, &th_ids[i]));\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    return NULL;\n+}\n+\n+static void\n+benchmark_id_fpool(void)\n+{\n+    pthread_t *threads;\n+    struct id_fpool_aux aux;\n+    size_t i;\n+\n+    memset(ids, 0, n_ids & sizeof *ids);\n+    memset(thread_working_ms, 0, n_threads & sizeof *thread_working_ms);\n+\n+    aux.pool = id_fpool_create(n_threads, 0, n_ids);\n+    atomic_store(&aux.thread_id, 0);\n+\n+    for (i = n_ids - (n_ids % n_threads); i < n_ids; i++) {\n+        id_fpool_new_id(aux.pool, 0, &ids[i]);\n+    }\n+\n+    threads = xmalloc(n_threads * sizeof *threads);\n+    ovs_barrier_init(&barrier, n_threads + 1);\n+\n+    for (i = 0; i < n_threads; i++) {\n+        
threads[i] = ovs_thread_create(\"id_fpool_alloc\",\n+                                       id_fpool_thread, &aux);\n+    }\n+\n+    ovs_barrier_block(&barrier);\n+\n+    print_result(\"id-fpool new\");\n+\n+    ovs_barrier_block(&barrier);\n+\n+    print_result(\"id-fpool del\");\n+\n+    ovs_barrier_block(&barrier);\n+    /* Cleanup. */\n+    ovs_barrier_block(&barrier);\n+\n+    print_result(\"id-fpool mix\");\n+\n+    for (i = 0; i < n_threads; i++) {\n+        xpthread_join(threads[i], NULL);\n+    }\n+\n+    print_result(\"id-fpool rnd\");\n+\n+    id_fpool_destroy(aux.pool);\n+    ovs_barrier_destroy(&barrier);\n+    free(threads);\n+}\n+\n+struct id_pool_aux {\n+    struct id_pool *pool;\n+    struct ovs_mutex *lock;\n+    atomic_uint thread_id;\n+};\n+\n+static void *\n+id_pool_thread(void *aux_)\n+{\n+    unsigned int n_ids_per_thread;\n+    struct id_pool_aux *aux = aux_;\n+    uint32_t *th_ids;\n+    unsigned int tid;\n+    int start;\n+    size_t i;\n+\n+    atomic_add(&aux->thread_id, 1u, &tid);\n+    n_ids_per_thread = n_ids / n_threads;\n+    th_ids = &ids[tid * n_ids_per_thread];\n+\n+    /* NEW */\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        ovs_mutex_lock(aux->lock);\n+        ovs_assert(id_pool_alloc_id(aux->pool, &th_ids[i]));\n+        ovs_mutex_unlock(aux->lock);\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* DEL */\n+\n+    shuffle(th_ids, n_ids_per_thread);\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        ovs_mutex_lock(aux->lock);\n+        id_pool_free_id(aux->pool, th_ids[i]);\n+        ovs_mutex_unlock(aux->lock);\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* MIX */\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        ovs_mutex_lock(aux->lock);\n+        ignore(id_pool_alloc_id(aux->pool, 
&th_ids[i]));\n+        id_pool_free_id(aux->pool, th_ids[i]);\n+        ignore(id_pool_alloc_id(aux->pool, &th_ids[i]));\n+        ovs_mutex_unlock(aux->lock);\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* Do not interfere with other threads still in 'MIX' phase. */\n+    ovs_mutex_lock(aux->lock);\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        id_pool_free_id(aux->pool, th_ids[i]);\n+    }\n+    ovs_mutex_unlock(aux->lock);\n+\n+    ovs_barrier_block(&barrier);\n+\n+    /* MIX SHUFFLED */\n+\n+    start = running_time_ms;\n+    for (i = 0; i < n_ids_per_thread; i++) {\n+        if (elapsed(&start) >= TIMEOUT_MS) {\n+            break;\n+        }\n+        ovs_mutex_lock(aux->lock);\n+        ignore(id_pool_alloc_id(aux->pool, &th_ids[i]));\n+        swap_u32(&th_ids[i], &th_ids[random_range(i + 1)]);\n+        id_pool_free_id(aux->pool, th_ids[i]);\n+        ignore(id_pool_alloc_id(aux->pool, &th_ids[i]));\n+        ovs_mutex_unlock(aux->lock);\n+    }\n+    thread_working_ms[tid] = elapsed(&start);\n+\n+    return NULL;\n+}\n+\n+OVS_UNUSED\n+static void\n+benchmark_id_pool(void)\n+{\n+    pthread_t *threads;\n+    struct id_pool_aux aux;\n+    struct ovs_mutex lock;\n+    size_t i;\n+\n+    memset(ids, 0, n_ids & sizeof *ids);\n+    memset(thread_working_ms, 0, n_threads & sizeof *thread_working_ms);\n+\n+    aux.pool = id_pool_create(0, n_ids);\n+    aux.lock = &lock;\n+    ovs_mutex_init(&lock);\n+    atomic_store(&aux.thread_id, 0);\n+\n+    for (i = n_ids - (n_ids % n_threads); i < n_ids; i++) {\n+        id_pool_alloc_id(aux.pool, &ids[i]);\n+    }\n+\n+    threads = xmalloc(n_threads * sizeof *threads);\n+    ovs_barrier_init(&barrier, n_threads + 1);\n+\n+    for (i = 0; i < n_threads; i++) {\n+        threads[i] = ovs_thread_create(\"id_pool_alloc\", id_pool_thread, &aux);\n+    }\n+\n+    ovs_barrier_block(&barrier);\n+\n+    print_result(\" id-pool new\");\n+\n+    
ovs_barrier_block(&barrier);\n+\n+    print_result(\" id-pool del\");\n+\n+    ovs_barrier_block(&barrier);\n+    /* Cleanup. */\n+    ovs_barrier_block(&barrier);\n+\n+    print_result(\" id-pool mix\");\n+\n+    for (i = 0; i < n_threads; i++) {\n+        xpthread_join(threads[i], NULL);\n+    }\n+\n+    print_result(\" id-pool rnd\");\n+\n+    id_pool_destroy(aux.pool);\n+    ovs_barrier_destroy(&barrier);\n+    free(threads);\n+}\n+\n+static void *\n+clock_main(void *arg OVS_UNUSED)\n+{\n+    struct timeval start;\n+    struct timeval end;\n+\n+    xgettimeofday(&start);\n+    while (!stop) {\n+        xgettimeofday(&end);\n+        running_time_ms = timeval_to_msec(&end) - timeval_to_msec(&start);\n+        xnanosleep(1000);\n+    }\n+\n+    return NULL;\n+}\n+\n+static void\n+do_perf_test(struct ovs_cmdl_context *ctx, bool test_id_pool)\n+{\n+    pthread_t clock;\n+    long int l_threads;\n+    long int l_ids;\n+    size_t i;\n+\n+    l_ids = strtol(ctx->argv[1], NULL, 10);\n+    l_threads = strtol(ctx->argv[2], NULL, 10);\n+    ovs_assert(l_ids > 0 && l_threads > 0);\n+\n+    n_ids = l_ids;\n+    n_threads = l_threads;\n+\n+    ids = xcalloc(n_ids, sizeof *ids);\n+    thread_working_ms = xcalloc(n_threads, sizeof *thread_working_ms);\n+\n+    clock = ovs_thread_create(\"clock\", clock_main, NULL);\n+\n+    printf(\"Benchmarking n=%u on %u thread%s.\\n\", n_ids, n_threads,\n+           n_threads > 1 ? 
\"s\" : \"\");\n+\n+    printf(\" type\\\\thread:  \");\n+    for (i = 0; i < n_threads; i++) {\n+        printf(\"   %3\" PRIuSIZE \" \", i + 1);\n+    }\n+    printf(\"   Avg\\n\");\n+\n+    ovsrcu_quiesce_start();\n+\n+    benchmark_id_fpool();\n+    if (test_id_pool) {\n+        benchmark_id_pool();\n+    }\n+\n+    stop = true;\n+\n+    free(thread_working_ms);\n+    xpthread_join(clock, NULL);\n+}\n+\n+static void\n+run_benchmark(struct ovs_cmdl_context *ctx)\n+{\n+    do_perf_test(ctx, true);\n+}\n+\n+static void\n+run_perf(struct ovs_cmdl_context *ctx)\n+{\n+    do_perf_test(ctx, false);\n+}\n+\n+static const struct ovs_cmdl_command commands[] = {\n+    {\"check\", NULL, 0, 0, run_tests, OVS_RO},\n+    {\"benchmark\", \"<nb elem> <nb threads>\", 2, 2, run_benchmark, OVS_RO},\n+    {\"perf\", \"<nb elem> <nb threads>\", 2, 2, run_perf, OVS_RO},\n+    {NULL, NULL, 0, 0, NULL, OVS_RO},\n+};\n+\n+static void\n+id_fpool_test_main(int argc, char *argv[])\n+{\n+    struct ovs_cmdl_context ctx = {\n+        .argc = argc - optind,\n+        .argv = argv + optind,\n+    };\n+\n+    vlog_set_levels(NULL, VLF_ANY_DESTINATION, VLL_OFF);\n+\n+    set_program_name(argv[0]);\n+    ovs_cmdl_run_command(&ctx, commands);\n+}\n+\n+OVSTEST_REGISTER(\"test-id-fpool\", id_fpool_test_main);\n",
    "prefixes": [
        "ovs-dev",
        "v5",
        "13/27"
    ]
}