get:
Show a patch.

patch:
Partially update a patch: only the fields supplied in the request body are changed.

put:
Update a patch, replacing the full set of writable fields.

GET /api/1.2/patches/2226296/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2226296,
    "url": "http://patchwork.ozlabs.org/api/1.2/patches/2226296/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhubrmqub2.gcc.gcc-TEST.redi.31.1.1@forge-stage.sourceware.org/",
    "project": {
        "id": 17,
        "url": "http://patchwork.ozlabs.org/api/1.2/projects/17/?format=api",
        "name": "GNU Compiler Collection",
        "link_name": "gcc",
        "list_id": "gcc-patches.gcc.gnu.org",
        "list_email": "gcc-patches@gcc.gnu.org",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null,
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<bmm.hhubrmqub2.gcc.gcc-TEST.redi.31.1.1@forge-stage.sourceware.org>",
    "list_archive_url": null,
    "date": "2026-04-22T10:44:16",
    "name": "[v1,01/16] libstdc++: Atomic wait/notify ABI stabilization",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "51f313fda725b5a1592f1a01158a9def74af25b7",
    "submitter": {
        "id": 93210,
        "url": "http://patchwork.ozlabs.org/api/1.2/people/93210/?format=api",
        "name": "Jonathan Wakely via Sourceware Forge",
        "email": "forge-bot+redi@forge-stage.sourceware.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhubrmqub2.gcc.gcc-TEST.redi.31.1.1@forge-stage.sourceware.org/mbox/",
    "series": [
        {
            "id": 500987,
            "url": "http://patchwork.ozlabs.org/api/1.2/series/500987/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/gcc/list/?series=500987",
            "date": "2026-04-22T10:44:17",
            "name": "atomic wait/notify ABI stabilization",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/500987/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2226296/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2226296/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@legolas.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Authentication-Results": [
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=gcc.gnu.org\n (client-ip=2620:52:6:3111::32; helo=vm01.sourceware.org;\n envelope-from=gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org;\n receiver=patchwork.ozlabs.org)",
            "sourceware.org; dmarc=none (p=none dis=none)\n header.from=forge-stage.sourceware.org",
            "sourceware.org;\n spf=pass smtp.mailfrom=forge-stage.sourceware.org",
            "server2.sourceware.org;\n arc=none smtp.remote-ip=38.145.34.39"
        ],
        "Received": [
            "from vm01.sourceware.org (vm01.sourceware.org\n [IPv6:2620:52:6:3111::32])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4g0xZS0vMdz1y2d\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 22 Apr 2026 21:19:28 +1000 (AEST)",
            "from vm01.sourceware.org (localhost [127.0.0.1])\n\tby sourceware.org (Postfix) with ESMTP id 0E2B0436303F\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 22 Apr 2026 11:19:26 +0000 (GMT)",
            "from forge-stage.sourceware.org (vm08.sourceware.org [38.145.34.39])\n by sourceware.org (Postfix) with ESMTPS id AFCA0407C159\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 10:46:01 +0000 (GMT)",
            "from forge-stage.sourceware.org (localhost [IPv6:::1])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange x25519 server-signature ECDSA (prime256v1) server-digest SHA256)\n (No client certificate requested)\n by forge-stage.sourceware.org (Postfix) with ESMTPS id 8364542BAF\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 10:46:01 +0000 (UTC)"
        ],
        "DKIM-Filter": [
            "OpenDKIM Filter v2.11.0 sourceware.org 0E2B0436303F",
            "OpenDKIM Filter v2.11.0 sourceware.org AFCA0407C159"
        ],
        "DMARC-Filter": "OpenDMARC Filter v1.4.2 sourceware.org AFCA0407C159",
        "ARC-Filter": "OpenARC Filter v1.0.0 sourceware.org AFCA0407C159",
        "ARC-Seal": "i=1; a=rsa-sha256; d=sourceware.org; s=key; t=1776854761; cv=none;\n b=k3dcszToRWcxhQgJM3p8+mZECcvg4fK9hIervdmuXPydQcB2nBS5scXj7CyTam8E+isOmxiBDCiRE9t8TiMZ84P+dL/Ebh3nxXorj/Fwx6x6p7TdiM2/U5qTiTS88UQPQ+DLzuJxL90G7QeWy/GZOvw0bnTFB1JCu8pnt3IU9e8=",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=sourceware.org; s=key;\n t=1776854761; c=relaxed/simple;\n bh=j6GLusfknEQ5exOwQzzar0GHQwwvBXGvEF5zTqizdGg=;\n h=From:Date:Subject:To:Message-ID;\n b=tznA0O0wEZOQI/qUdsOM9/yTvGBxY6QOuvHdMC6s9+/6EkPnq17fQHafw9G4gQNIvR5RG5sNQ9Q9R6wRz9BiSXdLgDrYAtU3u4JDCtTBtUx2zflSPDZ9vTtiaN/ZHqEs8MssmcljDa1YfzLNzmUje3tvK8sv7eEvEmyYIjEHxhE=",
        "ARC-Authentication-Results": "i=1; server2.sourceware.org",
        "From": "Jonathan Wakely via Sourceware Forge\n <forge-bot+redi@forge-stage.sourceware.org>",
        "Date": "Wed, 22 Apr 2026 10:44:16 +0000",
        "Subject": "[PATCH v1 01/16] libstdc++: Atomic wait/notify ABI stabilization",
        "To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>",
        "Message-ID": "\n <bmm.hhubrmqub2.gcc.gcc-TEST.redi.31.1.1@forge-stage.sourceware.org>",
        "X-Mailer": "batrachomyomachia",
        "X-Pull-Request-Organization": "gcc",
        "X-Pull-Request-Repository": "gcc-TEST",
        "X-Pull-Request": "https://forge.sourceware.org/gcc/gcc-TEST/pulls/31",
        "References": "\n <bmm.hhubrmqub2.gcc.gcc-TEST.redi.31.1.0@forge-stage.sourceware.org>",
        "In-Reply-To": "\n <bmm.hhubrmqub2.gcc.gcc-TEST.redi.31.1.0@forge-stage.sourceware.org>",
        "X-Patch-URL": "\n https://forge.sourceware.org/redi/gcc/commit/bc0a676986271fa6348d7590eda60fa247b1ea0a",
        "X-BeenThere": "gcc-patches@gcc.gnu.org",
        "X-Mailman-Version": "2.1.30",
        "Precedence": "list",
        "List-Id": "Gcc-patches mailing list <gcc-patches.gcc.gnu.org>",
        "List-Unsubscribe": "<https://gcc.gnu.org/mailman/options/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=unsubscribe>",
        "List-Archive": "<https://gcc.gnu.org/pipermail/gcc-patches/>",
        "List-Post": "<mailto:gcc-patches@gcc.gnu.org>",
        "List-Help": "<mailto:gcc-patches-request@gcc.gnu.org?subject=help>",
        "List-Subscribe": "<https://gcc.gnu.org/mailman/listinfo/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=subscribe>",
        "Reply-To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>, redi@gcc.gnu.org",
        "Errors-To": "gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org"
    },
    "content": "From: Thomas Rodgers <trodgers@redhat.com>\n\nThis represents a major refactoring of the previous atomic::wait\nand atomic::notify implementation detail. The aim of this change\nis to simplify the implementation details and position the resulting\nimplementation so that much of the current header-only detail\ncan be moved into the shared library, while also accounting for\nanticipated changes to wait/notify functionality for C++26.\n\nThe previous implementation implemented spin logic in terms of\nthe types __default_spin_policy, __timed_backoff_spin_policy, and\nthe free function __atomic_spin. These are replaced in favor of\ntwo new free functions; __spin_impl and __spin_until_impl. These\ncurrently inline free functions are expected to be moved into the\nlibstdc++ shared library in a future commit.\n\nThe previous implementation derived untimed and timed wait\nimplementation detail from __detail::__waiter_pool_base. This\nis-a relationship is removed in the new version and the previous\nimplementation detail is renamed to reflect this change. The\nstatic _S_for member has been renamed as well to indicate that it\nreturns the __waiter_pool_impl entry in the static 'side table'\nfor a given awaited address.\n\nThis new implementation replaces all of the non-templated waiting\ndetail of __waiter_base, __waiter_pool, __waiter, __enters_wait, and\n__bare_wait with the __wait_impl free function, and the supporting\n__wait_flags enum and __wait_args struct. This currenly inline free\nfunction is expected to be moved into the libstdc++ shared library\nin a future commit.\n\nThis new implementation replaces all of the non-templated notifying\ndetail of __waiter_base, __waiter_pool, and __waiter with the\n__notify_impl free function. 
This currently inline free function\nis expected to be moved into the libstdc++ shared library in a\nfuture commit.\n\nThe __atomic_wait_address template function is updated to account\nfor the above changes and to support the expected C++26 change to\npass the most recent observed value to the caller supplied predicate.\n\nA new non-templated __atomic_wait_address_v free function is added\nthat only works for atomic types that operate only on __platform_wait_t\nand requires the caller to supply a memory order. This is intended\nto be the simplest code path for such types.\n\nThe __atomic_wait_address_v template function is now implemented in\nterms of new __atomic_wait_address template and continues to accept\na user supplied \"value function\" to retrieve the current value of\nthe atomic.\n\nThe __atomic_notify_address template function is updated to account\nfor the above changes.\n\nThe template __platform_wait_until_impl is renamed to\n__wait_clock_t. The previous __platform_wait_until template is deleted\nand the functionality previously provided is moved t the new tempalate\nfunction __wait_until. A similar change is made to the\n__cond_wait_until_impl/__cond_wait_until implementation.\n\nThis new implementation similarly replaces all of the non-templated\nwaiting detail of __timed_waiter_pool, __timed_waiter, etc. with\nthe new __wait_until_impl free function. 
This currently inline free\nfunction is expected to be moved into the libstdc++ shared library\nin a future commit.\n\nThis implementation replaces all templated waiting functions that\nmanage clock conversion as well as relative waiting (wait_for) with\nthe new template functions __wait_until and __wait_for.\n\nSimilarly the previous implementation detail for the various\n__atomic_wait_address_Xxx templates is adjusted to account for the\nimplementation changes outlined above.\n\nAll of the \"bare wait\" versions of __atomic_wait_Xxx have been removed\nand replaced with a defaulted boolean __bare_wait parameter on the\nnew version of these templates.\n\nlibstdc++-v3/ChangeLog:\n\n\t* include/bits/atomic_timed_wait.h:\n\t(__detail::__platform_wait_until_impl): Rename to\n\t__platform_wait_until.\n\t(__detail::__platform_wait_until): Remove previous\n\tdefinition.\n\t(__detail::__cond_wait_until_impl): Rename to\n\t__cond_wait_until.\n\t(__detail::__cond_wait_until): Remove previous\n\tdefinition.\n\t(__detail::__spin_until_impl): New function.\n\t(__detail::__wait_until_impl): New function.\n\t(__detail::__wait_until): New function.\n\t(__detail::__wait_for): New function.\n\t(__detail::__timed_waiter_pool): Remove type.\n\t(__detail::__timed_backoff_spin_policy): Remove type.\n\t(__detail::__timed_waiter): Remove type.\n\t(__detail::__enters_timed_wait): Remove type alias.\n\t(__detail::__bare_timed_wait): Remove type alias.\n\t(__atomic_wait_address_until): Adjust to new implementation\n\tdetail.\n\t(__atomic_wait_address_until_v): Likewise.\n\t(__atomic_wait_address_bare): Remove.\n\t(__atomic_wait_address_for): Adjust to new implementation\n\tdetail.\n\t(__atomic_wait_address_for_v): Likewise.\n\t(__atomic_wait_address_for_bare): Remove.\n\t* include/bits/atomic_wait.h: Include bits/stl_pair.h.\n\t(__detail::__default_spin_policy): Remove type.\n\t(__detail::__atomic_spin): Remove function.\n\t(__detail::__waiter_pool_base): Rename to 
__waiter_pool_impl.\n\tRemove _M_notify.  Rename _S_for to _S_impl_for.\n\t(__detail::__waiter_base): Remove type.\n\t(__detail::__waiter_pool): Remove type.\n\t(__detail::__waiter): Remove type.\n\t(__detail::__enters_wait): Remove type alias.\n\t(__detail::__bare_wait): Remove type alias.\n\t(__detail::__wait_flags): New enum.\n\t(__detail::__wait_args): New struct.\n\t(__detail::__wait_result_type): New type alias.\n\t(__detail::__spin_impl): New function.\n\t(__detail::__wait_impl): New function.\n\t(__atomic_wait_address): Adjust to new implementation detail.\n\t(__atomic_wait_address_v): Likewise.\n\t(__atomic_notify_address): Likewise.\n\t(__atomic_wait_address_bare): Delete.\n\t(__atomic_notify_address_bare): Likewise.\n\t* include/bits/semaphore_base.h: Adjust implementation to\n\tuse new __atomic_wait_address_v contract.\n\t* include/std/barrier: Adjust implementation to use new\n\t__atomic_wait contract.\n\t* include/std/latch: Adjust implementation to use new\n\t__atomic_wait contract.\n\t* testsuite/29_atomics/atomic/wait_notify/100334.cc (main):\n\tAdjust to for __detail::__waiter_pool_base renaming.\n---\n libstdc++-v3/include/bits/atomic_timed_wait.h | 523 ++++++++----------\n libstdc++-v3/include/bits/atomic_wait.h       | 476 ++++++++--------\n libstdc++-v3/include/bits/semaphore_base.h    |  51 +-\n libstdc++-v3/include/std/barrier              |   6 +-\n libstdc++-v3/include/std/latch                |   5 +-\n .../29_atomics/atomic/wait_notify/100334.cc   |   4 +-\n 6 files changed, 495 insertions(+), 570 deletions(-)",
    "diff": "diff --git a/libstdc++-v3/include/bits/atomic_timed_wait.h b/libstdc++-v3/include/bits/atomic_timed_wait.h\nindex 9a6ac95b7d0e..196548484024 100644\n--- a/libstdc++-v3/include/bits/atomic_timed_wait.h\n+++ b/libstdc++-v3/include/bits/atomic_timed_wait.h\n@@ -76,62 +76,32 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n #ifdef _GLIBCXX_HAVE_LINUX_FUTEX\n #define _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT\n     // returns true if wait ended before timeout\n-    template<typename _Dur>\n-      bool\n-      __platform_wait_until_impl(const __platform_wait_t* __addr,\n-\t\t\t\t __platform_wait_t __old,\n-\t\t\t\t const chrono::time_point<__wait_clock_t, _Dur>&\n-\t\t\t\t      __atime) noexcept\n-      {\n-\tauto __s = chrono::time_point_cast<chrono::seconds>(__atime);\n-\tauto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);\n+    bool\n+    __platform_wait_until(const __platform_wait_t* __addr,\n+\t\t\t  __platform_wait_t __old,\n+\t\t\t  const __wait_clock_t::time_point& __atime) noexcept\n+    {\n+      auto __s = chrono::time_point_cast<chrono::seconds>(__atime);\n+      auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);\n \n-\tstruct timespec __rt =\n+      struct timespec __rt =\n \t{\n \t  static_cast<std::time_t>(__s.time_since_epoch().count()),\n \t  static_cast<long>(__ns.count())\n \t};\n \n-\tauto __e = syscall (SYS_futex, __addr,\n-\t\t\t    static_cast<int>(__futex_wait_flags::\n-\t\t\t\t\t\t__wait_bitset_private),\n-\t\t\t    __old, &__rt, nullptr,\n-\t\t\t    static_cast<int>(__futex_wait_flags::\n-\t\t\t\t\t\t__bitset_match_any));\n-\n-\tif (__e)\n-\t  {\n-\t    if (errno == ETIMEDOUT)\n-\t      return false;\n-\t    if (errno != EINTR && errno != EAGAIN)\n-\t      __throw_system_error(errno);\n-\t  }\n-\treturn true;\n-      }\n-\n-    // returns true if wait ended before timeout\n-    template<typename _Clock, typename _Dur>\n-      bool\n-      __platform_wait_until(const __platform_wait_t* __addr, __platform_wait_t 
__old,\n-\t\t\t    const chrono::time_point<_Clock, _Dur>& __atime)\n-      {\n-\tif constexpr (is_same_v<__wait_clock_t, _Clock>)\n-\t  {\n-\t    return __platform_wait_until_impl(__addr, __old, __atime);\n-\t  }\n-\telse\n-\t  {\n-\t    if (!__platform_wait_until_impl(__addr, __old,\n-\t\t\t\t\t    __to_wait_clock(__atime)))\n-\t      {\n-\t\t// We got a timeout when measured against __clock_t but\n-\t\t// we need to check against the caller-supplied clock\n-\t\t// to tell whether we should return a timeout.\n-\t\tif (_Clock::now() < __atime)\n-\t\t  return true;\n-\t      }\n+      auto __e = syscall (SYS_futex, __addr,\n+\t\t\t  static_cast<int>(__futex_wait_flags::__wait_bitset_private),\n+\t\t\t  __old, &__rt, nullptr,\n+\t\t\t  static_cast<int>(__futex_wait_flags::__bitset_match_any));\n+      if (__e)\n+\t{\n+\t  if (errno == ETIMEDOUT)\n \t    return false;\n-\t  }\n+\t  if (errno != EINTR && errno != EAGAIN)\n+\t    __throw_system_error(errno);\n+\t}\n+\treturn true;\n       }\n #else\n // define _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT and implement __platform_wait_until()\n@@ -141,15 +111,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \n #ifdef _GLIBCXX_HAS_GTHREADS\n     // Returns true if wait ended before timeout.\n-    // _Clock must be either steady_clock or system_clock.\n-    template<typename _Clock, typename _Dur>\n-      bool\n-      __cond_wait_until_impl(__condvar& __cv, mutex& __mx,\n-\t\t\t     const chrono::time_point<_Clock, _Dur>& __atime)\n-      {\n-\tstatic_assert(std::__is_one_of<_Clock, chrono::steady_clock,\n-\t\t\t\t\t       chrono::system_clock>::value);\n-\n+    bool\n+    __cond_wait_until(__condvar& __cv, mutex& __mx,\n+\t\t      const __wait_clock_t::time_point& __atime)\n+    {\n \tauto __s = chrono::time_point_cast<chrono::seconds>(__atime);\n \tauto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);\n \n@@ -160,293 +125,261 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \t  };\n \n #ifdef 
_GLIBCXX_USE_PTHREAD_COND_CLOCKWAIT\n-\tif constexpr (is_same_v<chrono::steady_clock, _Clock>)\n+\tif constexpr (is_same_v<chrono::steady_clock, __wait_clock_t>)\n \t  __cv.wait_until(__mx, CLOCK_MONOTONIC, __ts);\n \telse\n #endif\n \t  __cv.wait_until(__mx, __ts);\n-\treturn _Clock::now() < __atime;\n-      }\n-\n-    // returns true if wait ended before timeout\n-    template<typename _Clock, typename _Dur>\n-      bool\n-      __cond_wait_until(__condvar& __cv, mutex& __mx,\n-\t  const chrono::time_point<_Clock, _Dur>& __atime)\n-      {\n-#ifdef _GLIBCXX_USE_PTHREAD_COND_CLOCKWAIT\n-\tif constexpr (is_same_v<_Clock, chrono::steady_clock>)\n-\t  return __detail::__cond_wait_until_impl(__cv, __mx, __atime);\n-\telse\n-#endif\n-\tif constexpr (is_same_v<_Clock, chrono::system_clock>)\n-\t  return __detail::__cond_wait_until_impl(__cv, __mx, __atime);\n-\telse\n-\t  {\n-\t    if (__cond_wait_until_impl(__cv, __mx,\n-\t\t\t\t       __to_wait_clock(__atime)))\n-\t      {\n-\t\t// We got a timeout when measured against __clock_t but\n-\t\t// we need to check against the caller-supplied clock\n-\t\t// to tell whether we should return a timeout.\n-\t\tif (_Clock::now() < __atime)\n-\t\t  return true;\n-\t      }\n-\t    return false;\n-\t  }\n+\treturn __wait_clock_t::now() < __atime;\n       }\n #endif // _GLIBCXX_HAS_GTHREADS\n \n-    struct __timed_waiter_pool : __waiter_pool_base\n+    inline __wait_result_type\n+    __spin_until_impl(const __platform_wait_t* __addr, __wait_args __args,\n+\t\t      const __wait_clock_t::time_point& __deadline)\n     {\n-      // returns true if wait ended before timeout\n-      template<typename _Clock, typename _Dur>\n-\tbool\n-\t_M_do_wait_until(__platform_wait_t* __addr, __platform_wait_t __old,\n-\t\t\t const chrono::time_point<_Clock, _Dur>& __atime)\n-\t{\n-#ifdef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT\n-\t  return __platform_wait_until(__addr, __old, __atime);\n-#else\n-\t  __platform_wait_t __val;\n-\t  __atomic_load(__addr, 
&__val, __ATOMIC_RELAXED);\n-\t  if (__val == __old)\n-\t    {\n-\t      lock_guard<mutex> __l(_M_mtx);\n-\t      return __cond_wait_until(_M_cv, _M_mtx, __atime);\n-\t    }\n-\t  else\n-\t    return true;\n-#endif // _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT\n-\t}\n-    };\n+      auto __t0 = __wait_clock_t::now();\n+      using namespace literals::chrono_literals;\n \n-    struct __timed_backoff_spin_policy\n-    {\n-      __wait_clock_t::time_point _M_deadline;\n-      __wait_clock_t::time_point _M_t0;\n-\n-      template<typename _Clock, typename _Dur>\n-\t__timed_backoff_spin_policy(chrono::time_point<_Clock, _Dur>\n-\t\t\t\t      __deadline = _Clock::time_point::max(),\n-\t\t\t\t    chrono::time_point<_Clock, _Dur>\n-\t\t\t\t      __t0 = _Clock::now()) noexcept\n-\t  : _M_deadline(__to_wait_clock(__deadline))\n-\t  , _M_t0(__to_wait_clock(__t0))\n-\t{ }\n-\n-      bool\n-      operator()() const noexcept\n+      __platform_wait_t __val;\n+      auto __now = __wait_clock_t::now();\n+      for (; __now < __deadline; __now = __wait_clock_t::now())\n       {\n-\tusing namespace literals::chrono_literals;\n-\tauto __now = __wait_clock_t::now();\n-\tif (_M_deadline <= __now)\n-\t  return false;\n-\n-\t// FIXME: this_thread::sleep_for not available #ifdef _GLIBCXX_NO_SLEEP\n-\n-\tauto __elapsed = __now - _M_t0;\n+\tauto __elapsed = __now - __t0;\n+#ifndef _GLIBCXX_NO_SLEEP\n \tif (__elapsed > 128ms)\n-\t  {\n-\t    this_thread::sleep_for(64ms);\n-\t  }\n+\t{\n+\t  this_thread::sleep_for(64ms);\n+\t}\n \telse if (__elapsed > 64us)\n-\t  {\n-\t    this_thread::sleep_for(__elapsed / 2);\n-\t  }\n-\telse if (__elapsed > 4us)\n-\t  {\n-\t    __thread_yield();\n-\t  }\n+\t{\n+\t  this_thread::sleep_for(__elapsed / 2);\n+\t}\n \telse\n-\t  return false;\n-\treturn true;\n+#endif\n+\tif (__elapsed > 4us)\n+\t{\n+\t  __thread_yield();\n+\t}\n+\telse\n+\t{\n+\t  auto __res = __detail::__spin_impl(__addr, __args);\n+\t  if (__res.first)\n+\t    return 
__res;\n+\t}\n+\n+\t__atomic_load(__addr, &__val, __args._M_order);\n+\tif (__val != __args._M_old)\n+\t    return make_pair(true, __val);\n       }\n-    };\n+      return make_pair(false, __val);\n+    }\n \n-    template<typename _EntersWait>\n-      struct __timed_waiter : __waiter_base<__timed_waiter_pool>\n-      {\n-\tusing __base_type = __waiter_base<__timed_waiter_pool>;\n+    inline __wait_result_type\n+    __wait_until_impl(const __platform_wait_t* __addr, __wait_args __args,\n+\t\t      const __wait_clock_t::time_point& __atime)\n+    {\n+#ifdef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT\n+      __waiter_pool_impl* __pool = nullptr;\n+#else\n+      // if we don't have __platform_wait, we always need the side-table\n+      __waiter_pool_impl* __pool = &__waiter_pool_impl::_S_impl_for(__addr);\n+#endif\n \n-\ttemplate<typename _Tp>\n-\t  __timed_waiter(const _Tp* __addr) noexcept\n-\t  : __base_type(__addr)\n+      __platform_wait_t* __wait_addr;\n+      if (__args & __wait_flags::__proxy_wait)\n \t{\n-\t  if constexpr (_EntersWait::value)\n-\t    _M_w._M_enter_wait();\n+#ifdef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT\n+\t  __pool = &__waiter_pool_impl::_S_impl_for(__addr);\n+#endif\n+\t  __wait_addr = &__pool->_M_ver;\n+\t  __atomic_load(__wait_addr, &__args._M_old, __args._M_order);\n \t}\n+      else\n+\t__wait_addr = const_cast<__platform_wait_t*>(__addr);\n \n-\t~__timed_waiter()\n+      if (__args & __wait_flags::__do_spin)\n \t{\n-\t  if constexpr (_EntersWait::value)\n-\t    _M_w._M_leave_wait();\n+\t  auto __res = __detail::__spin_until_impl(__wait_addr, __args, __atime);\n+\t  if (__res.first)\n+\t    return __res;\n+\t  if (__args & __wait_flags::__spin_only)\n+\t    return __res;\n \t}\n \n-\t// returns true if wait ended before timeout\n-\ttemplate<typename _Tp, typename _ValFn,\n-\t\t typename _Clock, typename _Dur>\n-\t  bool\n-\t  _M_do_wait_until_v(_Tp __old, _ValFn __vfn,\n-\t\t\t     const chrono::time_point<_Clock, _Dur>&\n-\t\t\t\t\t\t\t\t__atime) 
noexcept\n-\t  {\n-\t    __platform_wait_t __val;\n-\t    if (_M_do_spin(__old, std::move(__vfn), __val,\n-\t\t\t   __timed_backoff_spin_policy(__atime)))\n-\t      return true;\n-\t    return __base_type::_M_w._M_do_wait_until(__base_type::_M_addr, __val, __atime);\n-\t  }\n-\n-\t// returns true if wait ended before timeout\n-\ttemplate<typename _Pred,\n-\t\t typename _Clock, typename _Dur>\n-\t  bool\n-\t  _M_do_wait_until(_Pred __pred, __platform_wait_t __val,\n-\t\t\t  const chrono::time_point<_Clock, _Dur>&\n-\t\t\t\t\t\t\t      __atime) noexcept\n-\t  {\n-\t    for (auto __now = _Clock::now(); __now < __atime;\n-\t\t  __now = _Clock::now())\n-\t      {\n-\t\tif (__base_type::_M_w._M_do_wait_until(\n-\t\t      __base_type::_M_addr, __val, __atime)\n-\t\t    && __pred())\n-\t\t  return true;\n-\n-\t\tif (__base_type::_M_do_spin(__pred, __val,\n-\t\t\t       __timed_backoff_spin_policy(__atime, __now)))\n-\t\t  return true;\n-\t      }\n-\t    return false;\n-\t  }\n-\n-\t// returns true if wait ended before timeout\n-\ttemplate<typename _Pred,\n-\t\t typename _Clock, typename _Dur>\n-\t  bool\n-\t  _M_do_wait_until(_Pred __pred,\n-\t\t\t   const chrono::time_point<_Clock, _Dur>&\n-\t\t\t\t\t\t\t\t__atime) noexcept\n-\t  {\n-\t    __platform_wait_t __val;\n-\t    if (__base_type::_M_do_spin(__pred, __val,\n-\t\t\t\t\t__timed_backoff_spin_policy(__atime)))\n-\t      return true;\n-\t    return _M_do_wait_until(__pred, __val, __atime);\n-\t  }\n-\n-\ttemplate<typename _Tp, typename _ValFn,\n-\t\t typename _Rep, typename _Period>\n-\t  bool\n-\t  _M_do_wait_for_v(_Tp __old, _ValFn __vfn,\n-\t\t\t   const chrono::duration<_Rep, _Period>&\n-\t\t\t\t\t\t\t\t__rtime) noexcept\n-\t  {\n-\t    __platform_wait_t __val;\n-\t    if (_M_do_spin_v(__old, std::move(__vfn), __val))\n-\t      return true;\n-\n-\t    if (!__rtime.count())\n-\t      return false; // no rtime supplied, and spin did not acquire\n-\n-\t    auto __reltime = 
chrono::ceil<__wait_clock_t::duration>(__rtime);\n-\n-\t    return __base_type::_M_w._M_do_wait_until(\n-\t\t\t\t\t  __base_type::_M_addr,\n-\t\t\t\t\t  __val,\n-\t\t\t\t\t  chrono::steady_clock::now() + __reltime);\n-\t  }\n-\n-\ttemplate<typename _Pred,\n-\t\t typename _Rep, typename _Period>\n-\t  bool\n-\t  _M_do_wait_for(_Pred __pred,\n-\t\t\t const chrono::duration<_Rep, _Period>& __rtime) noexcept\n-\t  {\n-\t    __platform_wait_t __val;\n-\t    if (__base_type::_M_do_spin(__pred, __val))\n-\t      return true;\n+      if (!(__args & __wait_flags::__track_contention))\n+      {\n+\t// caller does not externally track contention\n+#ifdef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT\n+\t__pool = (__pool == nullptr) ? &__waiter_pool_impl::_S_impl_for(__addr)\n+\t\t\t\t     : __pool;\n+#endif\n+\t__pool->_M_enter_wait();\n+      }\n \n-\t    if (!__rtime.count())\n-\t      return false; // no rtime supplied, and spin did not acquire\n+      __wait_result_type __res;\n+#ifdef _GLIBCXX_HAVE_PLATFORM_TIMED_WAIT\n+      if (__platform_wait_until(__wait_addr, __args._M_old, __atime))\n+\t__res = make_pair(true, __args._M_old);\n+      else\n+\t__res = make_pair(false, __args._M_old);\n+#else\n+      __platform_wait_t __val;\n+      __atomic_load(__wait_addr, &__val, __args._M_order);\n+      if (__val == __args._M_old)\n+\t{\n+\t   lock_guard<mutex> __l{ __pool->_M_mtx };\n+\t   __atomic_load(__wait_addr, &__val, __args._M_order);\n+\t   if (__val == __args._M_old &&\n+\t       __cond_wait_until(__pool->_M_cv, __pool->_M_mtx, __atime))\n+\t     __res = make_pair(true, __val);\n+\t}\n+      else\n+\t__res = make_pair(false, __val);\n+#endif\n \n-\t    auto __reltime = chrono::ceil<__wait_clock_t::duration>(__rtime);\n+      if (!(__args & __wait_flags::__track_contention))\n+\t  // caller does not externally track contention\n+\t  __pool->_M_leave_wait();\n+      return __res;\n+    }\n \n-\t    return _M_do_wait_until(__pred, __val,\n-\t\t\t\t    chrono::steady_clock::now() + 
__reltime);\n-\t  }\n-      };\n+    template<typename _Clock, typename _Dur>\n+      __wait_result_type\n+      __wait_until(const __platform_wait_t* __addr, __wait_args __args,\n+\t\t   const chrono::time_point<_Clock, _Dur>& __atime) noexcept\n+      {\n+\tif constexpr (is_same_v<__wait_clock_t, _Clock>)\n+\t  return __detail::__wait_until_impl(__addr, __args, __atime);\n+\telse\n+\t  {\n+\t     auto __res = __detail::__wait_until_impl(__addr, __args,\n+\t\t\t\t\t    __to_wait_clock(__atime));\n+\t     if (!__res.first)\n+\t       {\n+\t\t  // We got a timeout when measured against __clock_t but\n+\t\t  // we need to check against the caller-supplied clock\n+\t\t  // to tell whether we should return a timeout.\n+\t\t  if (_Clock::now() < __atime)\n+\t\t    return make_pair(true, __res.second);\n+\t\t}\n+\t      return __res;\n+\t    }\n+      }\n \n-    using __enters_timed_wait = __timed_waiter<std::true_type>;\n-    using __bare_timed_wait = __timed_waiter<std::false_type>;\n+    template<typename _Rep, typename _Period>\n+      __wait_result_type\n+      __wait_for(const __platform_wait_t* __addr, __wait_args __args,\n+\t\t const chrono::duration<_Rep, _Period>& __rtime) noexcept\n+    {\n+      if (!__rtime.count())\n+\t// no rtime supplied, just spin a bit\n+\treturn __detail::__wait_impl(__addr, __args | __wait_flags::__spin_only);\n+      auto const __reltime = chrono::ceil<__wait_clock_t::duration>(__rtime);\n+      auto const __atime = chrono::steady_clock::now() + __reltime;\n+      return __detail::__wait_until(__addr, __args, __atime);\n+    }\n   } // namespace __detail\n \n   // returns true if wait ended before timeout\n-  template<typename _Tp, typename _ValFn,\n+  template<typename _Tp,\n+\t   typename _Pred, typename _ValFn,\n \t   typename _Clock, typename _Dur>\n     bool\n-    __atomic_wait_address_until_v(const _Tp* __addr, _Tp&& __old, _ValFn&& __vfn,\n-\t\t\tconst chrono::time_point<_Clock, _Dur>&\n-\t\t\t    __atime) noexcept\n+    
__atomic_wait_address_until(const _Tp* __addr, _Pred&& __pred,\n+\t\t\t\t_ValFn&& __vfn,\n+\t\t\t\tconst chrono::time_point<_Clock, _Dur>& __atime,\n+\t\t\t\tbool __bare_wait = false) noexcept\n     {\n-      __detail::__enters_timed_wait __w{__addr};\n-      return __w._M_do_wait_until_v(__old, __vfn, __atime);\n+       const auto __wait_addr =\n+\t   reinterpret_cast<const __detail::__platform_wait_t*>(__addr);\n+       __detail::__wait_args __args{ __addr, __bare_wait };\n+       _Tp __val = __vfn();\n+       while (!__pred(__val))\n+\t {\n+\t   auto __res = __detail::__wait_until(__wait_addr, __args, __atime);\n+\t   if (!__res.first)\n+\t     // timed out\n+\t     return __res.first; // C++26 will also return last observed __val\n+\t   __val = __vfn();\n+\t }\n+       return true; // C++26 will also return last observed __val\n     }\n \n-  template<typename _Tp, typename _Pred,\n-\t   typename _Clock, typename _Dur>\n+  template<typename _Clock, typename _Dur>\n     bool\n-    __atomic_wait_address_until(const _Tp* __addr, _Pred __pred,\n-\t\t\t\tconst chrono::time_point<_Clock, _Dur>&\n-\t\t\t\t\t\t\t      __atime) noexcept\n+    __atomic_wait_address_until_v(const __detail::__platform_wait_t* __addr,\n+\t\t\t\t  __detail::__platform_wait_t __old,\n+\t\t\t\t  int __order,\n+\t\t\t\t  const chrono::time_point<_Clock, _Dur>& __atime,\n+\t\t\t\t  bool __bare_wait = false) noexcept\n     {\n-      __detail::__enters_timed_wait __w{__addr};\n-      return __w._M_do_wait_until(__pred, __atime);\n+      __detail::__wait_args __args{ __addr, __old, __order, __bare_wait };\n+      auto __res = __detail::__wait_until(__addr, __args, __atime);\n+      return __res.first; // C++26 will also return last observed __val\n     }\n \n-  template<typename _Pred,\n+  template<typename _Tp, typename _ValFn,\n \t   typename _Clock, typename _Dur>\n     bool\n-    __atomic_wait_address_until_bare(const __detail::__platform_wait_t* __addr,\n-\t\t\t\t_Pred __pred,\n-\t\t\t\tconst 
chrono::time_point<_Clock, _Dur>&\n-\t\t\t\t\t\t\t      __atime) noexcept\n+    __atomic_wait_address_until_v(const _Tp* __addr, _Tp&& __old, _ValFn&& __vfn,\n+\t\t\t\t  const chrono::time_point<_Clock, _Dur>& __atime,\n+\t\t\t\t  bool __bare_wait = false) noexcept\n     {\n-      __detail::__bare_timed_wait __w{__addr};\n-      return __w._M_do_wait_until(__pred, __atime);\n+       auto __pfn = [&](const _Tp& __val)\n+\t   { return !__detail::__atomic_compare(__old, __val); };\n+       return __atomic_wait_address_until(__addr, __pfn, forward<_ValFn>(__vfn),\n+\t\t\t\t\t  __atime, __bare_wait);\n     }\n \n-  template<typename _Tp, typename _ValFn,\n+  template<typename _Tp,\n+\t   typename _Pred, typename _ValFn,\n \t   typename _Rep, typename _Period>\n-    bool\n-    __atomic_wait_address_for_v(const _Tp* __addr, _Tp&& __old, _ValFn&& __vfn,\n-\t\t      const chrono::duration<_Rep, _Period>& __rtime) noexcept\n-    {\n-      __detail::__enters_timed_wait __w{__addr};\n-      return __w._M_do_wait_for_v(__old, __vfn, __rtime);\n-    }\n+   bool\n+   __atomic_wait_address_for(const _Tp* __addr, _Pred&& __pred,\n+\t\t\t     _ValFn&& __vfn,\n+\t\t\t     const chrono::duration<_Rep, _Period>& __rtime,\n+\t\t\t     bool __bare_wait = false) noexcept\n+  {\n+      const auto __wait_addr =\n+\t  reinterpret_cast<const __detail::__platform_wait_t*>(__addr);\n+      __detail::__wait_args __args{ __addr, __bare_wait };\n+      _Tp __val = __vfn();\n+      while (!__pred(__val))\n+\t{\n+\t  auto __res = __detail::__wait_for(__wait_addr, __args, __rtime);\n+\t  if (!__res.first)\n+\t    // timed out\n+\t    return __res.first; // C++26 will also return last observed __val\n+\t  __val = __vfn();\n+\t}\n+      return true; // C++26 will also return last observed __val\n+  }\n \n-  template<typename _Tp, typename _Pred,\n-\t   typename _Rep, typename _Period>\n+  template<typename _Rep, typename _Period>\n     bool\n-    __atomic_wait_address_for(const _Tp* __addr, _Pred 
__pred,\n-\t\t      const chrono::duration<_Rep, _Period>& __rtime) noexcept\n-    {\n-\n-      __detail::__enters_timed_wait __w{__addr};\n-      return __w._M_do_wait_for(__pred, __rtime);\n-    }\n+    __atomic_wait_address_for_v(const __detail::__platform_wait_t* __addr,\n+\t\t\t\t__detail::__platform_wait_t __old,\n+\t\t\t\tint __order,\n+\t\t\t\tconst chrono::time_point<_Rep, _Period>& __rtime,\n+\t\t\t\tbool __bare_wait = false) noexcept\n+  {\n+    __detail::__wait_args __args{ __addr, __old, __order, __bare_wait };\n+    auto __res = __detail::__wait_for(__addr, __args, __rtime);\n+    return __res.first; // C++26 will also return last observed __Val\n+  }\n \n-  template<typename _Pred,\n+  template<typename _Tp, typename _ValFn,\n \t   typename _Rep, typename _Period>\n     bool\n-    __atomic_wait_address_for_bare(const __detail::__platform_wait_t* __addr,\n-\t\t\t_Pred __pred,\n-\t\t\tconst chrono::duration<_Rep, _Period>& __rtime) noexcept\n+    __atomic_wait_address_for_v(const _Tp* __addr, _Tp&& __old, _ValFn&& __vfn,\n+\t\t\t\tconst chrono::duration<_Rep, _Period>& __rtime,\n+\t\t\t\tbool __bare_wait = false) noexcept\n     {\n-      __detail::__bare_timed_wait __w{__addr};\n-      return __w._M_do_wait_for(__pred, __rtime);\n+      auto __pfn = [&](const _Tp& __val)\n+\t  { return !__detail::__atomic_compare(__old, __val); };\n+      return __atomic_wait_address_for(__addr, __pfn, forward<_ValFn>(__vfn),\n+\t\t\t\t       __rtime, __bare_wait);\n     }\n _GLIBCXX_END_NAMESPACE_VERSION\n } // namespace std\ndiff --git a/libstdc++-v3/include/bits/atomic_wait.h b/libstdc++-v3/include/bits/atomic_wait.h\nindex 6d1554f68a56..18cfc2ef7bd2 100644\n--- a/libstdc++-v3/include/bits/atomic_wait.h\n+++ b/libstdc++-v3/include/bits/atomic_wait.h\n@@ -50,7 +50,8 @@\n # include <bits/functexcept.h>\n #endif\n \n-# include <bits/std_mutex.h>  // std::mutex, std::__condvar\n+#include <bits/stl_pair.h>\n+#include <bits/std_mutex.h>  // std::mutex, std::__condvar\n \n 
namespace std _GLIBCXX_VISIBILITY(default)\n {\n@@ -134,7 +135,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     __thread_yield() noexcept\n     {\n #if defined _GLIBCXX_HAS_GTHREADS && defined _GLIBCXX_USE_SCHED_YIELD\n-     __gthread_yield();\n+      __gthread_yield();\n #endif\n     }\n \n@@ -151,38 +152,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     inline constexpr auto __atomic_spin_count_relax = 12;\n     inline constexpr auto __atomic_spin_count = 16;\n \n-    struct __default_spin_policy\n-    {\n-      bool\n-      operator()() const noexcept\n-      { return false; }\n-    };\n-\n-    template<typename _Pred,\n-\t     typename _Spin = __default_spin_policy>\n-      bool\n-      __atomic_spin(_Pred& __pred, _Spin __spin = _Spin{ }) noexcept\n-      {\n-\tfor (auto __i = 0; __i < __atomic_spin_count; ++__i)\n-\t  {\n-\t    if (__pred())\n-\t      return true;\n-\n-\t    if (__i < __atomic_spin_count_relax)\n-\t      __detail::__thread_relax();\n-\t    else\n-\t      __detail::__thread_yield();\n-\t  }\n-\n-\twhile (__spin())\n-\t  {\n-\t    if (__pred())\n-\t      return true;\n-\t  }\n-\n-\treturn false;\n-      }\n-\n     // return true if equal\n     template<typename _Tp>\n       bool __atomic_compare(const _Tp& __a, const _Tp& __b)\n@@ -191,7 +160,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \treturn __builtin_memcmp(&__a, &__b, sizeof(_Tp)) == 0;\n       }\n \n-    struct __waiter_pool_base\n+    struct __waiter_pool_impl\n     {\n       // Don't use std::hardware_destructive_interference_size here because we\n       // don't want the layout of library types to depend on compiler options.\n@@ -208,7 +177,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n #ifndef _GLIBCXX_HAVE_PLATFORM_WAIT\n       __condvar _M_cv;\n #endif\n-      __waiter_pool_base() = default;\n+      __waiter_pool_impl() = default;\n \n       void\n       _M_enter_wait() noexcept\n@@ -226,256 +195,271 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \treturn __res != 0;\n       }\n \n-      void\n-      
_M_notify(__platform_wait_t* __addr, [[maybe_unused]] bool __all,\n-\t\tbool __bare) noexcept\n-      {\n-#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n-\tif (__addr == &_M_ver)\n-\t  {\n-\t    __atomic_fetch_add(__addr, 1, __ATOMIC_SEQ_CST);\n-\t    __all = true;\n-\t  }\n-\n-\tif (__bare || _M_waiting())\n-\t  __platform_notify(__addr, __all);\n-#else\n-\t{\n-\t  lock_guard<mutex> __l(_M_mtx);\n-\t  __atomic_fetch_add(__addr, 1, __ATOMIC_RELAXED);\n-\t}\n-\tif (__bare || _M_waiting())\n-\t  _M_cv.notify_all();\n-#endif\n-      }\n-\n-      static __waiter_pool_base&\n-      _S_for(const void* __addr) noexcept\n+      static __waiter_pool_impl&\n+      _S_impl_for(const void* __addr) noexcept\n       {\n \tconstexpr __UINTPTR_TYPE__ __ct = 16;\n-\tstatic __waiter_pool_base __w[__ct];\n+\tstatic __waiter_pool_impl __w[__ct];\n \tauto __key = ((__UINTPTR_TYPE__)__addr >> 2) % __ct;\n \treturn __w[__key];\n       }\n     };\n \n-    struct __waiter_pool : __waiter_pool_base\n+    enum class __wait_flags : __UINT_LEAST32_TYPE__\n     {\n-      void\n-      _M_do_wait(const __platform_wait_t* __addr, __platform_wait_t __old) noexcept\n-      {\n-#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n-\t__platform_wait(__addr, __old);\n-#else\n-\t__platform_wait_t __val;\n-\t__atomic_load(__addr, &__val, __ATOMIC_SEQ_CST);\n-\tif (__val == __old)\n-\t  {\n-\t    lock_guard<mutex> __l(_M_mtx);\n-\t    __atomic_load(__addr, &__val, __ATOMIC_RELAXED);\n-\t    if (__val == __old)\n-\t      _M_cv.wait(_M_mtx);\n-\t  }\n-#endif // __GLIBCXX_HAVE_PLATFORM_WAIT\n-      }\n+       __abi_version = 0,\n+       __proxy_wait = 1,\n+       __track_contention = 2,\n+       __do_spin = 4,\n+       __spin_only = 8 | __do_spin, // implies __do_spin\n+       __abi_version_mask = 0xffff0000,\n     };\n \n-    template<typename _Tp>\n-      struct __waiter_base\n+    struct __wait_args\n+    {\n+      __platform_wait_t _M_old;\n+      int _M_order = __ATOMIC_ACQUIRE;\n+      __wait_flags _M_flags;\n+\n+      
template<typename _Tp>\n+\texplicit __wait_args(const _Tp* __addr,\n+\t\t\t     bool __bare_wait = false) noexcept\n+\t    : _M_flags{ _S_flags_for(__addr, __bare_wait) }\n+\t{ }\n+\n+      __wait_args(const __platform_wait_t* __addr, __platform_wait_t __old,\n+\t\t  int __order, bool __bare_wait = false) noexcept\n+\t  : _M_old{ __old }\n+\t  , _M_order{ __order }\n+\t  , _M_flags{ _S_flags_for(__addr, __bare_wait) }\n+\t{ }\n+\n+      __wait_args(const __wait_args&) noexcept = default;\n+      __wait_args&\n+      operator=(const __wait_args&) noexcept = default;\n+\n+      bool\n+      operator&(__wait_flags __flag) const noexcept\n       {\n-\tusing __waiter_type = _Tp;\n-\n-\t__waiter_type& _M_w;\n-\t__platform_wait_t* _M_addr;\n-\n-\ttemplate<typename _Up>\n-\t  static __platform_wait_t*\n-\t  _S_wait_addr(const _Up* __a, __platform_wait_t* __b)\n-\t  {\n-\t    if constexpr (__platform_wait_uses_type<_Up>)\n-\t      return reinterpret_cast<__platform_wait_t*>(const_cast<_Up*>(__a));\n-\t    else\n-\t      return __b;\n-\t  }\n-\n-\tstatic __waiter_type&\n-\t_S_for(const void* __addr) noexcept\n-\t{\n-\t  static_assert(sizeof(__waiter_type) == sizeof(__waiter_pool_base));\n-\t  auto& res = __waiter_pool_base::_S_for(__addr);\n-\t  return reinterpret_cast<__waiter_type&>(res);\n-\t}\n+\t using __t = underlying_type_t<__wait_flags>;\n+\t return static_cast<__t>(_M_flags)\n+\t     & static_cast<__t>(__flag);\n+      }\n \n-\ttemplate<typename _Up>\n-\t  explicit __waiter_base(const _Up* __addr) noexcept\n-\t    : _M_w(_S_for(__addr))\n-\t    , _M_addr(_S_wait_addr(__addr, &_M_w._M_ver))\n-\t  { }\n-\n-\tvoid\n-\t_M_notify(bool __all, bool __bare = false) noexcept\n-\t{ _M_w._M_notify(_M_addr, __all, __bare); }\n-\n-\ttemplate<typename _Up, typename _ValFn,\n-\t\t typename _Spin = __default_spin_policy>\n-\t  static bool\n-\t  _S_do_spin_v(__platform_wait_t* __addr,\n-\t\t       const _Up& __old, _ValFn __vfn,\n-\t\t       __platform_wait_t& __val,\n-\t\t       
_Spin __spin = _Spin{ })\n-\t  {\n-\t    auto const __pred = [=]\n-\t      { return !__detail::__atomic_compare(__old, __vfn()); };\n-\n-\t    if constexpr (__platform_wait_uses_type<_Up>)\n-\t      {\n-\t\t__builtin_memcpy(&__val, &__old, sizeof(__val));\n-\t      }\n-\t    else\n-\t      {\n-\t\t__atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);\n-\t      }\n-\t    return __atomic_spin(__pred, __spin);\n-\t  }\n-\n-\ttemplate<typename _Up, typename _ValFn,\n-\t\t typename _Spin = __default_spin_policy>\n-\t  bool\n-\t  _M_do_spin_v(const _Up& __old, _ValFn __vfn,\n-\t\t       __platform_wait_t& __val,\n-\t\t       _Spin __spin = _Spin{ })\n-\t  { return _S_do_spin_v(_M_addr, __old, __vfn, __val, __spin); }\n-\n-\ttemplate<typename _Pred,\n-\t\t typename _Spin = __default_spin_policy>\n-\t  static bool\n-\t  _S_do_spin(const __platform_wait_t* __addr,\n-\t\t     _Pred __pred,\n-\t\t     __platform_wait_t& __val,\n-\t\t     _Spin __spin = _Spin{ })\n-\t  {\n-\t    __atomic_load(__addr, &__val, __ATOMIC_ACQUIRE);\n-\t    return __atomic_spin(__pred, __spin);\n-\t  }\n-\n-\ttemplate<typename _Pred,\n-\t\t typename _Spin = __default_spin_policy>\n-\t  bool\n-\t  _M_do_spin(_Pred __pred, __platform_wait_t& __val,\n-\t\t     _Spin __spin = _Spin{ })\n-\t  { return _S_do_spin(_M_addr, __pred, __val, __spin); }\n-      };\n-\n-    template<typename _EntersWait>\n-      struct __waiter : __waiter_base<__waiter_pool>\n+      __wait_args\n+      operator|(__wait_flags __flag) const noexcept\n       {\n-\tusing __base_type = __waiter_base<__waiter_pool>;\n+\tusing __t = underlying_type_t<__wait_flags>;\n+\t__wait_args __res{ *this };\n+\tconst auto __flags = static_cast<__t>(__res._M_flags)\n+\t\t\t     | static_cast<__t>(__flag);\n+\t__res._M_flags = __wait_flags{ __flags };\n+\treturn __res;\n+      }\n \n-\ttemplate<typename _Tp>\n-\t  explicit __waiter(const _Tp* __addr) noexcept\n-\t    : __base_type(__addr)\n-\t  {\n-\t    if constexpr (_EntersWait::value)\n-\t      
_M_w._M_enter_wait();\n-\t  }\n+    private:\n+      static int\n+      constexpr _S_default_flags() noexcept\n+      {\n+\tusing __t = underlying_type_t<__wait_flags>;\n+\treturn static_cast<__t>(__wait_flags::__abi_version)\n+\t\t| static_cast<__t>(__wait_flags::__do_spin);\n+      }\n \n-\t~__waiter()\n+      template<typename _Tp>\n+\tstatic int\n+\tconstexpr _S_flags_for(const _Tp*, bool __bare_wait) noexcept\n \t{\n-\t  if constexpr (_EntersWait::value)\n-\t    _M_w._M_leave_wait();\n+\t  auto __res = _S_default_flags();\n+\t  if (!__bare_wait)\n+\t    __res |= static_cast<int>(__wait_flags::__track_contention);\n+\t  if constexpr (!__platform_wait_uses_type<_Tp>)\n+\t    __res |= static_cast<int>(__wait_flags::__proxy_wait);\n+\t  return __res;\n \t}\n \n-\ttemplate<typename _Tp, typename _ValFn>\n-\t  void\n-\t  _M_do_wait_v(_Tp __old, _ValFn __vfn)\n-\t  {\n-\t    do\n-\t      {\n-\t\t__platform_wait_t __val;\n-\t\tif (__base_type::_M_do_spin_v(__old, __vfn, __val))\n-\t\t  return;\n-\t\t__base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);\n-\t      }\n-\t    while (__detail::__atomic_compare(__old, __vfn()));\n-\t  }\n-\n-\ttemplate<typename _Pred>\n-\t  void\n-\t  _M_do_wait(_Pred __pred) noexcept\n-\t  {\n-\t    do\n-\t      {\n-\t\t__platform_wait_t __val;\n-\t\tif (__base_type::_M_do_spin(__pred, __val))\n-\t\t  return;\n-\t\t__base_type::_M_w._M_do_wait(__base_type::_M_addr, __val);\n-\t      }\n-\t    while (!__pred());\n-\t  }\n-      };\n-\n-    using __enters_wait = __waiter<std::true_type>;\n-    using __bare_wait = __waiter<std::false_type>;\n-  } // namespace __detail\n+      template<typename _Tp>\n+\tstatic int\n+\t_S_memory_order_for(const _Tp*, int __order) noexcept\n+\t{\n+\t  if constexpr (__platform_wait_uses_type<_Tp>)\n+\t    return __order;\n+\t  return __ATOMIC_ACQUIRE;\n+\t}\n+    };\n \n-  template<typename _Tp, typename _ValFn>\n-    void\n-    __atomic_wait_address_v(const _Tp* __addr, _Tp __old,\n-\t\t\t    _ValFn __vfn) 
noexcept\n+    using __wait_result_type = pair<bool, __platform_wait_t>;\n+    inline __wait_result_type\n+    __spin_impl(const __platform_wait_t* __addr, __wait_args __args)\n     {\n-      __detail::__enters_wait __w(__addr);\n-      __w._M_do_wait_v(__old, __vfn);\n+      __platform_wait_t __val;\n+      for (auto __i = 0; __i < __atomic_spin_count; ++__i)\n+\t{\n+\t  __atomic_load(__addr, &__val, __args._M_order);\n+\t  if (__val != __args._M_old)\n+\t    return make_pair(true, __val);\n+\t  if (__i < __atomic_spin_count_relax)\n+\t    __detail::__thread_relax();\n+\t  else\n+\t    __detail::__thread_yield();\n+\t}\n+      return make_pair(false, __val);\n     }\n \n-  template<typename _Tp, typename _Pred>\n-    void\n-    __atomic_wait_address(const _Tp* __addr, _Pred __pred) noexcept\n+    inline __wait_result_type\n+    __wait_impl(const __platform_wait_t* __addr, __wait_args __args)\n     {\n-      __detail::__enters_wait __w(__addr);\n-      __w._M_do_wait(__pred);\n+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n+      __waiter_pool_impl* __pool = nullptr;\n+#else\n+      // if we don't have __platform_wait, we always need the side-table\n+      __waiter_pool_impl* __pool = &__waiter_pool_impl::_S_impl_for(__addr);\n+#endif\n+\n+      __platform_wait_t* __wait_addr;\n+      if (__args & __wait_flags::__proxy_wait)\n+\t{\n+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n+\t  __pool = &__waiter_pool_impl::_S_impl_for(__addr);\n+#endif\n+\t  __wait_addr = &__pool->_M_ver;\n+\t  __atomic_load(__wait_addr, &__args._M_old, __args._M_order);\n+\t}\n+      else\n+\t__wait_addr = const_cast<__platform_wait_t*>(__addr);\n+\n+      if (__args & __wait_flags::__do_spin)\n+\t{\n+\t  auto __res = __detail::__spin_impl(__wait_addr, __args);\n+\t  if (__res.first)\n+\t    return __res;\n+\t  if (__args & __wait_flags::__spin_only)\n+\t    return __res;\n+\t}\n+\n+      if (!(__args & __wait_flags::__track_contention))\n+\t{\n+\t  // caller does not externally track contention\n+#ifdef 
_GLIBCXX_HAVE_PLATFORM_WAIT\n+\t  __pool = (__pool == nullptr) ? &__waiter_pool_impl::_S_impl_for(__addr)\n+\t\t\t\t       : __pool;\n+#endif\n+\t  __pool->_M_enter_wait();\n+\t}\n+\n+      __wait_result_type __res;\n+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n+      __platform_wait(__wait_addr, __args._M_old);\n+      __res = make_pair(false, __args._M_old);\n+#else\n+      __platform_wait_t __val;\n+      __atomic_load(__wait_addr, &__val, __args._M_order);\n+      if (__val == __args._M_old)\n+\t{\n+\t    lock_guard<mutex> __l{ __pool->_M_mtx };\n+\t    __atomic_load(__wait_addr, &__val, __args._M_order);\n+\t    if (__val == __args._M_old)\n+\t\t__pool->_M_cv.wait(__pool->_M_mtx);\n+\t}\n+      __res = make_pair(false, __val);\n+#endif\n+\n+      if (!(__args & __wait_flags::__track_contention))\n+\t// caller does not externally track contention\n+\t__pool->_M_leave_wait();\n+      return __res;\n     }\n \n-  // This call is to be used by atomic types which track contention externally\n-  template<typename _Pred>\n-    void\n-    __atomic_wait_address_bare(const __detail::__platform_wait_t* __addr,\n-\t\t\t       _Pred __pred) noexcept\n+    inline void\n+    __notify_impl(const __platform_wait_t* __addr, [[maybe_unused]] bool __all,\n+\t\t  __wait_args __args)\n     {\n #ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n-      do\n+      __waiter_pool_impl* __pool = nullptr;\n+#else\n+      // if we don't have __platform_notify, we always need the side-table\n+      __waiter_pool_impl* __pool = &__waiter_pool_impl::_S_impl_for(__addr);\n+#endif\n+\n+      if (!(__args & __wait_flags::__track_contention))\n \t{\n-\t  __detail::__platform_wait_t __val;\n-\t  if (__detail::__bare_wait::_S_do_spin(__addr, __pred, __val))\n+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n+\t  __pool = &__waiter_pool_impl::_S_impl_for(__addr);\n+#endif\n+\t  if (!__pool->_M_waiting())\n \t    return;\n-\t  __detail::__platform_wait(__addr, __val);\n \t}\n-      while (!__pred());\n-#else // 
!_GLIBCXX_HAVE_PLATFORM_WAIT\n-      __detail::__bare_wait __w(__addr);\n-      __w._M_do_wait(__pred);\n+\n+      __platform_wait_t* __wait_addr;\n+      if (__args & __wait_flags::__proxy_wait)\n+\t{\n+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n+\t   __pool = (__pool == nullptr) ? &__waiter_pool_impl::_S_impl_for(__addr)\n+\t\t\t\t\t: __pool;\n+#endif\n+\t   __wait_addr = &__pool->_M_ver;\n+\t   __atomic_fetch_add(__wait_addr, 1, __ATOMIC_RELAXED);\n+\t   __all = true;\n+\t }\n+\n+#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n+      __platform_notify(__wait_addr, __all);\n+#else\n+      lock_guard<mutex> __l{ __pool->_M_mtx };\n+      __pool->_M_cv.notify_all();\n #endif\n     }\n+  } // namespace __detail\n \n-  template<typename _Tp>\n+  template<typename _Tp,\n+\t   typename _Pred, typename _ValFn>\n     void\n-    __atomic_notify_address(const _Tp* __addr, bool __all) noexcept\n+    __atomic_wait_address(const _Tp* __addr,\n+\t\t\t  _Pred&& __pred, _ValFn&& __vfn,\n+\t\t\t  bool __bare_wait = false) noexcept\n     {\n-      __detail::__bare_wait __w(__addr);\n-      __w._M_notify(__all);\n+      const auto __wait_addr =\n+\t  reinterpret_cast<const __detail::__platform_wait_t*>(__addr);\n+      __detail::__wait_args __args{ __addr, __bare_wait };\n+      _Tp __val = __vfn();\n+      while (!__pred(__val))\n+\t{\n+\t  __detail::__wait_impl(__wait_addr, __args);\n+\t  __val = __vfn();\n+\t}\n+      // C++26 will return __val\n     }\n \n-  // This call is to be used by atomic types which track contention externally\n   inline void\n-  __atomic_notify_address_bare(const __detail::__platform_wait_t* __addr,\n-\t\t\t       bool __all) noexcept\n+  __atomic_wait_address_v(const __detail::__platform_wait_t* __addr,\n+\t\t\t  __detail::__platform_wait_t __old,\n+\t\t\t  int __order)\n   {\n-#ifdef _GLIBCXX_HAVE_PLATFORM_WAIT\n-    __detail::__platform_notify(__addr, __all);\n-#else\n-    __detail::__bare_wait __w(__addr);\n-    __w._M_notify(__all, true);\n-#endif\n+    
__detail::__wait_args __args{ __addr, __old, __order };\n+    // C++26 will not ignore the return value here\n+    __detail::__wait_impl(__addr, __args);\n   }\n+\n+  template<typename _Tp, typename _ValFn>\n+    void\n+    __atomic_wait_address_v(const _Tp* __addr, _Tp __old,\n+\t\t\t    _ValFn __vfn) noexcept\n+    {\n+      auto __pfn = [&](const _Tp& __val)\n+\t  { return !__detail::__atomic_compare(__old, __val); };\n+      __atomic_wait_address(__addr, __pfn, forward<_ValFn>(__vfn));\n+    }\n+\n+  template<typename _Tp>\n+    void\n+    __atomic_notify_address(const _Tp* __addr, bool __all,\n+\t\t\t    bool __bare_wait = false) noexcept\n+    {\n+      const auto __wait_addr =\n+\t  reinterpret_cast<const __detail::__platform_wait_t*>(__addr);\n+      __detail::__wait_args __args{ __addr, __bare_wait };\n+      __detail::__notify_impl(__wait_addr, __all, __args);\n+    }\n _GLIBCXX_END_NAMESPACE_VERSION\n } // namespace std\n #endif // __glibcxx_atomic_wait\ndiff --git a/libstdc++-v3/include/bits/semaphore_base.h b/libstdc++-v3/include/bits/semaphore_base.h\nindex d8f9bd8982bf..444a1589fb5a 100644\n--- a/libstdc++-v3/include/bits/semaphore_base.h\n+++ b/libstdc++-v3/include/bits/semaphore_base.h\n@@ -181,10 +181,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     __atomic_semaphore(const __atomic_semaphore&) = delete;\n     __atomic_semaphore& operator=(const __atomic_semaphore&) = delete;\n \n+    static _GLIBCXX_ALWAYS_INLINE __detail::__platform_wait_t\n+    _S_get_current(__detail::__platform_wait_t* __counter) noexcept\n+    {\n+      return __atomic_impl::load(__counter, memory_order::acquire);\n+    }\n+\n     static _GLIBCXX_ALWAYS_INLINE bool\n-    _S_do_try_acquire(__detail::__platform_wait_t* __counter) noexcept\n+    _S_do_try_acquire(__detail::__platform_wait_t* __counter,\n+\t\t      __detail::__platform_wait_t __old) noexcept\n     {\n-      auto __old = __atomic_impl::load(__counter, memory_order::acquire);\n       if (__old == 0)\n \treturn 
false;\n \n@@ -197,17 +203,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     _GLIBCXX_ALWAYS_INLINE void\n     _M_acquire() noexcept\n     {\n-      auto const __pred =\n-\t[this] { return _S_do_try_acquire(&this->_M_counter); };\n-      std::__atomic_wait_address_bare(&_M_counter, __pred);\n+      auto const __vfn = [this]{ return _S_get_current(&this->_M_counter); };\n+      auto const __pred = [this](__detail::__platform_wait_t __cur)\n+\t{ return _S_do_try_acquire(&this->_M_counter, __cur); };\n+      std::__atomic_wait_address(&_M_counter, __pred, __vfn, true);\n     }\n \n     bool\n     _M_try_acquire() noexcept\n     {\n-      auto const __pred =\n-\t[this] { return _S_do_try_acquire(&this->_M_counter); };\n-      return std::__detail::__atomic_spin(__pred);\n+      auto const __vfn = [this]{ return _S_get_current(&this->_M_counter); };\n+      auto const __pred = [this](__detail::__platform_wait_t __cur)\n+\t{ return _S_do_try_acquire(&this->_M_counter, __cur); };\n+      return __atomic_wait_address_for(&_M_counter, __pred, __vfn,\n+\t\t\t\t\t __detail::__wait_clock_t::duration(),\n+\t\t\t\t\t true);\n     }\n \n     template<typename _Clock, typename _Duration>\n@@ -215,21 +225,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       _M_try_acquire_until(const chrono::time_point<_Clock,\n \t\t\t   _Duration>& __atime) noexcept\n       {\n-\tauto const __pred =\n-\t  [this] { return _S_do_try_acquire(&this->_M_counter); };\n-\n-\treturn __atomic_wait_address_until_bare(&_M_counter, __pred, __atime);\n+\tauto const __vfn = [this]{ return _S_get_current(&this->_M_counter); };\n+\tauto const __pred = [this](__detail::__platform_wait_t __cur)\n+\t { return _S_do_try_acquire(&this->_M_counter, __cur); };\n+\treturn std::__atomic_wait_address_until(&_M_counter,\n+\t\t\t\t\t\t__pred, __vfn, __atime, true);\n       }\n \n     template<typename _Rep, typename _Period>\n       _GLIBCXX_ALWAYS_INLINE bool\n-      _M_try_acquire_for(const chrono::duration<_Rep, _Period>& 
__rtime)\n-\tnoexcept\n+      _M_try_acquire_for(const chrono::duration<_Rep, _Period>& __rtime) noexcept\n       {\n-\tauto const __pred =\n-\t  [this] { return _S_do_try_acquire(&this->_M_counter); };\n-\n-\treturn __atomic_wait_address_for_bare(&_M_counter, __pred, __rtime);\n+\tauto const __vfn = [this]{ return _S_get_current(&this->_M_counter); };\n+\tauto const __pred = [this](__detail::__platform_wait_t __cur)\n+\t { return _S_do_try_acquire(&this->_M_counter, __cur); };\n+\treturn std::__atomic_wait_address_for(&_M_counter,\n+\t\t\t\t\t      __pred, __vfn, __rtime, true);\n       }\n \n     _GLIBCXX_ALWAYS_INLINE void\n@@ -238,9 +249,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       if (0 < __atomic_impl::fetch_add(&_M_counter, __update, memory_order_release))\n \treturn;\n       if (__update > 1)\n-\t__atomic_notify_address_bare(&_M_counter, true);\n+\t__atomic_notify_address(&_M_counter, true, true);\n       else\n-\t__atomic_notify_address_bare(&_M_counter, true);\n+\t__atomic_notify_address(&_M_counter, true, true);\n // FIXME - Figure out why this does not wake a waiting thread\n //\t__atomic_notify_address_bare(&_M_counter, false);\n     }\ndiff --git a/libstdc++-v3/include/std/barrier b/libstdc++-v3/include/std/barrier\nindex 6c3cfd44697c..62b03d0223f4 100644\n--- a/libstdc++-v3/include/std/barrier\n+++ b/libstdc++-v3/include/std/barrier\n@@ -194,11 +194,7 @@ It looks different from literature pseudocode for two main reasons:\n       wait(arrival_token&& __old_phase) const\n       {\n \t__atomic_phase_const_ref_t __phase(_M_phase);\n-\tauto const __test_fn = [=]\n-\t  {\n-\t    return __phase.load(memory_order_acquire) != __old_phase;\n-\t  };\n-\tstd::__atomic_wait_address(&_M_phase, __test_fn);\n+\t__phase.wait(__old_phase, memory_order_acquire);\n       }\n \n       void\ndiff --git a/libstdc++-v3/include/std/latch b/libstdc++-v3/include/std/latch\nindex cf648545629d..dc54a862c057 100644\n--- a/libstdc++-v3/include/std/latch\n+++ 
b/libstdc++-v3/include/std/latch\n@@ -89,8 +89,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     _GLIBCXX_ALWAYS_INLINE void\n     wait() const noexcept\n     {\n-      auto const __pred = [this] { return this->try_wait(); };\n-      std::__atomic_wait_address(&_M_a, __pred);\n+      auto const __vfn = [this] { return this->try_wait(); };\n+      auto const __pred = [this](bool __b) { return __b; };\n+      std::__atomic_wait_address(&_M_a, __pred, __vfn);\n     }\n \n     _GLIBCXX_ALWAYS_INLINE void\ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic/wait_notify/100334.cc b/libstdc++-v3/testsuite/29_atomics/atomic/wait_notify/100334.cc\nindex 018c0c98d0ec..ec596e316500 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic/wait_notify/100334.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic/wait_notify/100334.cc\n@@ -54,8 +54,8 @@ main()\n     atom->store(0);\n   }\n \n-  auto a = &std::__detail::__waiter_pool_base::_S_for(reinterpret_cast<char *>(atomics.a[0]));\n-  auto b = &std::__detail::__waiter_pool_base::_S_for(reinterpret_cast<char *>(atomics.a[1]));\n+  auto a = &std::__detail::__waiter_pool_impl::_S_impl_for(reinterpret_cast<char *>(atomics.a[0]));\n+  auto b = &std::__detail::__waiter_pool_impl::_S_impl_for(reinterpret_cast<char *>(atomics.a[1]));\n   VERIFY( a == b );\n \n   auto fut0 = std::async(std::launch::async, [&] { atomics.a[0]->wait(0); });\n",
    "prefixes": [
        "v1",
        "01/16"
    ]
}