get:
Show a patch.

patch:
Partially update a patch.

put:
Update a patch.

GET /api/patches/2226772/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2226772,
    "url": "http://patchwork.ozlabs.org/api/patches/2226772/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.3@forge-stage.sourceware.org/",
    "project": {
        "id": 17,
        "url": "http://patchwork.ozlabs.org/api/projects/17/?format=api",
        "name": "GNU Compiler Collection",
        "link_name": "gcc",
        "list_id": "gcc-patches.gcc.gnu.org",
        "list_email": "gcc-patches@gcc.gnu.org",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null,
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.3@forge-stage.sourceware.org>",
    "list_archive_url": null,
    "date": "2026-04-22T18:49:40",
    "name": "[v1,03/10] Merged common mutable operations into mutable atomic base.",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "b6d38865eddec7caf38ce2a25a2f16652d4edcd5",
    "submitter": {
        "id": 93223,
        "url": "http://patchwork.ozlabs.org/api/people/93223/?format=api",
        "name": "tkaminsk via Sourceware Forge",
        "email": "forge-bot+tkaminsk@forge-stage.sourceware.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.3@forge-stage.sourceware.org/mbox/",
    "series": [
        {
            "id": 501094,
            "url": "http://patchwork.ozlabs.org/api/series/501094/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/gcc/list/?series=501094",
            "date": "2026-04-22T18:49:39",
            "name": "WIP: libstdc++: add support for cv-qualified types in atomic_ref (P3323R1)",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/501094/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2226772/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2226772/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@legolas.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Authentication-Results": [
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=gcc.gnu.org\n (client-ip=2620:52:6:3111::32; helo=vm01.sourceware.org;\n envelope-from=gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org;\n receiver=patchwork.ozlabs.org)",
            "sourceware.org; dmarc=none (p=none dis=none)\n header.from=forge-stage.sourceware.org",
            "sourceware.org;\n spf=pass smtp.mailfrom=forge-stage.sourceware.org",
            "server2.sourceware.org;\n arc=none smtp.remote-ip=38.145.34.39"
        ],
        "Received": [
            "from vm01.sourceware.org (vm01.sourceware.org\n [IPv6:2620:52:6:3111::32])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4g18Zt4bnwz1y2d\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 23 Apr 2026 05:35:33 +1000 (AEST)",
            "from vm01.sourceware.org (localhost [127.0.0.1])\n\tby sourceware.org (Postfix) with ESMTP id 8251844E0914\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 22 Apr 2026 19:35:31 +0000 (GMT)",
            "from forge-stage.sourceware.org (vm08.sourceware.org [38.145.34.39])\n by sourceware.org (Postfix) with ESMTPS id 2259040A2C55\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 18:51:08 +0000 (GMT)",
            "from forge-stage.sourceware.org (localhost [IPv6:::1])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange x25519 server-signature ECDSA (prime256v1) server-digest SHA256)\n (No client certificate requested)\n by forge-stage.sourceware.org (Postfix) with ESMTPS id F335F43592\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 18:51:07 +0000 (UTC)"
        ],
        "DKIM-Filter": [
            "OpenDKIM Filter v2.11.0 sourceware.org 8251844E0914",
            "OpenDKIM Filter v2.11.0 sourceware.org 2259040A2C55"
        ],
        "DMARC-Filter": "OpenDMARC Filter v1.4.2 sourceware.org 2259040A2C55",
        "ARC-Filter": "OpenARC Filter v1.0.0 sourceware.org 2259040A2C55",
        "ARC-Seal": "i=1; a=rsa-sha256; d=sourceware.org; s=key; t=1776883868; cv=none;\n b=fCFHjXar2rc/yzxdZsRA3O9CTT9je0tBJmMlFvlv5sBhco6Z8vtusDKpMOyUosH5AQ1NVFS8GU8X3u3GvsbrjzuO9JRgQMUR6wyqjq3ka2lq3oqIa1bM92crowInvN1MHuAOK6a2um4OLtAJEu7uBQBoL6MyUcBqECdQ/kbubgQ=",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=sourceware.org; s=key;\n t=1776883868; c=relaxed/simple;\n bh=vnRPF61FPpxz4q7AJtQNbkXxcqaBtSCu2seprzCLi3g=;\n h=From:Date:Subject:MIME-Version:To:Message-ID;\n b=is9Ytc3ompD98OXXFq7Uk+FnOWKZ0RQFVRRprhzz0FfyWUnXwF6V1ZUKx/ItWHOcsLplqC+LZ6+Su63DtcJXG2SjUq1WcN8Xq4aV5ClPNYMEwF2gj7W2pmQq2Dz3/YSpDb6z/ol2Yxhi9bl4Q6FrmrbkgmAJSqT9rOB3U14DREk=",
        "ARC-Authentication-Results": "i=1; server2.sourceware.org",
        "From": "tkaminsk via Sourceware Forge\n <forge-bot+tkaminsk@forge-stage.sourceware.org>",
        "Date": "Wed, 22 Apr 2026 18:49:40 +0000",
        "Subject": "[PATCH v1 03/10] Merged common mutable operations into mutable atomic\n base.",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>",
        "Message-ID": "\n <bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.3@forge-stage.sourceware.org>",
        "X-Mailer": "batrachomyomachia",
        "X-Pull-Request-Organization": "gcc",
        "X-Pull-Request-Repository": "gcc-TEST",
        "X-Pull-Request": "https://forge.sourceware.org/gcc/gcc-TEST/pulls/85",
        "References": "\n <bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.0@forge-stage.sourceware.org>",
        "In-Reply-To": "\n <bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.0@forge-stage.sourceware.org>",
        "X-Patch-URL": "\n https://forge.sourceware.org/tkaminsk/gcc/commit/1970901d3cdc0ab61b8ad6a0fe76a1e683b34f14",
        "X-BeenThere": "gcc-patches@gcc.gnu.org",
        "X-Mailman-Version": "2.1.30",
        "Precedence": "list",
        "List-Id": "Gcc-patches mailing list <gcc-patches.gcc.gnu.org>",
        "List-Unsubscribe": "<https://gcc.gnu.org/mailman/options/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=unsubscribe>",
        "List-Archive": "<https://gcc.gnu.org/pipermail/gcc-patches/>",
        "List-Post": "<mailto:gcc-patches@gcc.gnu.org>",
        "List-Help": "<mailto:gcc-patches-request@gcc.gnu.org?subject=help>",
        "List-Subscribe": "<https://gcc.gnu.org/mailman/listinfo/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=subscribe>",
        "Reply-To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>,\n tkaminsk@gcc.gnu.org",
        "Errors-To": "gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org"
    },
    "content": "From: Tomasz Kamiński <tkaminsk@redhat.com>\n\nThis removes a lot of code duplication.\n---\n libstdc++-v3/include/bits/atomic_base.h | 339 +++++-------------------\n 1 file changed, 69 insertions(+), 270 deletions(-)",
    "diff": "diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h\nindex b8744ac49b45..4caa9e30b4dc 100644\n--- a/libstdc++-v3/include/bits/atomic_base.h\n+++ b/libstdc++-v3/include/bits/atomic_base.h\n@@ -1508,20 +1508,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     };\n #undef _GLIBCXX20_INIT\n \n+  // __atomic_ref_base<const _Tp> provides the common APIs for const and\n+  // types,\n+  // __atomic_ref_base<_Tp> inhserits from  __atomic_ref_base<const _Tp>,\n+  // and provides the commonn APIs implementing constrains in [atomic.ref].\n+  // __atomic_ref<_Tp> inherits from __atomic_ref_base<_Tp> (const or not-const)\n+  // adds type specific mutating APIs.\n   // atomic_ref inherits from __atomic_ref;\n-  // __atomic_ref inherits from __atomic_ref_base.\n-  //\n-  // __atomic_ref_base provides the common APIs for const and non-const types;\n-  // __atomic ref adds on top the APIs for non-const types, thus implementing\n-  // the various constraints in [atomic.ref].\n \n   template<typename _Tp>\n     struct __atomic_ref_base;\n \n-\n   template<typename _Tp>\n-    class __atomic_ref_base\n+    struct __atomic_ref_base<const _Tp>\n     {\n+    private:\n       using _Vt = remove_cv_t<_Tp>;\n \n       static consteval bool\n@@ -1559,7 +1560,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;\n \n       explicit\n-      __atomic_ref_base(_Tp& __t) : _M_ptr(std::__addressof(__t))\n+      __atomic_ref_base(const _Tp& __t)\n+        : _M_ptr(const_cast<_Tp*>(std::__addressof(__t)))\n       {\n \t__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);\n       }\n@@ -1588,60 +1590,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       _Tp* _M_ptr;\n     };\n \n-  template<typename _Tp,\n-\t   bool = is_const_v<_Tp>,\n-\t   bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,\n-\t   bool = is_floating_point_v<_Tp>,\n-\t   bool = is_pointer_v<_Tp>>\n-    
struct __atomic_ref;\n-\n-  // base classes for const qualified types\n   template<typename _Tp>\n-    struct __atomic_ref<_Tp, true, false, false, false>\n-      : __atomic_ref_base<_Tp>\n+    struct __atomic_ref_base\n+      : __atomic_ref_base<const _Tp>\n     {\n-      explicit\n-      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp>(__t)\n-      { }\n-    };\n-\n-  template<typename _Tp>\n-    struct __atomic_ref<_Tp, true, false, false, true>\n-      : __atomic_ref_base<_Tp>\n-    {\n-      using difference_type = ptrdiff_t;\n-\n-      explicit\n-      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp>(__t)\n-      { }\n-    };\n-\n-  template<typename _Tp, bool _IsIntegral, bool _IsFloatingPoint>\n-    struct __atomic_ref<_Tp, true, _IsIntegral, _IsFloatingPoint, false>\n-      : __atomic_ref_base<_Tp>\n-    {\n-      using difference_type = typename __atomic_ref_base<_Tp>::value_type;\n+      using value_type = typename __atomic_ref_base<const _Tp>::value_type;\n \n       explicit\n-      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp>(__t)\n+      __atomic_ref_base(_Tp& __t) : __atomic_ref_base<const _Tp>(__t)\n       { }\n-    };\n-\n-\n-  // base class for non-integral, non-floating-point, non-pointer types\n-  template<typename _Tp>\n-    struct __atomic_ref<_Tp, false, false, false, false>\n-      : __atomic_ref_base<_Tp>\n-    {\n-      using value_type = typename __atomic_ref_base<_Tp>::value_type;\n-      explicit\n-      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp>(__t)\n-      { }\n-\n-      void\n-      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n-\n       value_type\n       operator=(value_type __t) const noexcept\n       {\n@@ -1649,6 +1606,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \treturn __t;\n       }\n \n+      void\n+      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::store(this->_M_ptr, __t, 
__m); }\n+\n       value_type\n       exchange(value_type __desired, memory_order __m = memory_order_seq_cst)\n       const noexcept\n@@ -1705,83 +1666,56 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n #endif // __glibcxx_atomic_wait\n     };\n \n-  // base class for atomic_ref<integral-type>\n+  template<typename _Tp,\n+\t   bool = is_const_v<_Tp>,\n+\t   bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,\n+\t   bool = is_floating_point_v<_Tp>,\n+\t   bool = is_pointer_v<_Tp>>\n+    struct __atomic_ref;\n+\n+  // base classes for const qualified types\n   template<typename _Tp>\n-    struct __atomic_ref<_Tp, false, true, false, false>\n+    struct __atomic_ref<_Tp, true, false, false, false>\n       : __atomic_ref_base<_Tp>\n     {\n-      using value_type = typename __atomic_ref_base<_Tp>::value_type;\n-      using difference_type = value_type;\n-\n-      explicit\n-      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp>(__t)\n-      { }\n-\n-      value_type\n-      operator=(value_type __t) const noexcept\n-      {\n-\tthis->store(__t);\n-\treturn __t;\n-      }\n-\n-      void\n-      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n-\n-      value_type\n-      exchange(value_type __desired,\n-\t       memory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }\n-\n-      bool\n-      compare_exchange_weak(value_type& __expected, value_type __desired,\n-\t\t\t    memory_order __success,\n-\t\t\t    memory_order __failure) const noexcept\n-      {\n-\treturn __atomic_impl::compare_exchange_weak<true>(\n-\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n-      }\n-\n-      bool\n-      compare_exchange_strong(value_type& __expected, value_type __desired,\n-\t\t\t      memory_order __success,\n-\t\t\t      memory_order __failure) const noexcept\n-      {\n-\treturn 
__atomic_impl::compare_exchange_strong<true>(\n-\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n-      }\n-\n-      bool\n-      compare_exchange_weak(value_type& __expected, value_type __desired,\n-\t\t\t    memory_order __order = memory_order_seq_cst)\n-      const noexcept\n-      {\n-\treturn compare_exchange_weak(__expected, __desired, __order,\n-                                     __cmpexch_failure_order(__order));\n-      }\n+      using __atomic_ref_base<_Tp>::__atomic_ref_base;\n+    };\n \n-      bool\n-      compare_exchange_strong(value_type& __expected, value_type __desired,\n-\t\t\t      memory_order __order = memory_order_seq_cst)\n-      const noexcept\n-      {\n-\treturn compare_exchange_strong(__expected, __desired, __order,\n-\t\t\t\t       __cmpexch_failure_order(__order));\n-      }\n+  template<typename _Tp>\n+    struct __atomic_ref<_Tp, true, false, false, true>\n+      : __atomic_ref_base<_Tp>\n+    {\n+      using difference_type = ptrdiff_t;\n+      using __atomic_ref_base<_Tp>::__atomic_ref_base;\n+    };\n \n-#if __glibcxx_atomic_wait\n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_one() const noexcept\n-      { __atomic_impl::notify_one(this->_M_ptr); }\n+  template<typename _Tp, bool _IsIntegral, bool _IsFloatingPoint>\n+    struct __atomic_ref<_Tp, true, _IsIntegral, _IsFloatingPoint, false>\n+      : __atomic_ref_base<_Tp>\n+    {\n+      using difference_type = typename __atomic_ref_base<_Tp>::value_type;\n+      using __atomic_ref_base<_Tp>::__atomic_ref_base;\n+    };\n \n-      // TODO add const volatile overload\n+  // base class for non-integral, non-floating-point, non-pointer types\n+  template<typename _Tp>\n+    struct __atomic_ref<_Tp, false, false, false, false>\n+      : __atomic_ref_base<_Tp>\n+    {\n+      using __atomic_ref_base<_Tp>::__atomic_ref_base;\n+      using __atomic_ref_base<_Tp>::operator=;\n+     };\n \n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_all() const noexcept\n-      
{ __atomic_impl::notify_all(this->_M_ptr); }\n+  // base class for atomic_ref<integral-type>\n+  template<typename _Tp>\n+    struct __atomic_ref<_Tp, false, true, false, false>\n+      : __atomic_ref_base<_Tp>\n+    {\n+      using value_type = typename __atomic_ref_base<_Tp>::value_type;\n+      using difference_type = value_type;\n \n-      // TODO add const volatile overload\n-#endif // __glibcxx_atomic_wait\n+      using __atomic_ref_base<_Tp>::__atomic_ref_base;\n+      using __atomic_ref_base<_Tp>::operator=;\n \n       value_type\n       fetch_add(value_type __i,\n@@ -1853,76 +1787,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       using value_type = typename __atomic_ref_base<_Fp>::value_type;\n       using difference_type = value_type;\n \n-      explicit\n-      __atomic_ref(_Fp& __t) : __atomic_ref_base<_Fp>(__t)\n-      { }\n-\n-      value_type\n-      operator=(value_type __t) const noexcept\n-      {\n-\tthis->store(__t);\n-\treturn __t;\n-      }\n-\n-      void\n-      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n-\n-      _Fp\n-      exchange(value_type __desired,\n-\t       memory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }\n-\n-      bool\n-      compare_exchange_weak(value_type& __expected, value_type __desired,\n-\t\t\t    memory_order __success,\n-\t\t\t    memory_order __failure) const noexcept\n-      {\n-\treturn __atomic_impl::compare_exchange_weak<true>(\n-\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n-      }\n-\n-      bool\n-      compare_exchange_strong(value_type& __expected, value_type __desired,\n-\t\t\t      memory_order __success,\n-\t\t\t      memory_order __failure) const noexcept\n-      {\n-\treturn __atomic_impl::compare_exchange_strong<true>(\n-\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n-      }\n-\n-      bool\n-    
  compare_exchange_weak(value_type& __expected, value_type __desired,\n-\t\t\t    memory_order __order = memory_order_seq_cst)\n-      const noexcept\n-      {\n-\treturn compare_exchange_weak(__expected, __desired, __order,\n-                                     __cmpexch_failure_order(__order));\n-      }\n-\n-      bool\n-      compare_exchange_strong(value_type& __expected, value_type __desired,\n-\t\t\t      memory_order __order = memory_order_seq_cst)\n-      const noexcept\n-      {\n-\treturn compare_exchange_strong(__expected, __desired, __order,\n-\t\t\t\t       __cmpexch_failure_order(__order));\n-      }\n-\n-#if __glibcxx_atomic_wait\n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_one() const noexcept\n-      { __atomic_impl::notify_one(this->_M_ptr); }\n-\n-      // TODO add const volatile overload\n-\n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_all() const noexcept\n-      { __atomic_impl::notify_all(this->_M_ptr); }\n-\n-      // TODO add const volatile overload\n-#endif // __glibcxx_atomic_wait\n-\n+      using __atomic_ref_base<_Fp>::__atomic_ref_base;\n+      using __atomic_ref_base<_Fp>::operator=;\n+    \n       value_type\n       fetch_add(value_type __i,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n@@ -1943,83 +1810,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     };\n \n   // base class for atomic_ref<pointer-type>\n-  template<typename _Tp>\n-    struct __atomic_ref<_Tp, false, false, false, true>\n-      : __atomic_ref_base<_Tp>\n+  template<typename _Pt>\n+    struct __atomic_ref<_Pt, false, false, false, true>\n+      : __atomic_ref_base<_Pt>\n     {\n-      using value_type = typename __atomic_ref_base<_Tp>::value_type;\n+      using value_type = typename __atomic_ref_base<_Pt>::value_type;\n       using difference_type = ptrdiff_t;\n \n-      explicit\n-      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp>(__t)\n-      { }\n-\n-      value_type\n-      operator=(value_type __t) const noexcept\n-      
{\n-\tthis->store(__t);\n-\treturn __t;\n-      }\n-\n-      void\n-      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n-\n-      value_type\n-      exchange(value_type __desired,\n-\t       memory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }\n-\n-      bool\n-      compare_exchange_weak(value_type& __expected, value_type __desired,\n-\t\t\t    memory_order __success,\n-\t\t\t    memory_order __failure) const noexcept\n-      {\n-\treturn __atomic_impl::compare_exchange_weak<true>(\n-\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n-      }\n-\n-      bool\n-      compare_exchange_strong(value_type& __expected, value_type __desired,\n-\t\t\t    memory_order __success,\n-\t\t\t    memory_order __failure) const noexcept\n-      {\n-\treturn __atomic_impl::compare_exchange_strong<true>(\n-\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n-      }\n-\n-      bool\n-      compare_exchange_weak(value_type& __expected, value_type __desired,\n-\t\t\t    memory_order __order = memory_order_seq_cst)\n-      const noexcept\n-      {\n-\treturn compare_exchange_weak(__expected, __desired, __order,\n-                                     __cmpexch_failure_order(__order));\n-      }\n-\n-      bool\n-      compare_exchange_strong(value_type& __expected, value_type __desired,\n-\t\t\t      memory_order __order = memory_order_seq_cst)\n-      const noexcept\n-      {\n-\treturn compare_exchange_strong(__expected, __desired, __order,\n-\t\t\t\t       __cmpexch_failure_order(__order));\n-      }\n-\n-#if __glibcxx_atomic_wait\n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_one() const noexcept\n-      { __atomic_impl::notify_one(this->_M_ptr); }\n-\n-      // TODO add const volatile overload\n-\n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_all() const noexcept\n-      { 
__atomic_impl::notify_all(this->_M_ptr); }\n-\n-      // TODO add const volatile overload\n-#endif // __glibcxx_atomic_wait\n-\n+      using __atomic_ref_base<_Pt>::__atomic_ref_base;\n+      using __atomic_ref_base<_Pt>::operator=;\n       _GLIBCXX_ALWAYS_INLINE value_type\n       fetch_add(difference_type __d,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n",
    "prefixes": [
        "v1",
        "03/10"
    ]
}