get:
Show a patch.

patch:
Partially update a patch (only the fields provided in the request body are changed).

put:
Update a patch (a full update: all writable fields are replaced).

GET /api/patches/2226768/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 2226768,
    "url": "http://patchwork.ozlabs.org/api/patches/2226768/?format=api",
    "web_url": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.1@forge-stage.sourceware.org/",
    "project": {
        "id": 17,
        "url": "http://patchwork.ozlabs.org/api/projects/17/?format=api",
        "name": "GNU Compiler Collection",
        "link_name": "gcc",
        "list_id": "gcc-patches.gcc.gnu.org",
        "list_email": "gcc-patches@gcc.gnu.org",
        "web_url": null,
        "scm_url": null,
        "webscm_url": null,
        "list_archive_url": "",
        "list_archive_url_format": "",
        "commit_url_format": ""
    },
    "msgid": "<bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.1@forge-stage.sourceware.org>",
    "list_archive_url": null,
    "date": "2026-04-22T18:49:38",
    "name": "[v1,01/10] libstdc++: add support for cv-qualified types in atomic_ref (P3323R1)",
    "commit_ref": null,
    "pull_url": null,
    "state": "new",
    "archived": false,
    "hash": "e51f7a9a64d3051581aca6fee9c24077fbcf6d1e",
    "submitter": {
        "id": 93223,
        "url": "http://patchwork.ozlabs.org/api/people/93223/?format=api",
        "name": "tkaminsk via Sourceware Forge",
        "email": "forge-bot+tkaminsk@forge-stage.sourceware.org"
    },
    "delegate": null,
    "mbox": "http://patchwork.ozlabs.org/project/gcc/patch/bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.1@forge-stage.sourceware.org/mbox/",
    "series": [
        {
            "id": 501094,
            "url": "http://patchwork.ozlabs.org/api/series/501094/?format=api",
            "web_url": "http://patchwork.ozlabs.org/project/gcc/list/?series=501094",
            "date": "2026-04-22T18:49:39",
            "name": "WIP: libstdc++: add support for cv-qualified types in atomic_ref (P3323R1)",
            "version": 1,
            "mbox": "http://patchwork.ozlabs.org/series/501094/mbox/"
        }
    ],
    "comments": "http://patchwork.ozlabs.org/api/patches/2226768/comments/",
    "check": "pending",
    "checks": "http://patchwork.ozlabs.org/api/patches/2226768/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org>",
        "X-Original-To": [
            "incoming@patchwork.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Delivered-To": [
            "patchwork-incoming@legolas.ozlabs.org",
            "gcc-patches@gcc.gnu.org"
        ],
        "Authentication-Results": [
            "legolas.ozlabs.org;\n spf=pass (sender SPF authorized) smtp.mailfrom=gcc.gnu.org\n (client-ip=2620:52:6:3111::32; helo=vm01.sourceware.org;\n envelope-from=gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org;\n receiver=patchwork.ozlabs.org)",
            "sourceware.org; dmarc=none (p=none dis=none)\n header.from=forge-stage.sourceware.org",
            "sourceware.org;\n spf=pass smtp.mailfrom=forge-stage.sourceware.org",
            "server2.sourceware.org;\n arc=none smtp.remote-ip=38.145.34.39"
        ],
        "Received": [
            "from vm01.sourceware.org (vm01.sourceware.org\n [IPv6:2620:52:6:3111::32])\n\t(using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n\t key-exchange x25519 server-signature ECDSA (secp384r1) server-digest SHA384)\n\t(No client certificate requested)\n\tby legolas.ozlabs.org (Postfix) with ESMTPS id 4g18Tc3tSGz1yD5\n\tfor <incoming@patchwork.ozlabs.org>; Thu, 23 Apr 2026 05:31:00 +1000 (AEST)",
            "from vm01.sourceware.org (localhost [127.0.0.1])\n\tby sourceware.org (Postfix) with ESMTP id 8738E43DA46F\n\tfor <incoming@patchwork.ozlabs.org>; Wed, 22 Apr 2026 19:30:58 +0000 (GMT)",
            "from forge-stage.sourceware.org (vm08.sourceware.org [38.145.34.39])\n by sourceware.org (Postfix) with ESMTPS id 0446E407FF66\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 18:51:08 +0000 (GMT)",
            "from forge-stage.sourceware.org (localhost [IPv6:::1])\n (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)\n key-exchange x25519 server-signature ECDSA (prime256v1) server-digest SHA256)\n (No client certificate requested)\n by forge-stage.sourceware.org (Postfix) with ESMTPS id D596143590\n for <gcc-patches@gcc.gnu.org>; Wed, 22 Apr 2026 18:51:07 +0000 (UTC)"
        ],
        "DKIM-Filter": [
            "OpenDKIM Filter v2.11.0 sourceware.org 8738E43DA46F",
            "OpenDKIM Filter v2.11.0 sourceware.org 0446E407FF66"
        ],
        "DMARC-Filter": "OpenDMARC Filter v1.4.2 sourceware.org 0446E407FF66",
        "ARC-Filter": "OpenARC Filter v1.0.0 sourceware.org 0446E407FF66",
        "ARC-Seal": "i=1; a=rsa-sha256; d=sourceware.org; s=key; t=1776883868; cv=none;\n b=emJxsqtb/kzRvhuPU2eqr+bxL1aPjM9sVqwnTwnfDUJqccWLdAG+o9KXmW6dinzT/pkAvBQqHouyfeopaTwZw9+rbJXZPB9W9gXW0hPQylYIK+egrdQ7TAqfCYWVGib2ZEuAjGDdqqdevJxuDRVQCTsyvTYagTYDEYpPtyiLA50=",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; d=sourceware.org; s=key;\n t=1776883868; c=relaxed/simple;\n bh=XRxzC4ILw4eCZL6w0R+6SHVHWgqcBNIcC2o90CiUIGc=;\n h=From:Date:Subject:To:Message-ID;\n b=h+JGbiH3sKIkikME47zz7zxhglCamcBdGdUpgWK5Q80qSr7oktRN3G0TAUtVQvb1TcBDmmwROfkUOJo8M0lWglCvc6kDzKsbt2Qd16YZSscydf1jcvcDTchCnlDSOORFHLgOweIRUnKz3zJpUWVSLhjVg7p6ZrMNpGnV2kua8U8=",
        "ARC-Authentication-Results": "i=1; server2.sourceware.org",
        "From": "tkaminsk via Sourceware Forge\n <forge-bot+tkaminsk@forge-stage.sourceware.org>",
        "Date": "Wed, 22 Apr 2026 18:49:38 +0000",
        "Subject": "[PATCH v1 01/10] libstdc++: add support for cv-qualified types in\n atomic_ref (P3323R1)",
        "To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>",
        "Message-ID": "\n <bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.1@forge-stage.sourceware.org>",
        "X-Mailer": "batrachomyomachia",
        "X-Pull-Request-Organization": "gcc",
        "X-Pull-Request-Repository": "gcc-TEST",
        "X-Pull-Request": "https://forge.sourceware.org/gcc/gcc-TEST/pulls/85",
        "References": "\n <bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.0@forge-stage.sourceware.org>",
        "In-Reply-To": "\n <bmm.hhup55wx16.gcc.gcc-TEST.tkaminsk.85.1.0@forge-stage.sourceware.org>",
        "X-Patch-URL": "\n https://forge.sourceware.org/tkaminsk/gcc/commit/38675d56c01e7d5ea1c4f295ec5f32f3e0b36ff9",
        "X-BeenThere": "gcc-patches@gcc.gnu.org",
        "X-Mailman-Version": "2.1.30",
        "Precedence": "list",
        "List-Id": "Gcc-patches mailing list <gcc-patches.gcc.gnu.org>",
        "List-Unsubscribe": "<https://gcc.gnu.org/mailman/options/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=unsubscribe>",
        "List-Archive": "<https://gcc.gnu.org/pipermail/gcc-patches/>",
        "List-Post": "<mailto:gcc-patches@gcc.gnu.org>",
        "List-Help": "<mailto:gcc-patches-request@gcc.gnu.org?subject=help>",
        "List-Subscribe": "<https://gcc.gnu.org/mailman/listinfo/gcc-patches>,\n <mailto:gcc-patches-request@gcc.gnu.org?subject=subscribe>",
        "Reply-To": "gcc-patches mailing list <gcc-patches@gcc.gnu.org>,\n tkaminsk@gcc.gnu.org",
        "Errors-To": "gcc-patches-bounces~incoming=patchwork.ozlabs.org@gcc.gnu.org"
    },
    "content": "From: Giuseppe D'Angelo <giuseppe.dangelo@kdab.com>\n\nP3323R1 (DR for C++20/C++11, fixes LWG 4069 and 3508) clarifies that\nstd::atomic_ref<cv T> is meant to be supported.\n\nThis commit implements it by splitting the __atomic_ref class (that\natomic_ref inherits from) into a further base class (__atomic_ref_base):\n\n* __atomic_ref_base<T> implements the atomic API for const and non-const\n  Ts (with specializations for integrals, floating points, pointers);\n* __atomic_ref<T> inherits from __atomic_ref_base<T>; if T is\n  non-const adds on top the \"mutating\" atomic APIs like store(),\n  exchange(), and so on; same discussion w.r.t. the specializations.\n\nThe primary atomic_ref is now meant to be used for cv-bool, not just\nbool, amend the detection accordingly.\n\nAt the same time, disable support for cv-qualified types in std::atomic\n(for instance, std::atomic<volatile T> isn't meaningful; one should use\nvolatile std::atomic<T>), again as per the paper.\n\n\tPR libstdc++/115402\n\nlibstdc++-v3/ChangeLog:\n\n\t* include/bits/atomic_base.h: Add support for atomic_ref<cv T>:\n\t  refactor __atomic_ref into a further subclass in order to\n\t  implement the constraints on atomic_ref mutating APIs; change\n          _Tp in various function signatures to be value_type instead.\n\t* include/std/atomic: Add a static_assert to std::atomic, as per\n\t  P3323R1, complementing the existing ones.\n\t* testsuite/29_atomics/atomic_ref/bool.cc: Add tests for\n\t  cv types in atomic_ref.\n\t* testsuite/29_atomics/atomic_ref/deduction.cc: Likewise.\n\t* testsuite/29_atomics/atomic_ref/float.cc: Likewise.\n\t* testsuite/29_atomics/atomic_ref/generic.cc: Likewise.\n\t* testsuite/29_atomics/atomic_ref/integral.cc: Likewise.\n\t* testsuite/29_atomics/atomic_ref/pointer.cc: Likewise.\n\t* testsuite/29_atomics/atomic_ref/requirements.cc: Likewise.\n\t* testsuite/29_atomics/atomic_ref/wait_notify.cc: Likewise.\n\t* testsuite/29_atomics/atomic_ref/115402.cc: New 
test.\n---\n libstdc++-v3/include/bits/atomic_base.h       | 507 +++++++++++-------\n libstdc++-v3/include/std/atomic               |   1 +\n .../testsuite/29_atomics/atomic_ref/115402.cc |  16 +\n .../testsuite/29_atomics/atomic_ref/bool.cc   |  18 +\n .../29_atomics/atomic_ref/deduction.cc        |  33 +-\n .../testsuite/29_atomics/atomic_ref/float.cc  |  21 +-\n .../29_atomics/atomic_ref/generic.cc          |   6 +\n .../29_atomics/atomic_ref/integral.cc         |   6 +\n .../29_atomics/atomic_ref/pointer.cc          |   6 +\n .../29_atomics/atomic_ref/requirements.cc     |  70 ++-\n .../29_atomics/atomic_ref/wait_notify.cc      |  10 +\n 11 files changed, 456 insertions(+), 238 deletions(-)\n create mode 100644 libstdc++-v3/testsuite/29_atomics/atomic_ref/115402.cc",
    "diff": "diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h\nindex 92d1269493f7..1763a64ab981 100644\n--- a/libstdc++-v3/include/bits/atomic_base.h\n+++ b/libstdc++-v3/include/bits/atomic_base.h\n@@ -1508,14 +1508,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n     };\n #undef _GLIBCXX20_INIT\n \n+  // atomic_ref inherits from __atomic_ref;\n+  // __atomic_ref inherits from __atomic_ref_base.\n+  //\n+  // __atomic_ref_base provides the common APIs for const and non-const types;\n+  // __atomic ref adds on top the APIs for non-const types, thus implementing\n+  // the various constraints in [atomic.ref].\n+\n   template<typename _Tp,\n-           bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,\n-           bool = is_floating_point_v<_Tp>>\n+           bool = is_const_v<_Tp>,\n+           bool = is_integral_v<_Tp> && !is_same_v<remove_cv_t<_Tp>, bool>,\n+           bool = is_floating_point_v<_Tp>,\n+           bool = is_pointer_v<_Tp>>\n     struct __atomic_ref;\n \n-  // base class for non-integral, non-floating-point, non-pointer types\n+  template<typename _Tp,\n+           bool _IsIntegral,\n+           bool _IsFloatingPoint,\n+           bool _IsPointer>\n+    struct __atomic_ref_base;\n+\n+  // Const types\n+  template<typename _Tp, bool _IsIntegral, bool _IsFloatingPoint, bool _IsPointer>\n+    struct __atomic_ref<_Tp, true, _IsIntegral, _IsFloatingPoint, _IsPointer>\n+      : __atomic_ref_base<_Tp, _IsIntegral, _IsFloatingPoint, _IsPointer>\n+    {\n+      __atomic_ref() = delete;\n+      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+\n+      explicit\n+      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, _IsIntegral, _IsFloatingPoint, _IsPointer>(__t)\n+      { }\n+    };\n+\n+  // Non-integral, non-floating-point, non-pointer types\n   template<typename _Tp>\n-    struct __atomic_ref<_Tp, false, false>\n+    struct __atomic_ref_base<_Tp, false, false, false>\n     {\n       
static_assert(is_trivially_copyable_v<_Tp>);\n \n@@ -1525,70 +1553,97 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \t? 0 : sizeof(_Tp);\n \n     public:\n-      using value_type = _Tp;\n+      using value_type = remove_cv_t<_Tp>;\n \n       static constexpr bool is_always_lock_free\n \t= __atomic_always_lock_free(sizeof(_Tp), 0);\n \n+      static_assert(is_always_lock_free || !is_volatile_v<_Tp>);\n+\n       static constexpr size_t required_alignment\n \t= _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);\n \n-      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+      __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;\n \n       explicit\n-      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))\n+      __atomic_ref_base(_Tp& __t) : _M_ptr(std::__addressof(__t))\n       {\n \t__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);\n       }\n \n-      __atomic_ref(const __atomic_ref&) noexcept = default;\n+      __atomic_ref_base(const __atomic_ref_base&) noexcept = default;\n \n-      _Tp\n-      operator=(_Tp __t) const noexcept\n-      {\n-\tthis->store(__t);\n-\treturn __t;\n-      }\n-\n-      operator _Tp() const noexcept { return this->load(); }\n+      operator value_type() const noexcept { return this->load(); }\n \n       bool\n       is_lock_free() const noexcept\n       { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }\n \n-      void\n-      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(_M_ptr, __t, __m); }\n-\n-      _Tp\n+      value_type\n       load(memory_order __m = memory_order_seq_cst) const noexcept\n       { return __atomic_impl::load(_M_ptr, __m); }\n \n-      _Tp\n-      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)\n+#if __glibcxx_atomic_wait\n+      _GLIBCXX_ALWAYS_INLINE void\n+      wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept\n+      { 
__atomic_impl::wait(_M_ptr, __old, __m); }\n+\n+      // TODO add const volatile overload\n+#endif // __glibcxx_atomic_wait\n+\n+    protected:\n+      _Tp* _M_ptr;\n+    };\n+\n+  template<typename _Tp>\n+    struct __atomic_ref<_Tp, false, false, false, false>\n+      : __atomic_ref_base<_Tp, false, false, false>\n+    {\n+      using value_type = typename __atomic_ref_base<_Tp, false, false, false>::value_type;\n+\n+      __atomic_ref() = delete;\n+      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+\n+      explicit\n+      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, false, false, false>(__t)\n+      { }\n+\n+      void\n+      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n+\n+      value_type\n+      operator=(value_type __t) const noexcept\n+      {\n+\tthis->store(__t);\n+\treturn __t;\n+      }\n+\n+      value_type\n+      exchange(value_type __desired, memory_order __m = memory_order_seq_cst)\n       const noexcept\n-      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }\n+      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }\n \n       bool\n-      compare_exchange_weak(_Tp& __expected, _Tp __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __success,\n \t\t\t    memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_weak<true>(\n-\t\t _M_ptr, __expected, __desired, __success, __failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_strong(_Tp& __expected, _Tp __desired,\n+      compare_exchange_strong(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __success,\n \t\t\t    memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_strong<true>(\n-\t\t _M_ptr, __expected, __desired, __success, 
__failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_weak(_Tp& __expected, _Tp __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __order = memory_order_seq_cst)\n       const noexcept\n       {\n@@ -1597,7 +1652,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       }\n \n       bool\n-      compare_exchange_strong(_Tp& __expected, _Tp __desired,\n+      compare_exchange_strong(value_type& __expected, value_type __desired,\n \t\t\t      memory_order __order = memory_order_seq_cst)\n       const noexcept\n       {\n@@ -1606,64 +1661,51 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       }\n \n #if __glibcxx_atomic_wait\n-      _GLIBCXX_ALWAYS_INLINE void\n-      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::wait(_M_ptr, __old, __m); }\n-\n-      // TODO add const volatile overload\n-\n       _GLIBCXX_ALWAYS_INLINE void\n       notify_one() const noexcept\n-      { __atomic_impl::notify_one(_M_ptr); }\n+      { __atomic_impl::notify_one(this->_M_ptr); }\n \n       // TODO add const volatile overload\n \n       _GLIBCXX_ALWAYS_INLINE void\n       notify_all() const noexcept\n-      { __atomic_impl::notify_all(_M_ptr); }\n+      { __atomic_impl::notify_all(this->_M_ptr); }\n \n       // TODO add const volatile overload\n #endif // __glibcxx_atomic_wait\n-\n-    private:\n-      _Tp* _M_ptr;\n     };\n \n-  // base class for atomic_ref<integral-type>\n+\n+  // Integral types (except cv-bool)\n   template<typename _Tp>\n-    struct __atomic_ref<_Tp, true, false>\n+    struct __atomic_ref_base<_Tp, true, false, false>\n     {\n       static_assert(is_integral_v<_Tp>);\n \n     public:\n-      using value_type = _Tp;\n+      using value_type = remove_cv_t<_Tp>;\n       using difference_type = value_type;\n \n       static constexpr bool is_always_lock_free\n \t= __atomic_always_lock_free(sizeof(_Tp), 
0);\n \n+      static_assert(is_always_lock_free || !is_volatile_v<_Tp>);\n+\n       static constexpr size_t required_alignment\n \t= sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);\n \n-      __atomic_ref() = delete;\n-      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+      __atomic_ref_base() = delete;\n+      __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;\n \n       explicit\n-      __atomic_ref(_Tp& __t) : _M_ptr(&__t)\n+      __atomic_ref_base(_Tp& __t) : _M_ptr(&__t)\n       {\n \t__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);\n       }\n \n-      __atomic_ref(const __atomic_ref&) noexcept = default;\n-\n-      _Tp\n-      operator=(_Tp __t) const noexcept\n-      {\n-\tthis->store(__t);\n-\treturn __t;\n-      }\n+      __atomic_ref_base(const __atomic_ref_base&) noexcept = default;\n \n-      operator _Tp() const noexcept { return this->load(); }\n+      operator value_type() const noexcept { return this->load(); }\n \n       bool\n       is_lock_free() const noexcept\n@@ -1671,39 +1713,71 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \treturn __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();\n       }\n \n-      void\n-      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(_M_ptr, __t, __m); }\n-\n-      _Tp\n+      value_type\n       load(memory_order __m = memory_order_seq_cst) const noexcept\n       { return __atomic_impl::load(_M_ptr, __m); }\n \n-      _Tp\n-      exchange(_Tp __desired,\n+#if __glibcxx_atomic_wait\n+      _GLIBCXX_ALWAYS_INLINE void\n+      wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::wait(_M_ptr, __old, __m); }\n+\n+      // TODO add const volatile overload\n+#endif // __glibcxx_atomic_wait\n+\n+    protected:\n+      _Tp* _M_ptr;\n+    };\n+\n+  template<typename _Tp>\n+    struct __atomic_ref<_Tp, false, true, false, false>\n+      : 
__atomic_ref_base<_Tp, true, false, false>\n+    {\n+      using value_type = typename __atomic_ref_base<_Tp, true, false, false>::value_type;\n+\n+      __atomic_ref() = delete;\n+      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+\n+      explicit\n+      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, true, false, false>(__t)\n+      { }\n+\n+      value_type\n+      operator=(value_type __t) const noexcept\n+      {\n+\tthis->store(__t);\n+\treturn __t;\n+      }\n+\n+      void\n+      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n+\n+      value_type\n+      exchange(value_type __desired,\n \t       memory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }\n+      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }\n \n       bool\n-      compare_exchange_weak(_Tp& __expected, _Tp __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __success,\n \t\t\t    memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_weak<true>(\n-\t\t _M_ptr, __expected, __desired, __success, __failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_strong(_Tp& __expected, _Tp __desired,\n+      compare_exchange_strong(value_type& __expected, value_type __desired,\n \t\t\t      memory_order __success,\n \t\t\t      memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_strong<true>(\n-\t\t _M_ptr, __expected, __desired, __success, __failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_weak(_Tp& __expected, _Tp __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __order 
= memory_order_seq_cst)\n       const noexcept\n       {\n@@ -1712,7 +1786,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       }\n \n       bool\n-      compare_exchange_strong(_Tp& __expected, _Tp __desired,\n+      compare_exchange_strong(value_type& __expected, value_type __desired,\n \t\t\t      memory_order __order = memory_order_seq_cst)\n       const noexcept\n       {\n@@ -1721,21 +1795,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       }\n \n #if __glibcxx_atomic_wait\n-      _GLIBCXX_ALWAYS_INLINE void\n-      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::wait(_M_ptr, __old, __m); }\n-\n-      // TODO add const volatile overload\n-\n       _GLIBCXX_ALWAYS_INLINE void\n       notify_one() const noexcept\n-      { __atomic_impl::notify_one(_M_ptr); }\n+      { __atomic_impl::notify_one(this->_M_ptr); }\n \n       // TODO add const volatile overload\n \n       _GLIBCXX_ALWAYS_INLINE void\n       notify_all() const noexcept\n-      { __atomic_impl::notify_all(_M_ptr); }\n+      { __atomic_impl::notify_all(this->_M_ptr); }\n \n       // TODO add const volatile overload\n #endif // __glibcxx_atomic_wait\n@@ -1743,27 +1811,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       value_type\n       fetch_add(value_type __i,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }\n+      { return __atomic_impl::fetch_add(this->_M_ptr, __i, __m); }\n \n       value_type\n       fetch_sub(value_type __i,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }\n+      { return __atomic_impl::fetch_sub(this->_M_ptr, __i, __m); }\n \n       value_type\n       fetch_and(value_type __i,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }\n+      { return __atomic_impl::fetch_and(this->_M_ptr, __i, __m); }\n \n       value_type\n      
 fetch_or(value_type __i,\n \t       memory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }\n+      { return __atomic_impl::fetch_or(this->_M_ptr, __i, __m); }\n \n       value_type\n       fetch_xor(value_type __i,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }\n+      { return __atomic_impl::fetch_xor(this->_M_ptr, __i, __m); }\n \n       _GLIBCXX_ALWAYS_INLINE value_type\n       operator++(int) const noexcept\n@@ -1775,70 +1843,62 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \n       value_type\n       operator++() const noexcept\n-      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }\n+      { return __atomic_impl::__add_fetch(this->_M_ptr, value_type(1)); }\n \n       value_type\n       operator--() const noexcept\n-      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }\n+      { return __atomic_impl::__sub_fetch(this->_M_ptr, value_type(1)); }\n \n       value_type\n       operator+=(value_type __i) const noexcept\n-      { return __atomic_impl::__add_fetch(_M_ptr, __i); }\n+      { return __atomic_impl::__add_fetch(this->_M_ptr, __i); }\n \n       value_type\n       operator-=(value_type __i) const noexcept\n-      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }\n+      { return __atomic_impl::__sub_fetch(this->_M_ptr, __i); }\n \n       value_type\n       operator&=(value_type __i) const noexcept\n-      { return __atomic_impl::__and_fetch(_M_ptr, __i); }\n+      { return __atomic_impl::__and_fetch(this->_M_ptr, __i); }\n \n       value_type\n       operator|=(value_type __i) const noexcept\n-      { return __atomic_impl::__or_fetch(_M_ptr, __i); }\n+      { return __atomic_impl::__or_fetch(this->_M_ptr, __i); }\n \n       value_type\n       operator^=(value_type __i) const noexcept\n-      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }\n-\n-    private:\n-      _Tp* _M_ptr;\n+      { 
return __atomic_impl::__xor_fetch(this->_M_ptr, __i); }\n     };\n \n-  // base class for atomic_ref<floating-point-type>\n+  // Floating-point types\n   template<typename _Fp>\n-    struct __atomic_ref<_Fp, false, true>\n+    struct __atomic_ref_base<_Fp, false, true, false>\n     {\n       static_assert(is_floating_point_v<_Fp>);\n \n     public:\n-      using value_type = _Fp;\n+      using value_type = remove_cv_t<_Fp>;\n       using difference_type = value_type;\n \n       static constexpr bool is_always_lock_free\n \t= __atomic_always_lock_free(sizeof(_Fp), 0);\n \n+      static_assert(is_always_lock_free || !is_volatile_v<_Fp>);\n+\n       static constexpr size_t required_alignment = __alignof__(_Fp);\n \n-      __atomic_ref() = delete;\n-      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+      __atomic_ref_base() = delete;\n+      __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;\n \n       explicit\n-      __atomic_ref(_Fp& __t) : _M_ptr(&__t)\n+      __atomic_ref_base(_Fp& __t) : _M_ptr(&__t)\n       {\n \t__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);\n       }\n \n-      __atomic_ref(const __atomic_ref&) noexcept = default;\n+      __atomic_ref_base(const __atomic_ref_base&) noexcept = default;\n \n-      _Fp\n-      operator=(_Fp __t) const noexcept\n-      {\n-\tthis->store(__t);\n-\treturn __t;\n-      }\n-\n-      operator _Fp() const noexcept { return this->load(); }\n+      operator value_type() const noexcept { return this->load(); }\n \n       bool\n       is_lock_free() const noexcept\n@@ -1846,39 +1906,71 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \treturn __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();\n       }\n \n-      void\n-      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(_M_ptr, __t, __m); }\n-\n       _Fp\n       load(memory_order __m = memory_order_seq_cst) const noexcept\n       { return 
__atomic_impl::load(_M_ptr, __m); }\n \n+#if __glibcxx_atomic_wait\n+      _GLIBCXX_ALWAYS_INLINE void\n+      wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::wait(_M_ptr, __old, __m); }\n+\n+      // TODO add const volatile overload\n+#endif // __glibcxx_atomic_wait\n+\n+    protected:\n+      _Fp* _M_ptr;\n+    };\n+\n+  template<typename _Fp>\n+    struct __atomic_ref<_Fp, false, false, true, false>\n+      : __atomic_ref_base<_Fp, false, true, false>\n+    {\n+      using value_type = typename __atomic_ref_base<_Fp, false, true, false>::value_type;\n+\n+      __atomic_ref() = delete;\n+      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+\n+      explicit\n+      __atomic_ref(_Fp& __t) : __atomic_ref_base<_Fp, false, true, false>(__t)\n+      { }\n+\n+      value_type\n+      operator=(value_type __t) const noexcept\n+      {\n+\tthis->store(__t);\n+\treturn __t;\n+      }\n+\n+      void\n+      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n+\n       _Fp\n-      exchange(_Fp __desired,\n+      exchange(value_type __desired,\n \t       memory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }\n+      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }\n \n       bool\n-      compare_exchange_weak(_Fp& __expected, _Fp __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __success,\n \t\t\t    memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_weak<true>(\n-\t\t _M_ptr, __expected, __desired, __success, __failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_strong(_Fp& __expected, _Fp __desired,\n+      compare_exchange_strong(value_type& __expected, 
value_type __desired,\n \t\t\t      memory_order __success,\n \t\t\t      memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_strong<true>(\n-\t\t _M_ptr, __expected, __desired, __success, __failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_weak(_Fp& __expected, _Fp __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __order = memory_order_seq_cst)\n       const noexcept\n       {\n@@ -1887,7 +1979,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       }\n \n       bool\n-      compare_exchange_strong(_Fp& __expected, _Fp __desired,\n+      compare_exchange_strong(value_type& __expected, value_type __desired,\n \t\t\t      memory_order __order = memory_order_seq_cst)\n       const noexcept\n       {\n@@ -1896,21 +1988,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       }\n \n #if __glibcxx_atomic_wait\n-      _GLIBCXX_ALWAYS_INLINE void\n-      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::wait(_M_ptr, __old, __m); }\n-\n-      // TODO add const volatile overload\n-\n       _GLIBCXX_ALWAYS_INLINE void\n       notify_one() const noexcept\n-      { __atomic_impl::notify_one(_M_ptr); }\n+      { __atomic_impl::notify_one(this->_M_ptr); }\n \n       // TODO add const volatile overload\n \n       _GLIBCXX_ALWAYS_INLINE void\n       notify_all() const noexcept\n-      { __atomic_impl::notify_all(_M_ptr); }\n+      { __atomic_impl::notify_all(this->_M_ptr); }\n \n       // TODO add const volatile overload\n #endif // __glibcxx_atomic_wait\n@@ -1918,56 +2004,50 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       value_type\n       fetch_add(value_type __i,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }\n+      { return __atomic_impl::__fetch_add_flt(this->_M_ptr, __i, __m); }\n \n       
value_type\n       fetch_sub(value_type __i,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }\n+      { return __atomic_impl::__fetch_sub_flt(this->_M_ptr, __i, __m); }\n \n       value_type\n       operator+=(value_type __i) const noexcept\n-      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }\n+      { return __atomic_impl::__add_fetch_flt(this->_M_ptr, __i); }\n \n       value_type\n       operator-=(value_type __i) const noexcept\n-      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }\n-\n-    private:\n-      _Fp* _M_ptr;\n+      { return __atomic_impl::__sub_fetch_flt(this->_M_ptr, __i); }\n     };\n \n-  // base class for atomic_ref<pointer-type>\n+  // Pointer types\n   template<typename _Tp>\n-    struct __atomic_ref<_Tp*, false, false>\n+    struct __atomic_ref_base<_Tp, false, false, true>\n     {\n+      static_assert(is_pointer_v<_Tp>);\n+\n     public:\n-      using value_type = _Tp*;\n+      using value_type = remove_cv_t<_Tp>;\n       using difference_type = ptrdiff_t;\n \n       static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;\n \n-      static constexpr size_t required_alignment = __alignof__(_Tp*);\n+      static_assert(is_always_lock_free || !is_volatile_v<_Tp>);\n \n-      __atomic_ref() = delete;\n-      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+      static constexpr size_t required_alignment = __alignof__(_Tp);\n+\n+      __atomic_ref_base() = delete;\n+      __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;\n \n       explicit\n-      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))\n+      __atomic_ref_base(_Tp& __t) : _M_ptr(std::__addressof(__t))\n       {\n \t__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);\n       }\n \n-      __atomic_ref(const __atomic_ref&) noexcept = default;\n-\n-      _Tp*\n-      operator=(_Tp* __t) const noexcept\n-      
{\n-\tthis->store(__t);\n-\treturn __t;\n-      }\n+      __atomic_ref_base(const __atomic_ref_base&) noexcept = default;\n \n-      operator _Tp*() const noexcept { return this->load(); }\n+      operator value_type() const noexcept { return this->load(); }\n \n       bool\n       is_lock_free() const noexcept\n@@ -1975,39 +2055,94 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \treturn __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();\n       }\n \n-      void\n-      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::store(_M_ptr, __t, __m); }\n-\n-      _Tp*\n+      value_type\n       load(memory_order __m = memory_order_seq_cst) const noexcept\n       { return __atomic_impl::load(_M_ptr, __m); }\n \n-      _Tp*\n-      exchange(_Tp* __desired,\n+#if __glibcxx_atomic_wait\n+      _GLIBCXX_ALWAYS_INLINE void\n+      wait(value_type __old, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::wait(_M_ptr, __old, __m); }\n+\n+      // TODO add const volatile overload\n+#endif // __glibcxx_atomic_wait\n+\n+    protected:\n+      static constexpr ptrdiff_t\n+      _S_type_size(ptrdiff_t __d) noexcept\n+      {\n+\tusing _PointedType = remove_pointer_t<_Tp>;\n+\tstatic_assert(is_object_v<_PointedType>);\n+\treturn __d * sizeof(_PointedType);\n+      }\n+\n+      _Tp* _M_ptr;\n+    };\n+\n+  template<typename _Tp>\n+    struct __atomic_ref<_Tp, false, false, false, true>\n+      : __atomic_ref_base<_Tp, false, false, true>\n+    {\n+      using value_type = typename __atomic_ref_base<_Tp, false, false, true>::value_type;\n+      using difference_type = typename __atomic_ref_base<_Tp, false, false, true>::difference_type;\n+\n+      __atomic_ref() = delete;\n+      __atomic_ref& operator=(const __atomic_ref&) = delete;\n+\n+      explicit\n+      __atomic_ref(_Tp& __t) : __atomic_ref_base<_Tp, false, false, true>(__t)\n+      { }\n+\n+#if __glibcxx_atomic_wait\n+      _GLIBCXX_ALWAYS_INLINE 
void\n+      notify_one() const noexcept\n+      { __atomic_impl::notify_one(this->_M_ptr); }\n+\n+      // TODO add const volatile overload\n+\n+      _GLIBCXX_ALWAYS_INLINE void\n+      notify_all() const noexcept\n+      { __atomic_impl::notify_all(this->_M_ptr); }\n+\n+      // TODO add const volatile overload\n+#endif // __glibcxx_atomic_wait\n+\n+      value_type\n+      operator=(value_type __t) const noexcept\n+      {\n+\tthis->store(__t);\n+\treturn __t;\n+      }\n+\n+      void\n+      store(value_type __t, memory_order __m = memory_order_seq_cst) const noexcept\n+      { __atomic_impl::store(this->_M_ptr, __t, __m); }\n+\n+      value_type\n+      exchange(value_type __desired,\n \t       memory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }\n+      { return __atomic_impl::exchange(this->_M_ptr, __desired, __m); }\n \n       bool\n-      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __success,\n \t\t\t    memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_weak<true>(\n-\t\t _M_ptr, __expected, __desired, __success, __failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,\n+      compare_exchange_strong(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __success,\n \t\t\t    memory_order __failure) const noexcept\n       {\n \treturn __atomic_impl::compare_exchange_strong<true>(\n-\t\t _M_ptr, __expected, __desired, __success, __failure);\n+\t\t this->_M_ptr, __expected, __desired, __success, __failure);\n       }\n \n       bool\n-      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,\n+      compare_exchange_weak(value_type& __expected, value_type __desired,\n \t\t\t    memory_order __order = 
memory_order_seq_cst)\n       const noexcept\n       {\n@@ -2016,7 +2151,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       }\n \n       bool\n-      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,\n+      compare_exchange_strong(value_type& __expected, value_type __desired,\n \t\t\t      memory_order __order = memory_order_seq_cst)\n       const noexcept\n       {\n@@ -2024,35 +2159,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n \t\t\t\t       __cmpexch_failure_order(__order));\n       }\n \n-#if __glibcxx_atomic_wait\n-      _GLIBCXX_ALWAYS_INLINE void\n-      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept\n-      { __atomic_impl::wait(_M_ptr, __old, __m); }\n-\n-      // TODO add const volatile overload\n-\n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_one() const noexcept\n-      { __atomic_impl::notify_one(_M_ptr); }\n-\n-      // TODO add const volatile overload\n-\n-      _GLIBCXX_ALWAYS_INLINE void\n-      notify_all() const noexcept\n-      { __atomic_impl::notify_all(_M_ptr); }\n-\n-      // TODO add const volatile overload\n-#endif // __glibcxx_atomic_wait\n-\n       _GLIBCXX_ALWAYS_INLINE value_type\n       fetch_add(difference_type __d,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }\n+      { return __atomic_impl::fetch_add(this->_M_ptr, this->_S_type_size(__d), __m); }\n \n       _GLIBCXX_ALWAYS_INLINE value_type\n       fetch_sub(difference_type __d,\n \t\tmemory_order __m = memory_order_seq_cst) const noexcept\n-      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }\n+      { return __atomic_impl::fetch_sub(this->_M_ptr, this->_S_type_size(__d), __m); }\n \n       value_type\n       operator++(int) const noexcept\n@@ -2065,36 +2180,26 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       value_type\n       operator++() const noexcept\n       {\n-\treturn __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));\n+\treturn 
__atomic_impl::__add_fetch(this->_M_ptr, this->_S_type_size(1));\n       }\n \n       value_type\n       operator--() const noexcept\n       {\n-\treturn __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));\n+\treturn __atomic_impl::__sub_fetch(this->_M_ptr, this->_S_type_size(1));\n       }\n \n       value_type\n       operator+=(difference_type __d) const noexcept\n       {\n-\treturn __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));\n+\treturn __atomic_impl::__add_fetch(this->_M_ptr, this->_S_type_size(__d));\n       }\n \n       value_type\n       operator-=(difference_type __d) const noexcept\n       {\n-\treturn __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));\n+\treturn __atomic_impl::__sub_fetch(this->_M_ptr, this->_S_type_size(__d));\n       }\n-\n-    private:\n-      static constexpr ptrdiff_t\n-      _S_type_size(ptrdiff_t __d) noexcept\n-      {\n-\tstatic_assert(is_object_v<_Tp>);\n-\treturn __d * sizeof(_Tp);\n-      }\n-\n-      _Tp** _M_ptr;\n     };\n #endif // C++2a\n \ndiff --git a/libstdc++-v3/include/std/atomic b/libstdc++-v3/include/std/atomic\nindex 9b1aca0fc09a..cb94cb5e7ca2 100644\n--- a/libstdc++-v3/include/std/atomic\n+++ b/libstdc++-v3/include/std/atomic\n@@ -222,6 +222,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION\n       static_assert(is_move_constructible_v<_Tp>);\n       static_assert(is_copy_assignable_v<_Tp>);\n       static_assert(is_move_assignable_v<_Tp>);\n+      static_assert(is_same_v<_Tp, remove_cv_t<_Tp>>);\n #endif\n \n     public:\ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/115402.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/115402.cc\nnew file mode 100644\nindex 000000000000..ca449c243c49\n--- /dev/null\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/115402.cc\n@@ -0,0 +1,16 @@\n+// PR libstdc++/115402\n+// { dg-do run { target c++20 } }\n+\n+#include <atomic>\n+\n+int\n+main()\n+{\n+  volatile int vi = 0;\n+  std::atomic_ref<volatile int> vref(vi);\n+  int val = vref.load();\n+  
vref.exchange(val);\n+  vref.compare_exchange_weak(val, 0);\n+  vref.compare_exchange_strong(val, 0);\n+  vref.wait(0);\n+}\ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc\nindex 4702932627e8..7b362737afba 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/bool.cc\n@@ -13,3 +13,21 @@ static_assert( not has_or<std::atomic_ref<bool>> );\n static_assert( not has_xor<std::atomic_ref<bool>> );\n static_assert( not has_fetch_add<std::atomic_ref<bool>> );\n static_assert( not has_fetch_sub<std::atomic_ref<bool>> );\n+\n+static_assert( not has_and<std::atomic_ref<const bool>> );\n+static_assert( not has_or<std::atomic_ref<const bool>> );\n+static_assert( not has_xor<std::atomic_ref<const bool>> );\n+static_assert( not has_fetch_add<std::atomic_ref<const bool>> );\n+static_assert( not has_fetch_sub<std::atomic_ref<const bool>> );\n+\n+static_assert( not has_and<std::atomic_ref<volatile bool>> );\n+static_assert( not has_or<std::atomic_ref<volatile bool>> );\n+static_assert( not has_xor<std::atomic_ref<volatile bool>> );\n+static_assert( not has_fetch_add<std::atomic_ref<volatile bool>> );\n+static_assert( not has_fetch_sub<std::atomic_ref<volatile bool>> );\n+\n+static_assert( not has_and<std::atomic_ref<const volatile bool>> );\n+static_assert( not has_or<std::atomic_ref<const volatile bool>> );\n+static_assert( not has_xor<std::atomic_ref<const volatile bool>> );\n+static_assert( not has_fetch_add<std::atomic_ref<const volatile bool>> );\n+static_assert( not has_fetch_sub<std::atomic_ref<const volatile bool>> );\ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc\nindex f67190e97a3a..01dbfce23751 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/deduction.cc\n@@ -19,22 +19,29 @@\n 
\n #include <atomic>\n \n+template <typename T>\n void\n-test01()\n+test_impl(T v)\n {\n-  int i = 0;\n-  std::atomic_ref a0(i);\n-  static_assert(std::is_same_v<decltype(a0), std::atomic_ref<int>>);\n-\n-  float f = 1.0f;\n-  std::atomic_ref a1(f);\n-  static_assert(std::is_same_v<decltype(a1), std::atomic_ref<float>>);\n+  std::atomic_ref a(v);\n+  static_assert(std::is_same_v<decltype(a), std::atomic_ref<T>>);\n+}\n \n-  int* p = &i;\n-  std::atomic_ref a2(p);\n-  static_assert(std::is_same_v<decltype(a2), std::atomic_ref<int*>>);\n+template <typename T>\n+void\n+test(T v)\n+{\n+  test_impl<T>(v);\n+  test_impl<const T>(v);\n+  test_impl<volatile T>(v);\n+  test_impl<const volatile T>(v);\n+}\n \n+int main()\n+{\n+  test<int>(0);\n+  test<float>(1.0f);\n+  test<int*>(nullptr);\n   struct X { } x;\n-  std::atomic_ref a3(x);\n-  static_assert(std::is_same_v<decltype(a3), std::atomic_ref<X>>);\n+  test<X>(x);\n }\ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc\nindex 5773d144c36a..c69f3a711d34 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/float.cc\n@@ -299,14 +299,19 @@ test04()\n {\n   if constexpr (std::atomic_ref<float>::is_always_lock_free)\n   {\n-    float i = 0;\n-    float* ptr = 0;\n-    std::atomic_ref<float*> a0(ptr);\n-    std::atomic_ref<float*> a1(ptr);\n-    std::atomic_ref<float*> a2(a0);\n-    a0 = &i;\n-    VERIFY( a1 == &i );\n-    VERIFY( a2 == &i );\n+    float i = 0.0f;\n+    std::atomic_ref<float> a0(i);\n+    std::atomic_ref<float> a1(i);\n+    std::atomic_ref<const float> a1c(i);\n+    std::atomic_ref<volatile float> a1v(i);\n+    std::atomic_ref<const volatile float> a1cv(i);\n+    std::atomic_ref<float> a2(a0);\n+    a0 = 1.0f;\n+    VERIFY( a1 == 1.0f );\n+    VERIFY( a1c == 1.0f );\n+    VERIFY( a1v == 1.0f );\n+    VERIFY( a1cv == 1.0f );\n+    VERIFY( a2 == 1.0f );\n   }\n }\n \ndiff 
--git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc\nindex 2e6fa0f90e2d..079ec1b1a785 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/generic.cc\n@@ -108,9 +108,15 @@ test02()\n   X i;\n   std::atomic_ref<X> a0(i);\n   std::atomic_ref<X> a1(i);\n+  std::atomic_ref<const X> a1c(i);\n+  std::atomic_ref<volatile X> a1v(i);\n+  std::atomic_ref<const volatile X> a1cv(i);\n   std::atomic_ref<X> a2(a0);\n   a0 = 42;\n   VERIFY( a1.load() == 42 );\n+  VERIFY( a1c.load() == 42 );\n+  VERIFY( a1v.load() == 42 );\n+  VERIFY( a1cv.load() == 42 );\n   VERIFY( a2.load() == 42 );\n }\n \ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc\nindex f6b68ebc5989..310434cefb54 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/integral.cc\n@@ -302,9 +302,15 @@ test03()\n   int i = 0;\n   std::atomic_ref<int> a0(i);\n   std::atomic_ref<int> a1(i);\n+  std::atomic_ref<const int> a1c(i);\n+  std::atomic_ref<volatile int> a1v(i);\n+  std::atomic_ref<const volatile int> a1cv(i);\n   std::atomic_ref<int> a2(a0);\n   a0 = 42;\n   VERIFY( a1 == 42 );\n+  VERIFY( a1c == 42 );\n+  VERIFY( a1v == 42 );\n+  VERIFY( a1cv == 42 );\n   VERIFY( a2 == 42 );\n }\n \ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc\nindex d1789af890eb..8db45c797c8d 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/pointer.cc\n@@ -210,9 +210,15 @@ test03()\n   int* ptr = 0;\n   std::atomic_ref<int*> a0(ptr);\n   std::atomic_ref<int*> a1(ptr);\n+  std::atomic_ref<int* const> a1c(ptr);\n+  std::atomic_ref<int* volatile> a1v(ptr);\n+  std::atomic_ref<int* const volatile> a1cv(ptr);\n   
std::atomic_ref<int*> a2(a0);\n   a0 = &i;\n   VERIFY( a1 == &i );\n+  VERIFY( a1c == &i );\n+  VERIFY( a1v == &i );\n+  VERIFY( a1cv == &i );\n   VERIFY( a2 == &i );\n }\n \ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc\nindex 3b929563a1e7..e7bb29b0aa63 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/requirements.cc\n@@ -18,56 +18,94 @@\n // { dg-do compile { target c++20 } }\n \n #include <atomic>\n+#include <type_traits>\n \n+template <class T>\n void\n-test01()\n+test_generic()\n {\n-  struct X { int c; };\n-  using A = std::atomic_ref<X>;\n+  using A = std::atomic_ref<T>;\n   static_assert( std::is_standard_layout_v<A> );\n   static_assert( std::is_nothrow_copy_constructible_v<A> );\n   static_assert( std::is_trivially_destructible_v<A> );\n-  static_assert( std::is_same_v<A::value_type, X> );\n+  static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );\n   static_assert( !std::is_copy_assignable_v<A> );\n   static_assert( !std::is_move_assignable_v<A> );\n }\n \n+template <class T>\n void\n-test02()\n+test_integral()\n {\n-  using A = std::atomic_ref<int>;\n+  static_assert( std::is_integral_v<T> );\n+  using A = std::atomic_ref<T>;\n   static_assert( std::is_standard_layout_v<A> );\n   static_assert( std::is_nothrow_copy_constructible_v<A> );\n   static_assert( std::is_trivially_destructible_v<A> );\n-  static_assert( std::is_same_v<A::value_type, int> );\n-  static_assert( std::is_same_v<A::difference_type, A::value_type> );\n+  static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );\n+  static_assert( std::is_same_v<typename A::difference_type, typename A::value_type> );\n   static_assert( !std::is_copy_assignable_v<A> );\n   static_assert( !std::is_move_assignable_v<A> );\n }\n \n+template <class T>\n void\n-test03()\n+test_floating_point()\n 
{\n-  using A = std::atomic_ref<double>;\n+  static_assert( std::is_floating_point_v<T> );\n+  using A = std::atomic_ref<T>;\n   static_assert( std::is_standard_layout_v<A> );\n   static_assert( std::is_nothrow_copy_constructible_v<A> );\n   static_assert( std::is_trivially_destructible_v<A> );\n-  static_assert( std::is_same_v<A::value_type, double> );\n-  static_assert( std::is_same_v<A::difference_type, A::value_type> );\n+  static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );\n+  static_assert( std::is_same_v<typename A::difference_type, typename A::value_type> );\n   static_assert( !std::is_copy_assignable_v<A> );\n   static_assert( !std::is_move_assignable_v<A> );\n }\n \n+template <class T>\n void\n-test04()\n+test_pointer()\n {\n-  using A = std::atomic_ref<int*>;\n+  static_assert( std::is_pointer_v<T> );\n+  using A = std::atomic_ref<T>;\n   static_assert( std::is_standard_layout_v<A> );\n   static_assert( std::is_nothrow_copy_constructible_v<A> );\n   static_assert( std::is_trivially_destructible_v<A> );\n-  static_assert( std::is_same_v<A::value_type, int*> );\n-  static_assert( std::is_same_v<A::difference_type, std::ptrdiff_t> );\n+  static_assert( std::is_same_v<typename A::value_type, std::remove_cv_t<T>> );\n+  static_assert( std::is_same_v<typename A::difference_type, std::ptrdiff_t> );\n   static_assert( std::is_nothrow_copy_constructible_v<A> );\n   static_assert( !std::is_copy_assignable_v<A> );\n   static_assert( !std::is_move_assignable_v<A> );\n }\n+\n+int\n+main()\n+{\n+  struct X { int c; };\n+  test_generic<X>();\n+  test_generic<const X>();\n+  test_generic<volatile X>();\n+  test_generic<const volatile X>();\n+\n+  // atomic_ref excludes (cv) `bool` from the set of integral types\n+  test_generic<bool>();\n+  test_generic<const bool>();\n+  test_generic<volatile bool>();\n+  test_generic<const volatile bool>();\n+\n+  test_integral<int>();\n+  test_integral<const int>();\n+  test_integral<volatile int>();\n+  
test_integral<const volatile int>();\n+\n+  test_floating_point<double>();\n+  test_floating_point<const double>();\n+  test_floating_point<volatile double>();\n+  test_floating_point<const volatile double>();\n+\n+  test_pointer<int*>();\n+  test_pointer<int* const>();\n+  test_pointer<int* volatile>();\n+  test_pointer<int* const volatile>();\n+}\n\\ No newline at end of file\ndiff --git a/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc b/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc\nindex ecabeecd5bb3..db20a197ed06 100644\n--- a/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc\n+++ b/libstdc++-v3/testsuite/29_atomics/atomic_ref/wait_notify.cc\n@@ -41,6 +41,16 @@ template<typename S>\n         });\n       a.wait(va);\n       t.join();\n+\n+      std::atomic_ref<const S> b{ aa };\n+      b.wait(va);\n+      std::thread t2([&]\n+        {\n+\t  a.store(va);\n+\t  a.notify_one();\n+        });\n+      b.wait(vb);\n+      t2.join();\n     }\n   }\n \n",
    "prefixes": [
        "v1",
        "01/10"
    ]
}