From patchwork Wed Feb 16 22:26:17 2011
X-Patchwork-Submitter: Benjamin Kosnik
X-Patchwork-Id: 83408
Date: Wed, 16 Feb 2011 14:26:17 -0800
From: Benjamin Kosnik
To: Benjamin Kosnik
Cc: libstdc++@gcc.gnu.org, gcc-patches@gcc.gnu.org
Subject: Re: [v3] remove atomic_address
Message-ID: <20110216142617.5774511e@shotwell>
In-Reply-To: <20110216110935.1802e9f2@shotwell>
References: <20110216110935.1802e9f2@shotwell>

Here's the correct patch.

-benjamin

2011-02-16  Benjamin Kosnik

	* include/std/atomic: Remove atomic_address, uplift to N3225.
	* include/bits/atomic_0.h: Same.
	* include/bits/atomic_2.h: Same.
	* include/bits/atomic_base.h: Same.
	* testsuite/29_atomics/atomic_address: Delete.

Index: include/std/atomic
===================================================================
--- include/std/atomic	(revision 170216)
+++ include/std/atomic	(working copy)
@@ -1,6 +1,6 @@ // -*- C++ -*- header. -// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc. +// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the @@ -230,132 +230,188 @@ /// Partial specialization for pointer types.
template - struct atomic<_Tp*> : atomic_address + struct atomic<_Tp*> { + typedef _Tp* __pointer_type; + typedef __atomic_base<_Tp*> __base_type; + __base_type _M_b; + atomic() = default; ~atomic() = default; atomic(const atomic&) = delete; + atomic& operator=(const atomic&) = delete; atomic& operator=(const atomic&) volatile = delete; - constexpr atomic(_Tp* __v) : atomic_address(__v) { } + constexpr atomic(__pointer_type __p) : _M_b(__p) { } - void - store(_Tp*, memory_order = memory_order_seq_cst); + operator __pointer_type() const + { return __pointer_type(_M_b); } - void - store(_Tp*, memory_order = memory_order_seq_cst) volatile; + operator __pointer_type() const volatile + { return __pointer_type(_M_b); } - _Tp* - load(memory_order = memory_order_seq_cst) const; + __pointer_type + operator=(__pointer_type __p) + { return _M_b.operator=(__p); } - _Tp* - load(memory_order = memory_order_seq_cst) const volatile; + __pointer_type + operator=(__pointer_type __p) volatile + { return _M_b.operator=(__p); } - _Tp* - exchange(_Tp*, memory_order = memory_order_seq_cst); + __pointer_type + operator++(int) + { return _M_b++; } - _Tp* - exchange(_Tp*, memory_order = memory_order_seq_cst) volatile; + __pointer_type + operator++(int) volatile + { return _M_b++; } - bool - compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order); + __pointer_type + operator--(int) + { return _M_b--; } - bool - compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order) volatile; + __pointer_type + operator--(int) volatile + { return _M_b--; } - bool - compare_exchange_weak(_Tp*&, _Tp*, memory_order = memory_order_seq_cst); + __pointer_type + operator++() + { return ++_M_b; } - bool - compare_exchange_weak(_Tp*&, _Tp*, - memory_order = memory_order_seq_cst) volatile; + __pointer_type + operator++() volatile + { return ++_M_b; } - bool - compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order); + __pointer_type + operator--() + { return --_M_b; } - bool - compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order) volatile; + __pointer_type + operator--() volatile + { return --_M_b; } + __pointer_type + operator+=(ptrdiff_t __d) + { return _M_b.operator+=(__d); } + + __pointer_type + operator+=(ptrdiff_t __d) volatile + { return _M_b.operator+=(__d); } + + __pointer_type + operator-=(ptrdiff_t __d) + { return _M_b.operator-=(__d); } + + __pointer_type + operator-=(ptrdiff_t __d) volatile + { return _M_b.operator-=(__d); } + bool - compare_exchange_strong(_Tp*&, _Tp*, memory_order = memory_order_seq_cst); + is_lock_free() const + { return _M_b.is_lock_free(); } bool - compare_exchange_strong(_Tp*&, _Tp*, - memory_order = memory_order_seq_cst) volatile; + is_lock_free() const volatile + { return _M_b.is_lock_free(); } - _Tp* - fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst); + void + store(__pointer_type __p, memory_order __m = memory_order_seq_cst) + { return _M_b.store(__p, __m); } - _Tp* - fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst) volatile; + void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile + { return _M_b.store(__p, __m); } - _Tp* - fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst); + __pointer_type + load(memory_order __m = memory_order_seq_cst) const + { return _M_b.load(__m); } - _Tp* - fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst) volatile; + __pointer_type + load(memory_order __m = memory_order_seq_cst) const volatile + { return _M_b.load(__m); } - operator _Tp*() const - { return load(); } + __pointer_type 
+ exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst) + { return _M_b.exchange(__p, __m); } - operator _Tp*() const volatile - { return load(); } + __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile + { return _M_b.exchange(__p, __m); } - _Tp* - operator=(_Tp* __v) + bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + + bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) volatile + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } + + bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) { - store(__v); - return __v; + return compare_exchange_weak(__p1, __p2, __m, + __calculate_memory_order(__m)); } - _Tp* - operator=(_Tp* __v) volatile + bool + compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) volatile { - store(__v); - return __v; + return compare_exchange_weak(__p1, __p2, __m, + __calculate_memory_order(__m)); } - _Tp* - operator++(int) { return fetch_add(1); } + bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } - _Tp* - operator++(int) volatile { return fetch_add(1); } + bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m1, memory_order __m2) volatile + { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } - _Tp* - operator--(int) { return fetch_sub(1); } + bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) + { + return _M_b.compare_exchange_strong(__p1, __p2, __m, + __calculate_memory_order(__m)); + } - _Tp* - operator--(int) volatile { return fetch_sub(1); } + bool + compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, + memory_order __m = memory_order_seq_cst) volatile + { + return _M_b.compare_exchange_strong(__p1, __p2, __m, + __calculate_memory_order(__m)); + } - _Tp* - operator++() { return fetch_add(1) + 1; } + __pointer_type + fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) + { return _M_b.fetch_add(__d, __m); } - _Tp* - operator++() volatile { return fetch_add(1) + 1; } + __pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile + { return _M_b.fetch_add(__d, __m); } - _Tp* - operator--() { return fetch_sub(1) - 1; } + __pointer_type + fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) + { return _M_b.fetch_sub(__d, __m); } - _Tp* - operator--() volatile { return fetch_sub(1) - 1; } - - _Tp* - operator+=(ptrdiff_t __d) - { return fetch_add(__d) + __d; } - - _Tp* - operator+=(ptrdiff_t __d) volatile - { return fetch_add(__d) + __d; } - - _Tp* - operator-=(ptrdiff_t __d) - { return fetch_sub(__d) - __d; } - - _Tp* - operator-=(ptrdiff_t __d) volatile - { return fetch_sub(__d) - __d; } + __pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile + { return _M_b.fetch_sub(__d, __m); } }; + /// Explicit specialization for bool. 
template<> struct atomic : public atomic_bool @@ -642,143 +698,13 @@ }; - template - _Tp* - atomic<_Tp*>::load(memory_order __m) const - { return static_cast<_Tp*>(atomic_address::load(__m)); } - - template - _Tp* - atomic<_Tp*>::load(memory_order __m) const volatile - { return static_cast<_Tp*>(atomic_address::load(__m)); } - - template - _Tp* - atomic<_Tp*>::exchange(_Tp* __v, memory_order __m) - { return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); } - - template - _Tp* - atomic<_Tp*>::exchange(_Tp* __v, memory_order __m) volatile - { return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); } - - template - bool - atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1, - memory_order __m2) - { - void** __vr = reinterpret_cast(&__r); - void* __vv = static_cast(__v); - return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2); - } - - template - bool - atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1, - memory_order __m2) volatile - { - void** __vr = reinterpret_cast(&__r); - void* __vv = static_cast(__v); - return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2); - } - - template - bool - atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m) - { - return compare_exchange_weak(__r, __v, __m, - __calculate_memory_order(__m)); - } - - template - bool - atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, - memory_order __m) volatile - { - return compare_exchange_weak(__r, __v, __m, - __calculate_memory_order(__m)); - } - - template - bool - atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v, - memory_order __m1, - memory_order __m2) - { - void** __vr = reinterpret_cast(&__r); - void* __vv = static_cast(__v); - return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2); - } - - template - bool - atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v, - memory_order __m1, - memory_order __m2) volatile - { - void** __vr = reinterpret_cast(&__r); - void* __vv = static_cast(__v); - return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2); - } - - template - bool - atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v, - memory_order __m) - { - return compare_exchange_strong(__r, __v, __m, - __calculate_memory_order(__m)); - } - - template - bool - atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v, - memory_order __m) volatile - { - return compare_exchange_strong(__r, __v, __m, - __calculate_memory_order(__m)); - } - - template - _Tp* - atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m) - { - void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m); - return static_cast<_Tp*>(__p); - } - - template - _Tp* - atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m) volatile - { - void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m); - return static_cast<_Tp*>(__p); - } - - template - _Tp* - atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m) - { - void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m); - return static_cast<_Tp*>(__p); - } - - template - _Tp* - atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m) volatile - { - void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m); - return static_cast<_Tp*>(__p); - } - - // Function definitions, atomic_flag operations. 
inline bool atomic_flag_test_and_set_explicit(atomic_flag* __a, memory_order __m) { return __a->test_and_set(__m); } inline bool - atomic_flag_test_and_set_explicit(volatile atomic_flag* __a, + atomic_flag_test_and_set_explicit(volatile atomic_flag* __a, memory_order __m) { return __a->test_and_set(__m); } @@ -805,355 +731,78 @@ inline void atomic_flag_clear(volatile atomic_flag* __a) { atomic_flag_clear_explicit(__a, memory_order_seq_cst); } - - // Function definitions, atomic_address operations. - inline bool - atomic_is_lock_free(const atomic_address* __a) - { return __a->is_lock_free(); } - inline bool - atomic_is_lock_free(const volatile atomic_address* __a) - { return __a->is_lock_free(); } - - inline void - atomic_init(atomic_address* __a, void* __v); - - inline void - atomic_init(volatile atomic_address* __a, void* __v); - - inline void - atomic_store_explicit(atomic_address* __a, void* __v, memory_order __m) - { __a->store(__v, __m); } - - inline void - atomic_store_explicit(volatile atomic_address* __a, void* __v, - memory_order __m) - { __a->store(__v, __m); } - - inline void - atomic_store(atomic_address* __a, void* __v) - { __a->store(__v); } - - inline void - atomic_store(volatile atomic_address* __a, void* __v) - { __a->store(__v); } - - inline void* - atomic_load_explicit(const atomic_address* __a, memory_order __m) - { return __a->load(__m); } - - inline void* - atomic_load_explicit(const volatile atomic_address* __a, memory_order __m) - { return __a->load(__m); } - - inline void* - atomic_load(const atomic_address* __a) - { return __a->load(); } - - inline void* - atomic_load(const volatile atomic_address* __a) - { return __a->load(); } - - inline void* - atomic_exchange_explicit(atomic_address* __a, void* __v, memory_order __m) - { return __a->exchange(__v, __m); } - - inline void* - atomic_exchange_explicit(volatile atomic_address* __a, void* __v, - memory_order __m) - { return __a->exchange(__v, __m); } - - inline void* - atomic_exchange(atomic_address* __a, void* __v) - { return __a->exchange(__v); } - - inline void* - atomic_exchange(volatile atomic_address* __a, void* __v) - { return __a->exchange(__v); } - - - inline bool - atomic_compare_exchange_weak_explicit(atomic_address* __a, - void** __v1, void* __v2, - memory_order __m1, memory_order __m2) - { return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); } - - inline bool - atomic_compare_exchange_weak_explicit(volatile atomic_address* __a, - void** __v1, void* __v2, - memory_order __m1, memory_order __m2) - { return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); } - - inline bool - atomic_compare_exchange_weak(atomic_address* __a, void** __v1, void* __v2) - { - return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst, - memory_order_seq_cst); - } - - inline bool - atomic_compare_exchange_weak(volatile atomic_address* __a, void** __v1, - void* __v2) - { - return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst, - memory_order_seq_cst); - } - - inline bool - atomic_compare_exchange_strong_explicit(atomic_address* __a, - void** __v1, void* __v2, - memory_order __m1, memory_order __m2) - { return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); } - - inline bool - atomic_compare_exchange_strong_explicit(volatile atomic_address* __a, - void** __v1, void* __v2, - memory_order __m1, memory_order __m2) - { return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); } - - inline bool - atomic_compare_exchange_strong(atomic_address* __a, void** __v1, void* __v2) - { - return 
__a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst, - memory_order_seq_cst); - } - - inline bool - atomic_compare_exchange_strong(volatile atomic_address* __a, - void** __v1, void* __v2) - { - return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst, - memory_order_seq_cst); - } - - inline void* - atomic_fetch_add_explicit(atomic_address* __a, ptrdiff_t __d, - memory_order __m) - { return __a->fetch_add(__d, __m); } - - inline void* - atomic_fetch_add_explicit(volatile atomic_address* __a, ptrdiff_t __d, - memory_order __m) - { return __a->fetch_add(__d, __m); } - - inline void* - atomic_fetch_add(atomic_address* __a, ptrdiff_t __d) - { return __a->fetch_add(__d); } - - inline void* - atomic_fetch_add(volatile atomic_address* __a, ptrdiff_t __d) - { return __a->fetch_add(__d); } - - inline void* - atomic_fetch_sub_explicit(atomic_address* __a, ptrdiff_t __d, - memory_order __m) - { return __a->fetch_sub(__d, __m); } - - inline void* - atomic_fetch_sub_explicit(volatile atomic_address* __a, ptrdiff_t __d, - memory_order __m) - { return __a->fetch_sub(__d, __m); } - - inline void* - atomic_fetch_sub(atomic_address* __a, ptrdiff_t __d) - { return __a->fetch_sub(__d); } - - inline void* - atomic_fetch_sub(volatile atomic_address* __a, ptrdiff_t __d) - { return __a->fetch_sub(__d); } - - - // Function definitions, atomic_bool operations. - inline bool - atomic_is_lock_free(const atomic_bool* __a) - { return __a->is_lock_free(); } - - inline bool - atomic_is_lock_free(const volatile atomic_bool* __a) - { return __a->is_lock_free(); } - - inline void - atomic_init(atomic_bool* __a, bool __b); - - inline void - atomic_init(volatile atomic_bool* __a, bool __b); - - inline void - atomic_store_explicit(atomic_bool* __a, bool __i, memory_order __m) - { __a->store(__i, __m); } - - inline void - atomic_store_explicit(volatile atomic_bool* __a, bool __i, memory_order __m) - { __a->store(__i, __m); } - - inline void - atomic_store(atomic_bool* __a, bool __i) - { __a->store(__i); } - - inline void - atomic_store(volatile atomic_bool* __a, bool __i) - { __a->store(__i); } - - inline bool - atomic_load_explicit(const atomic_bool* __a, memory_order __m) - { return __a->load(__m); } - - inline bool - atomic_load_explicit(const volatile atomic_bool* __a, memory_order __m) - { return __a->load(__m); } - - inline bool - atomic_load(const atomic_bool* __a) - { return __a->load(); } - - inline bool - atomic_load(const volatile atomic_bool* __a) - { return __a->load(); } - - inline bool - atomic_exchange_explicit(atomic_bool* __a, bool __i, memory_order __m) - { return __a->exchange(__i, __m); } - - inline bool - atomic_exchange_explicit(volatile atomic_bool* __a, bool __i, - memory_order __m) - { return __a->exchange(__i, __m); } - - inline bool - atomic_exchange(atomic_bool* __a, bool __i) - { return __a->exchange(__i); } - - inline bool - atomic_exchange(volatile atomic_bool* __a, bool __i) - { return __a->exchange(__i); } - - inline bool - atomic_compare_exchange_weak_explicit(atomic_bool* __a, bool* __i1, - bool __i2, memory_order __m1, - memory_order __m2) - { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } - - inline bool - atomic_compare_exchange_weak_explicit(volatile atomic_bool* __a, bool* __i1, - bool __i2, memory_order __m1, - memory_order __m2) - { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } - - inline bool - atomic_compare_exchange_weak(atomic_bool* __a, bool* __i1, bool __i2) - { - return __a->compare_exchange_weak(*__i1, __i2, 
memory_order_seq_cst, - memory_order_seq_cst); - } - - inline bool - atomic_compare_exchange_weak(volatile atomic_bool* __a, bool* __i1, bool __i2) - { - return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst, - memory_order_seq_cst); - } - - inline bool - atomic_compare_exchange_strong_explicit(atomic_bool* __a, - bool* __i1, bool __i2, - memory_order __m1, memory_order __m2) - { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } - - inline bool - atomic_compare_exchange_strong_explicit(volatile atomic_bool* __a, - bool* __i1, bool __i2, - memory_order __m1, memory_order __m2) - { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } - - inline bool - atomic_compare_exchange_strong(atomic_bool* __a, bool* __i1, bool __i2) - { - return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst, - memory_order_seq_cst); - } - - inline bool - atomic_compare_exchange_strong(volatile atomic_bool* __a, - bool* __i1, bool __i2) - { - return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst, - memory_order_seq_cst); - } - - - // Function templates for atomic_integral operations, using - // __atomic_base . Template argument should be constricted to - // intergral types as specified in the standard. + // Function templates generally applicable to atomic types. template inline bool - atomic_is_lock_free(const __atomic_base<_ITp>* __a) + atomic_is_lock_free(const atomic<_ITp>* __a) { return __a->is_lock_free(); } template inline bool - atomic_is_lock_free(const volatile __atomic_base<_ITp>* __a) + atomic_is_lock_free(const volatile atomic<_ITp>* __a) { return __a->is_lock_free(); } template - inline void - atomic_init(__atomic_base<_ITp>* __a, _ITp __i); + inline void + atomic_init(atomic<_ITp>* __a, _ITp __i); template - inline void - atomic_init(volatile __atomic_base<_ITp>* __a, _ITp __i); + inline void + atomic_init(volatile atomic<_ITp>* __a, _ITp __i); template inline void - atomic_store_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m) + atomic_store_explicit(atomic<_ITp>* __a, _ITp __i, memory_order __m) { __a->store(__i, __m); } template inline void - atomic_store_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i, memory_order __m) { __a->store(__i, __m); } template inline _ITp - atomic_load_explicit(const __atomic_base<_ITp>* __a, memory_order __m) + atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) { return __a->load(__m); } template inline _ITp - atomic_load_explicit(const volatile __atomic_base<_ITp>* __a, + atomic_load_explicit(const volatile atomic<_ITp>* __a, memory_order __m) { return __a->load(__m); } template inline _ITp - atomic_exchange_explicit(__atomic_base<_ITp>* __a, _ITp __i, + atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i, memory_order __m) { return __a->exchange(__i, __m); } template inline _ITp - atomic_exchange_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i, memory_order __m) { return __a->exchange(__i, __m); } template inline bool - atomic_compare_exchange_weak_explicit(__atomic_base<_ITp>* __a, + atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2, memory_order __m1, memory_order __m2) { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } template inline bool - atomic_compare_exchange_weak_explicit(volatile __atomic_base<_ITp>* __a, + atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a, _ITp* __i1, 
_ITp __i2, memory_order __m1, memory_order __m2) { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } template inline bool - atomic_compare_exchange_strong_explicit(__atomic_base<_ITp>* __a, + atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2, memory_order __m1, memory_order __m2) @@ -1161,105 +810,46 @@ template inline bool - atomic_compare_exchange_strong_explicit(volatile __atomic_base<_ITp>* __a, + atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a, _ITp* __i1, _ITp __i2, memory_order __m1, memory_order __m2) { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } - template - inline _ITp - atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_add(__i, __m); } template - inline _ITp - atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_add(__i, __m); } - - template - inline _ITp - atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_sub(__i, __m); } - - template - inline _ITp - atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_sub(__i, __m); } - - template - inline _ITp - atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_and(__i, __m); } - - template - inline _ITp - atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_and(__i, __m); } - - template - inline _ITp - atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_or(__i, __m); } - - template - inline _ITp - atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_or(__i, __m); } - - template - inline _ITp - atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_xor(__i, __m); } - - template - inline _ITp - atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, - memory_order __m) - { return __a->fetch_xor(__i, __m); } - - template inline void - atomic_store(__atomic_base<_ITp>* __a, _ITp __i) + atomic_store(atomic<_ITp>* __a, _ITp __i) { atomic_store_explicit(__a, __i, memory_order_seq_cst); } template inline void - atomic_store(volatile __atomic_base<_ITp>* __a, _ITp __i) + atomic_store(volatile atomic<_ITp>* __a, _ITp __i) { atomic_store_explicit(__a, __i, memory_order_seq_cst); } template inline _ITp - atomic_load(const __atomic_base<_ITp>* __a) + atomic_load(const atomic<_ITp>* __a) { return atomic_load_explicit(__a, memory_order_seq_cst); } template inline _ITp - atomic_load(const volatile __atomic_base<_ITp>* __a) + atomic_load(const volatile atomic<_ITp>* __a) { return atomic_load_explicit(__a, memory_order_seq_cst); } template inline _ITp - atomic_exchange(__atomic_base<_ITp>* __a, _ITp __i) + atomic_exchange(atomic<_ITp>* __a, _ITp __i) { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } template inline _ITp - atomic_exchange(volatile __atomic_base<_ITp>* __a, _ITp __i) + atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } template inline bool - atomic_compare_exchange_weak(__atomic_base<_ITp>* __a, + atomic_compare_exchange_weak(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) { return atomic_compare_exchange_weak_explicit(__a, __i1, __i2, @@ -1269,7 +859,7 @@ template inline 
bool - atomic_compare_exchange_weak(volatile __atomic_base<_ITp>* __a, + atomic_compare_exchange_weak(volatile atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) { return atomic_compare_exchange_weak_explicit(__a, __i1, __i2, @@ -1279,7 +869,7 @@ template<typename _ITp> inline bool - atomic_compare_exchange_strong(__atomic_base<_ITp>* __a, + atomic_compare_exchange_strong(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) { return atomic_compare_exchange_strong_explicit(__a, __i1, __i2, @@ -1289,7 +879,7 @@ template<typename _ITp> inline bool - atomic_compare_exchange_strong(volatile __atomic_base<_ITp>* __a, + atomic_compare_exchange_strong(volatile atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) { return atomic_compare_exchange_strong_explicit(__a, __i1, __i2, @@ -1297,8 +887,72 @@ memory_order_seq_cst); } + // Function templates for atomic_integral operations only, using + __atomic_base. Template argument should be restricted to + integral types as specified in the standard, excluding address + types. template<typename _ITp> inline _ITp + atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_add(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_add(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_sub(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_sub(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_and(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_and(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_or(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_or(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_xor(__i, __m); } + + template<typename _ITp> + inline _ITp + atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, + memory_order __m) + { return __a->fetch_xor(__i, __m); } + + template<typename _ITp> + inline _ITp atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); } @@ -1347,6 +1001,51 @@ atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); } + + // Partial specializations for pointers.
+ template + inline _ITp* + atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) + { return __a->fetch_add(__d, __m); } + + template + inline _ITp* + atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) + { return __a->fetch_add(__d, __m); } + + template + inline _ITp* + atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) + { return __a->fetch_add(__d); } + + template + inline _ITp* + atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) + { return __a->fetch_add(__d); } + + template + inline _ITp* + atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a, + ptrdiff_t __d, memory_order __m) + { return __a->fetch_sub(__d, __m); } + + template + inline _ITp* + atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, + memory_order __m) + { return __a->fetch_sub(__d, __m); } + + template + inline _ITp* + atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) + { return __a->fetch_sub(__d); } + + template + inline _ITp* + atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) + { return __a->fetch_sub(__d); } // @} group atomics _GLIBCXX_END_NAMESPACE_VERSION Index: include/bits/atomic_0.h =================================================================== --- include/bits/atomic_0.h (revision 170216) +++ include/bits/atomic_0.h (working copy) @@ -134,300 +134,6 @@ }; - /// atomic_address - struct atomic_address - { - private: - void* _M_i; - - public: - atomic_address() = default; - ~atomic_address() = default; - atomic_address(const atomic_address&) = delete; - atomic_address& operator=(const atomic_address&) = delete; - atomic_address& operator=(const atomic_address&) volatile = delete; - - constexpr atomic_address(void* __v): _M_i (__v) { } - - bool - is_lock_free() const { return false; } - - bool - is_lock_free() const volatile { return false; } - - void - store(void* __v, memory_order __m = memory_order_seq_cst) - { - __glibcxx_assert(__m != memory_order_acquire); - __glibcxx_assert(__m != memory_order_acq_rel); - __glibcxx_assert(__m != memory_order_consume); - _ATOMIC_STORE_(this, __v, __m); - } - - void - store(void* __v, memory_order __m = memory_order_seq_cst) volatile - { - __glibcxx_assert(__m != memory_order_acquire); - __glibcxx_assert(__m != memory_order_acq_rel); - __glibcxx_assert(__m != memory_order_consume); - _ATOMIC_STORE_(this, __v, __m); - } - - void* - load(memory_order __m = memory_order_seq_cst) const - { - __glibcxx_assert(__m != memory_order_release); - __glibcxx_assert(__m != memory_order_acq_rel); - return _ATOMIC_LOAD_(this, __m); - } - - void* - load(memory_order __m = memory_order_seq_cst) const volatile - { - __glibcxx_assert(__m != memory_order_release); - __glibcxx_assert(__m != memory_order_acq_rel); - return _ATOMIC_LOAD_(this, __m); - } - - void* - exchange(void* __v, memory_order __m = memory_order_seq_cst) - { return _ATOMIC_MODIFY_(this, =, __v, __m); } - - void* - exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile - { return _ATOMIC_MODIFY_(this, =, __v, __m); } - - bool - compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) volatile - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != 
memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_weak(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_weak(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) volatile - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) volatile - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_strong(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_strong(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) volatile - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1); - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_strong(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_strong(__v1, __v2, __m, - 
__calculate_memory_order(__m)); - } - - void* - fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) - { - void** __p = &(_M_i); - __atomic_flag_base* __g = __atomic_flag_for_address(__p); - __atomic_flag_wait_explicit(__g, __m); - void* __r = *__p; - *__p = (void*)((char*)(*__p) + __d); - atomic_flag_clear_explicit(__g, __m); - return __r; - } - - void* - fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile - { - void* volatile* __p = &(_M_i); - __atomic_flag_base* __g = __atomic_flag_for_address(__p); - __atomic_flag_wait_explicit(__g, __m); - void* __r = *__p; - *__p = (void*)((char*)(*__p) + __d); - atomic_flag_clear_explicit(__g, __m); - return __r; - } - - void* - fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) - { - void** __p = &(_M_i); - __atomic_flag_base* __g = __atomic_flag_for_address(__p); - __atomic_flag_wait_explicit(__g, __m); - void* __r = *__p; - *__p = (void*)((char*)(*__p) - __d); - atomic_flag_clear_explicit(__g, __m); - return __r; - } - - void* - fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile - { - void* volatile* __p = &(_M_i); - __atomic_flag_base* __g = __atomic_flag_for_address(__p); - __atomic_flag_wait_explicit(__g, __m); - void* __r = *__p; - *__p = (void*)((char*)(*__p) - __d); - atomic_flag_clear_explicit(__g, __m); - return __r; - } - - operator void*() const - { return load(); } - - operator void*() const volatile - { return load(); } - - // XXX - void* - operator=(void* __v) - { - store(__v); - return __v; - } - - void* - operator=(void* __v) volatile - { - store(__v); - return __v; - } - - void* - operator+=(ptrdiff_t __d) - { return fetch_add(__d) + __d; } - - void* - operator+=(ptrdiff_t __d) volatile - { return fetch_add(__d) + __d; } - - void* - operator-=(ptrdiff_t __d) - { return fetch_sub(__d) - __d; } - - void* - operator-=(ptrdiff_t __d) volatile - { return fetch_sub(__d) - __d; } - }; - - /// Base class for atomic integrals. // // For each of the integral types, define atomic_[integral type] struct @@ -728,6 +434,220 @@ { return _ATOMIC_MODIFY_(this, ^=, __i, __m); } }; + + /// Partial specialization for pointer types. + template + struct __atomic_base<_PTp*> + { + private: + typedef _PTp* __return_pointer_type; + typedef void* __pointer_type; + __pointer_type _M_i; + + public: + __atomic_base() = default; + ~__atomic_base() = default; + __atomic_base(const __atomic_base&) = delete; + __atomic_base& operator=(const __atomic_base&) = delete; + __atomic_base& operator=(const __atomic_base&) volatile = delete; + + // Requires __pointer_type convertible to _M_i. 
+ constexpr __atomic_base(__return_pointer_type __p): _M_i (__p) { } + + operator __return_pointer_type() const + { return reinterpret_cast<__return_pointer_type>(load()); } + + operator __return_pointer_type() const volatile + { return reinterpret_cast<__return_pointer_type>(load()); } + + __return_pointer_type + operator=(__pointer_type __p) + { + store(__p); + return reinterpret_cast<__return_pointer_type>(__p); + } + + __return_pointer_type + operator=(__pointer_type __p) volatile + { + store(__p); + return reinterpret_cast<__return_pointer_type>(__p); + } + + __return_pointer_type + operator++(int) + { return reinterpret_cast<__return_pointer_type>(fetch_add(1)); } + + __return_pointer_type + operator++(int) volatile + { return reinterpret_cast<__return_pointer_type>(fetch_add(1)); } + + __return_pointer_type + operator--(int) + { return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); } + + __return_pointer_type + operator--(int) volatile + { return reinterpret_cast<__return_pointer_type>(fetch_sub(1)); } + + __return_pointer_type + operator++() + { return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); } + + __return_pointer_type + operator++() volatile + { return reinterpret_cast<__return_pointer_type>(fetch_add(1) + 1); } + + __return_pointer_type + operator--() + { return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); } + + __return_pointer_type + operator--() volatile + { return reinterpret_cast<__return_pointer_type>(fetch_sub(1) - 1); } + + __return_pointer_type + operator+=(ptrdiff_t __d) + { return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); } + + __return_pointer_type + operator+=(ptrdiff_t __d) volatile + { return reinterpret_cast<__return_pointer_type>(fetch_add(__d) + __d); } + + __return_pointer_type + operator-=(ptrdiff_t __d) + { return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); } + + __return_pointer_type + operator-=(ptrdiff_t __d) volatile + { return reinterpret_cast<__return_pointer_type>(fetch_sub(__d) - __d); } + + bool + is_lock_free() const + { return true; } + + bool + is_lock_free() const volatile + { return true; } + + void + store(__pointer_type __p, memory_order __m = memory_order_seq_cst) + { + __glibcxx_assert(__m != memory_order_acquire); + __glibcxx_assert(__m != memory_order_acq_rel); + __glibcxx_assert(__m != memory_order_consume); + _ATOMIC_STORE_(this, __p, __m); + } + + void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile + { + __glibcxx_assert(__m != memory_order_acquire); + __glibcxx_assert(__m != memory_order_acq_rel); + __glibcxx_assert(__m != memory_order_consume); + volatile __pointer_type* __p2 = &_M_i; + __typeof__(__p) __w = (__p); + __atomic_flag_base* __g = __atomic_flag_for_address(__p2); + __atomic_flag_wait_explicit(__g, __m); + *__p2 = reinterpret_cast<__pointer_type>(__w); + atomic_flag_clear_explicit(__g, __m); + __w; + } + + __return_pointer_type + load(memory_order __m = memory_order_seq_cst) const + { + __glibcxx_assert(__m != memory_order_release); + __glibcxx_assert(__m != memory_order_acq_rel); + void* __v = _ATOMIC_LOAD_(this, __m); + return reinterpret_cast<__return_pointer_type>(__v); + } + + __return_pointer_type + load(memory_order __m = memory_order_seq_cst) const volatile + { + __glibcxx_assert(__m != memory_order_release); + __glibcxx_assert(__m != memory_order_acq_rel); + void* __v = _ATOMIC_LOAD_(this, __m); + return reinterpret_cast<__return_pointer_type>(__v); + } + + __return_pointer_type + 
exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst) + { + void* __v = _ATOMIC_MODIFY_(this, =, __p, __m); + return reinterpret_cast<__return_pointer_type>(__v); + } + + __return_pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile + { + volatile __pointer_type* __p2 = &_M_i; + __typeof__(__p) __w = (__p); + __atomic_flag_base* __g = __atomic_flag_for_address(__p2); + __atomic_flag_wait_explicit(__g, __m); + __pointer_type __r = *__p2; + *__p2 = __w; + atomic_flag_clear_explicit(__g, __m); + return reinterpret_cast<__return_pointer_type>(__r); + } + + bool + compare_exchange_strong(__return_pointer_type& __rp1, __pointer_type __p2, + memory_order __m1, memory_order __m2) + { + __glibcxx_assert(__m2 != memory_order_release); + __glibcxx_assert(__m2 != memory_order_acq_rel); + __glibcxx_assert(__m2 <= __m1); + __pointer_type& __p1 = reinterpret_cast<__pointer_type&>(__rp1); + return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1); + } + + bool + compare_exchange_strong(__return_pointer_type& __rp1, __pointer_type __p2, + memory_order __m1, memory_order __m2) volatile + { + __glibcxx_assert(__m2 != memory_order_release); + __glibcxx_assert(__m2 != memory_order_acq_rel); + __glibcxx_assert(__m2 <= __m1); + __pointer_type& __p1 = reinterpret_cast<__pointer_type&>(__rp1); + return _ATOMIC_CMPEXCHNG_(this, &__p1, __p2, __m1); + } + + __return_pointer_type + fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) + { + void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m); + return reinterpret_cast<__return_pointer_type>(__v); + } + + __return_pointer_type + fetch_add(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile + { + void* __v = _ATOMIC_MODIFY_(this, +=, __d, __m); + return reinterpret_cast<__return_pointer_type>(__v); + } + + __return_pointer_type + fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) + { + void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m); + return reinterpret_cast<__return_pointer_type>(__v); + } + + __return_pointer_type + fetch_sub(ptrdiff_t __d, + memory_order __m = memory_order_seq_cst) volatile + { + void* __v = _ATOMIC_MODIFY_(this, -=, __d, __m); + return reinterpret_cast<__return_pointer_type>(__v); + } + }; + #undef _ATOMIC_LOAD_ #undef _ATOMIC_STORE_ #undef _ATOMIC_MODIFY_ @@ -735,6 +655,6 @@ } // namespace __atomic0 _GLIBCXX_END_NAMESPACE_VERSION -} // namespace +} // namespace std #endif Index: include/bits/atomic_2.h =================================================================== --- include/bits/atomic_2.h (revision 170216) +++ include/bits/atomic_2.h (working copy) @@ -1,6 +1,6 @@ // -*- C++ -*- header. -// Copyright (C) 2008, 2009, 2010 +// Copyright (C) 2008, 2009, 2010, 2011 // Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free @@ -23,7 +23,7 @@ // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. -/** @file bits/atomic_2.h +/** @file bits/atomic_2.h * This is an internal header file, included by other library headers. * Do not attempt to use it directly.
@headername{atomic} */ @@ -101,317 +101,6 @@ }; - /// atomic_address - struct atomic_address - { - private: - void* _M_i; - - public: - atomic_address() = default; - ~atomic_address() = default; - atomic_address(const atomic_address&) = delete; - atomic_address& operator=(const atomic_address&) = delete; - atomic_address& operator=(const atomic_address&) volatile = delete; - - constexpr atomic_address(void* __v): _M_i (__v) { } - - bool - is_lock_free() const { return true; } - - bool - is_lock_free() const volatile { return true; } - - void - store(void* __v, memory_order __m = memory_order_seq_cst) - { - __glibcxx_assert(__m != memory_order_acquire); - __glibcxx_assert(__m != memory_order_acq_rel); - __glibcxx_assert(__m != memory_order_consume); - - if (__m == memory_order_relaxed) - _M_i = __v; - else - { - // write_mem_barrier(); - _M_i = __v; - if (__m == memory_order_seq_cst) - __sync_synchronize(); - } - } - - void - store(void* __v, memory_order __m = memory_order_seq_cst) volatile - { - __glibcxx_assert(__m != memory_order_acquire); - __glibcxx_assert(__m != memory_order_acq_rel); - __glibcxx_assert(__m != memory_order_consume); - - if (__m == memory_order_relaxed) - _M_i = __v; - else - { - // write_mem_barrier(); - _M_i = __v; - if (__m == memory_order_seq_cst) - __sync_synchronize(); - } - } - - void* - load(memory_order __m = memory_order_seq_cst) const - { - __glibcxx_assert(__m != memory_order_release); - __glibcxx_assert(__m != memory_order_acq_rel); - - __sync_synchronize(); - void* __ret = _M_i; - __sync_synchronize(); - return __ret; - } - - void* - load(memory_order __m = memory_order_seq_cst) const volatile - { - __glibcxx_assert(__m != memory_order_release); - __glibcxx_assert(__m != memory_order_acq_rel); - - __sync_synchronize(); - void* __ret = _M_i; - __sync_synchronize(); - return __ret; - } - - void* - exchange(void* __v, memory_order __m = memory_order_seq_cst) - { - // XXX built-in assumes memory_order_acquire. - return __sync_lock_test_and_set(&_M_i, __v); - } - - void* - exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile - { - // XXX built-in assumes memory_order_acquire. 
- return __sync_lock_test_and_set(&_M_i, __v); - } - - bool - compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) - { return compare_exchange_strong(__v1, __v2, __m1, __m2); } - - bool - compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) volatile - { return compare_exchange_strong(__v1, __v2, __m1, __m2); } - - bool - compare_exchange_weak(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_weak(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) - { return compare_exchange_strong(__v1, __v2, __m1, __m2); } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) volatile - { return compare_exchange_strong(__v1, __v2, __m1, __m2); } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_weak(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_weak(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - - void* __v1o = __v1; - void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2); - - // Assume extra stores (of same value) allowed in true case. - __v1 = __v1n; - return __v1o == __v1n; - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1, - memory_order __m2) volatile - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - - void* __v1o = __v1; - void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2); - - // Assume extra stores (of same value) allowed in true case. - __v1 = __v1n; - return __v1o == __v1n; - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_strong(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(void*& __v1, void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_strong(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - - const void* __v1o = __v1; - const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2); - - // Assume extra stores (of same value) allowed in true case. 
- __v1 = __v1n; - return __v1o == __v1n; - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m1, memory_order __m2) volatile - { - __glibcxx_assert(__m2 != memory_order_release); - __glibcxx_assert(__m2 != memory_order_acq_rel); - __glibcxx_assert(__m2 <= __m1); - - const void* __v1o = __v1; - const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2); - - // Assume extra stores (of same value) allowed in true case. - __v1 = __v1n; - return __v1o == __v1n; - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) - { - return compare_exchange_strong(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - bool - compare_exchange_strong(const void*& __v1, const void* __v2, - memory_order __m = memory_order_seq_cst) volatile - { - return compare_exchange_strong(__v1, __v2, __m, - __calculate_memory_order(__m)); - } - - void* - fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) - { return __sync_fetch_and_add(&_M_i, __d); } - - void* - fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile - { return __sync_fetch_and_add(&_M_i, __d); } - - void* - fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) - { return __sync_fetch_and_sub(&_M_i, __d); } - - void* - fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile - { return __sync_fetch_and_sub(&_M_i, __d); } - - operator void*() const - { return load(); } - - operator void*() const volatile - { return load(); } - - void* -#if 0 - // XXX as specified but won't compile as store takes void*, - // invalid conversion from const void* to void* - // CD1 had this signature - operator=(const void* __v) -#else - operator=(void* __v) -#endif - { - store(__v); - return __v; - } - - void* -#if 0 - // XXX as specified but won't compile as store takes void*, - // invalid conversion from const void* to void* - // CD1 had this signature, but store and this could both be const void*? - operator=(const void* __v) volatile -#else - operator=(void* __v) volatile -#endif - { - store(__v); - return __v; - } - - void* - operator+=(ptrdiff_t __d) - { return __sync_add_and_fetch(&_M_i, __d); } - - void* - operator+=(ptrdiff_t __d) volatile - { return __sync_add_and_fetch(&_M_i, __d); } - - void* - operator-=(ptrdiff_t __d) - { return __sync_sub_and_fetch(&_M_i, __d); } - - void* - operator-=(ptrdiff_t __d) volatile - { return __sync_sub_and_fetch(&_M_i, __d); } - }; - - /// Base class for atomic integrals. // // For each of the integral types, define atomic_[integral type] struct @@ -747,9 +436,234 @@ memory_order __m = memory_order_seq_cst) volatile { return __sync_fetch_and_xor(&_M_i, __i); } }; + + + /// Partial specialization for pointer types. + template + struct __atomic_base<_PTp*> + { + private: + typedef _PTp* __pointer_type; + + __pointer_type _M_p; + + public: + __atomic_base() = default; + ~__atomic_base() = default; + __atomic_base(const __atomic_base&) = delete; + __atomic_base& operator=(const __atomic_base&) = delete; + __atomic_base& operator=(const __atomic_base&) volatile = delete; + + // Requires __pointer_type convertible to _M_p. 
+ constexpr __atomic_base(__pointer_type __p): _M_p (__p) { } + + operator __pointer_type() const + { return load(); } + + operator __pointer_type() const volatile + { return load(); } + + __pointer_type + operator=(__pointer_type __p) + { + store(__p); + return __p; + } + + __pointer_type + operator=(__pointer_type __p) volatile + { + store(__p); + return __p; + } + + __pointer_type + operator++(int) + { return fetch_add(1); } + + __pointer_type + operator++(int) volatile + { return fetch_add(1); } + + __pointer_type + operator--(int) + { return fetch_sub(1); } + + __pointer_type + operator--(int) volatile + { return fetch_sub(1); } + + __pointer_type + operator++() + { return fetch_add(1) + 1; } + + __pointer_type + operator++() volatile + { return fetch_add(1) + 1; } + + __pointer_type + operator--() + { return fetch_sub(1) -1; } + + __pointer_type + operator--() volatile + { return fetch_sub(1) -1; } + + __pointer_type + operator+=(ptrdiff_t __d) + { return fetch_add(__d) + __d; } + + __pointer_type + operator+=(ptrdiff_t __d) volatile + { return fetch_add(__d) + __d; } + + __pointer_type + operator-=(ptrdiff_t __d) + { return fetch_sub(__d) - __d; } + + __pointer_type + operator-=(ptrdiff_t __d) volatile + { return fetch_sub(__d) - __d; } + + bool + is_lock_free() const + { return true; } + + bool + is_lock_free() const volatile + { return true; } + + void + store(__pointer_type __p, memory_order __m = memory_order_seq_cst) + { + __glibcxx_assert(__m != memory_order_acquire); + __glibcxx_assert(__m != memory_order_acq_rel); + __glibcxx_assert(__m != memory_order_consume); + + if (__m == memory_order_relaxed) + _M_p = __p; + else + { + // write_mem_barrier(); + _M_p = __p; + if (__m == memory_order_seq_cst) + __sync_synchronize(); + } + } + + void + store(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile + { + __glibcxx_assert(__m != memory_order_acquire); + __glibcxx_assert(__m != memory_order_acq_rel); + __glibcxx_assert(__m != memory_order_consume); + + if (__m == memory_order_relaxed) + _M_p = __p; + else + { + // write_mem_barrier(); + _M_p = __p; + if (__m == memory_order_seq_cst) + __sync_synchronize(); + } + } + + __pointer_type + load(memory_order __m = memory_order_seq_cst) const + { + __glibcxx_assert(__m != memory_order_release); + __glibcxx_assert(__m != memory_order_acq_rel); + + __sync_synchronize(); + __pointer_type __ret = _M_p; + __sync_synchronize(); + return __ret; + } + + __pointer_type + load(memory_order __m = memory_order_seq_cst) const volatile + { + __glibcxx_assert(__m != memory_order_release); + __glibcxx_assert(__m != memory_order_acq_rel); + + __sync_synchronize(); + __pointer_type __ret = _M_p; + __sync_synchronize(); + return __ret; + } + + __pointer_type + exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst) + { + // XXX built-in assumes memory_order_acquire. + return __sync_lock_test_and_set(&_M_p, __p); + } + + + __pointer_type + exchange(__pointer_type __p, + memory_order __m = memory_order_seq_cst) volatile + { + // XXX built-in assumes memory_order_acquire. 
+
+      __pointer_type
+      exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
+      {
+        // XXX built-in assumes memory_order_acquire.
+        return __sync_lock_test_and_set(&_M_p, __p);
+      }
+
+
+      __pointer_type
+      exchange(__pointer_type __p,
+               memory_order __m = memory_order_seq_cst) volatile
+      {
+        // XXX built-in assumes memory_order_acquire.
+        return __sync_lock_test_and_set(&_M_p, __p);
+      }
+
+      bool
+      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+                              memory_order __m1, memory_order __m2)
+      {
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
+        __glibcxx_assert(__m2 <= __m1);
+
+        __pointer_type __p1o = __p1;
+        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
+
+        // Assume extra stores (of same value) allowed in true case.
+        __p1 = __p1n;
+        return __p1o == __p1n;
+      }
+
+      bool
+      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
+                              memory_order __m1, memory_order __m2) volatile
+      {
+        __glibcxx_assert(__m2 != memory_order_release);
+        __glibcxx_assert(__m2 != memory_order_acq_rel);
+        __glibcxx_assert(__m2 <= __m1);
+
+        __pointer_type __p1o = __p1;
+        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
+
+        // Assume extra stores (of same value) allowed in true case.
+        __p1 = __p1n;
+        return __p1o == __p1n;
+      }
+
+      __pointer_type
+      fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
+      { return __sync_fetch_and_add(&_M_p, __d); }
+
+      __pointer_type
+      fetch_add(ptrdiff_t __d,
+                memory_order __m = memory_order_seq_cst) volatile
+      { return __sync_fetch_and_add(&_M_p, __d); }
+
+      __pointer_type
+      fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
+      { return __sync_fetch_and_sub(&_M_p, __d); }
+
+      __pointer_type
+      fetch_sub(ptrdiff_t __d,
+                memory_order __m = memory_order_seq_cst) volatile
+      { return __sync_fetch_and_sub(&_M_p, __d); }
+    };
+
 } // namespace __atomic2

 _GLIBCXX_END_NAMESPACE_VERSION
-} // namespace
+} // namespace std

 #endif
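
That completes atomic_2.h: __atomic2 no longer provides atomic_address, and
client code moves to the atomic<_Tp*> partial specialization (or
atomic<void*>). A minimal before/after sketch, using only what the patch
itself provides (compile with g++ -std=gnu++0x):

  #include <atomic>

  int main()
  {
    int i = 0, j = 0;

    // Before this patch:
    //   std::atomic_address a(&i);
    //   a.store(&j);
    //   void* p = a.load();        // untyped

    // After, per N3225:
    std::atomic<int*> a(&i);
    a.store(&j);
    int* p = a.load();              // typed, no casts at the call site
    int* expected = &j;
    bool ok = a.compare_exchange_strong(expected, &i);
    return (ok && p == &j) ? 0 : 1;
  }
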
Index: include/bits/atomic_base.h
===================================================================
--- include/bits/atomic_base.h	(revision 170216)
+++ include/bits/atomic_base.h	(working copy)
@@ -22,7 +22,7 @@
 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 // <http://www.gnu.org/licenses/>.

-/** @file bits/atomic_base.h
+/** @file bits/atomic_base.h
  *  This is an internal header file, included by other library headers.
  *  Do not attempt to use it directly. @headername{atomic}
  */
@@ -68,6 +68,12 @@
     return __mo2;
   }

+  void
+  atomic_thread_fence(memory_order);
+
+  void
+  atomic_signal_fence(memory_order);
+
   /// kill_dependency
   template<typename _Tp>
     inline _Tp
@@ -78,7 +84,7 @@
   }

   /**
-   * @brief Base type for atomic_flag.
+   * @brief Base type for atomic_flag.
    *
    * Base type is POD with data, allowing atomic_flag to derive from
    * it and meet the standard layout type requirement. In addition to
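
The atomic_thread_fence/atomic_signal_fence declarations added above bring the
header in line with the N3225 free-function fences; this hunk only declares
them, so whether a given build can use them depends on definitions elsewhere
in the tree. The intended usage is the standard release/acquire pairing,
sketched here:

  #include <atomic>

  int data;
  std::atomic<bool> ready(false);

  void producer()
  {
    data = 42;
    std::atomic_thread_fence(std::memory_order_release);
    ready.store(true, std::memory_order_relaxed);
  }

  void consumer()
  {
    while (!ready.load(std::memory_order_relaxed))
      { }
    std::atomic_thread_fence(std::memory_order_acquire);
    // data == 42 is now guaranteed to be visible here.
  }
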
@@ -114,27 +120,24 @@
   namespace __atomic0
   {
     struct atomic_flag;
-    struct atomic_address;

     template<typename _IntTp>
       struct __atomic_base;
-  }
+  }

   namespace __atomic2
   {
     struct atomic_flag;
-    struct atomic_address;

     template<typename _IntTp>
       struct __atomic_base;
-  }
+  }

   namespace __atomic1
   {
     using __atomic2::atomic_flag;
-    using __atomic0::atomic_address;
     using __atomic0::__atomic_base;
-  }
+  }

   /// Lock-free Property
 #if defined(_GLIBCXX_ATOMIC_BUILTINS_1) && defined(_GLIBCXX_ATOMIC_BUILTINS_2) \
@@ -157,7 +160,6 @@
 #define ATOMIC_INT_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
 #define ATOMIC_LONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
 #define ATOMIC_LLONG_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY
-#define ATOMIC_ADDRESS_LOCK_FREE _GLIBCXX_ATOMIC_PROPERTY

   inline namespace _GLIBCXX_ATOMIC_NAMESPACE { }

@@ -166,28 +168,28 @@
   typedef __atomic_base<char>			atomic_char;

   /// atomic_schar
-  typedef __atomic_base<signed char>		atomic_schar;
+  typedef __atomic_base<signed char>		atomic_schar;

   /// atomic_uchar
-  typedef __atomic_base<unsigned char>		atomic_uchar;
+  typedef __atomic_base<unsigned char>		atomic_uchar;

   /// atomic_short
-  typedef __atomic_base<short>			atomic_short;
+  typedef __atomic_base<short>			atomic_short;

   /// atomic_ushort
-  typedef __atomic_base<unsigned short>		atomic_ushort;
+  typedef __atomic_base<unsigned short>		atomic_ushort;

   /// atomic_int
   typedef __atomic_base<int>			atomic_int;

   /// atomic_uint
-  typedef __atomic_base<unsigned int>		atomic_uint;
+  typedef __atomic_base<unsigned int>		atomic_uint;

   /// atomic_long
   typedef __atomic_base<long>			atomic_long;

   /// atomic_ulong
-  typedef __atomic_base<unsigned long>		atomic_ulong;
+  typedef __atomic_base<unsigned long>		atomic_ulong;

   /// atomic_llong
   typedef __atomic_base<long long>		atomic_llong;
@@ -212,50 +214,50 @@
   typedef __atomic_base<int_least8_t>		atomic_int_least8_t;

   /// atomic_uint_least8_t
-  typedef __atomic_base<uint_least8_t>		atomic_uint_least8_t;
+  typedef __atomic_base<uint_least8_t>		atomic_uint_least8_t;

   /// atomic_int_least16_t
-  typedef __atomic_base<int_least16_t>		atomic_int_least16_t;
+  typedef __atomic_base<int_least16_t>		atomic_int_least16_t;

   /// atomic_uint_least16_t
-  typedef __atomic_base<uint_least16_t>		atomic_uint_least16_t;
+  typedef __atomic_base<uint_least16_t>		atomic_uint_least16_t;

   /// atomic_int_least32_t
-  typedef __atomic_base<int_least32_t>		atomic_int_least32_t;
+  typedef __atomic_base<int_least32_t>		atomic_int_least32_t;

   /// atomic_uint_least32_t
-  typedef __atomic_base<uint_least32_t>		atomic_uint_least32_t;
+  typedef __atomic_base<uint_least32_t>		atomic_uint_least32_t;

   /// atomic_int_least64_t
-  typedef __atomic_base<int_least64_t>		atomic_int_least64_t;
+  typedef __atomic_base<int_least64_t>		atomic_int_least64_t;

   /// atomic_uint_least64_t
-  typedef __atomic_base<uint_least64_t>		atomic_uint_least64_t;
+  typedef __atomic_base<uint_least64_t>		atomic_uint_least64_t;

   /// atomic_int_fast8_t
   typedef __atomic_base<int_fast8_t>		atomic_int_fast8_t;

   /// atomic_uint_fast8_t
-  typedef __atomic_base<uint_fast8_t>		atomic_uint_fast8_t;
+  typedef __atomic_base<uint_fast8_t>		atomic_uint_fast8_t;

   /// atomic_int_fast16_t
-  typedef __atomic_base<int_fast16_t>		atomic_int_fast16_t;
+  typedef __atomic_base<int_fast16_t>		atomic_int_fast16_t;

   /// atomic_uint_fast16_t
-  typedef __atomic_base<uint_fast16_t>		atomic_uint_fast16_t;
+  typedef __atomic_base<uint_fast16_t>		atomic_uint_fast16_t;

   /// atomic_int_fast32_t
-  typedef __atomic_base<int_fast32_t>		atomic_int_fast32_t;
+  typedef __atomic_base<int_fast32_t>		atomic_int_fast32_t;

   /// atomic_uint_fast32_t
-  typedef __atomic_base<uint_fast32_t>		atomic_uint_fast32_t;
+  typedef __atomic_base<uint_fast32_t>		atomic_uint_fast32_t;

   /// atomic_int_fast64_t
-  typedef __atomic_base<int_fast64_t>		atomic_int_fast64_t;
+  typedef __atomic_base<int_fast64_t>		atomic_int_fast64_t;

   /// atomic_uint_fast64_t
-  typedef __atomic_base<uint_fast64_t>		atomic_uint_fast64_t;
+  typedef __atomic_base<uint_fast64_t>		atomic_uint_fast64_t;

   /// atomic_intptr_t
@@ -265,7 +267,7 @@
   typedef __atomic_base<uintptr_t>		atomic_uintptr_t;

   /// atomic_size_t
-  typedef __atomic_base<size_t>			atomic_size_t;
+  typedef __atomic_base<size_t>			atomic_size_t;

   /// atomic_intmax_t
   typedef __atomic_base<intmax_t>		atomic_intmax_t;
@@ -277,16 +279,17 @@
   typedef __atomic_base<ptrdiff_t>		atomic_ptrdiff_t;

-  struct atomic_bool;
-
 #define ATOMIC_VAR_INIT(_VI) { _VI }

   template<typename _Tp>
     struct atomic;

+  template<typename _Tp>
+    struct atomic<_Tp*>;
+
   // @} group atomics

 _GLIBCXX_END_NAMESPACE_VERSION
-} // namespace
+} // namespace std

 #endif
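
atomic_base.h now forward-declares the atomic<_Tp*> partial specialization
where it used to forward-declare atomic_address, so pointer atomics get the
operator surface defined earlier in the patch: assignment maps to store(),
the conversion operator maps to load(), and so on. For instance (illustrative
snippet; it deliberately avoids pointer arithmetic, whose unit scaling under
the __sync builtins deserves its own test):

  #include <atomic>

  int x, y;
  std::atomic<int*> cur(&x);

  void demo()
  {
    cur = &y;                      // operator= forwards to store()
    int* p = cur;                  // conversion operator forwards to load()
    int* prev = cur.exchange(&x);  // __sync_lock_test_and_set underneath
    (void)p; (void)prev;
  }
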
Index: testsuite/29_atomics/atomic_address/cons/assign_neg.cc
===================================================================
--- testsuite/29_atomics/atomic_address/cons/assign_neg.cc	(revision 170216)
+++ testsuite/29_atomics/atomic_address/cons/assign_neg.cc	(working copy)
@@ -1,31 +0,0 @@
-// { dg-options "-std=gnu++0x" }
-// { dg-do compile }
-
-// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
-//
-// This file is part of the GNU ISO C++ Library.  This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License along
-// with this library; see the file COPYING3.  If not see
-// <http://www.gnu.org/licenses/>.
-
-#include <atomic>
-
-void test01()
-{
-  // Assign.
-  typedef std::atomic_address test_type;
-  test_type t1;
-  test_type t2;
-  t1 = t2; // { dg-error "deleted" }
-}
-// { dg-prune-output "include" }
Index: testsuite/29_atomics/atomic_address/cons/single_value.cc
===================================================================
--- testsuite/29_atomics/atomic_address/cons/single_value.cc	(revision 170216)
+++ testsuite/29_atomics/atomic_address/cons/single_value.cc	(working copy)
@@ -1,28 +0,0 @@
-// { dg-options "-std=gnu++0x" }
-
-// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
-//
-// This file is part of the GNU ISO C++ Library.  This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License along
-// with this library; see the file COPYING3.  If not see
-// <http://www.gnu.org/licenses/>.
-
-#include <atomic>
-
-int main()
-{
-  // Single value constructor.
-  void* v = 0;
-  std::atomic_address a(v);
-  return 0;
-}
Index: testsuite/29_atomics/atomic_address/cons/copy_neg.cc
===================================================================
--- testsuite/29_atomics/atomic_address/cons/copy_neg.cc	(revision 170216)
+++ testsuite/29_atomics/atomic_address/cons/copy_neg.cc	(working copy)
@@ -1,31 +0,0 @@
-// { dg-options "-std=gnu++0x" }
-// { dg-do compile }
-
-// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
-//
-// This file is part of the GNU ISO C++ Library.  This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License along
-// with this library; see the file COPYING3.  If not see
-// <http://www.gnu.org/licenses/>.
-
-#include <atomic>
-
-void test01()
-{
-  // Copy.
-  typedef std::atomic_address test_type;
-  test_type t1;
-  test_type t2(t1); // { dg-error "deleted" }
-}
-
-// { dg-prune-output "include" }
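
assign_neg.cc and copy_neg.cc above covered the deleted copy operations of
atomic_address. The guarantee survives the removal: __atomic_base<_PTp*>
earlier in this patch deletes copy construction and both copy assignments, so
an equivalent negative test against the replacement type would look like this
(hypothetical test, not added by this patch):

  // { dg-options "-std=gnu++0x" }
  // { dg-do compile }

  #include <atomic>

  void test01()
  {
    std::atomic<void*> t1;
    std::atomic<void*> t2(t1);  // { dg-error "deleted" }
    t1 = t2;                    // { dg-error "deleted" }
  }
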
Index: testsuite/29_atomics/atomic_address/cons/default.cc
===================================================================
--- testsuite/29_atomics/atomic_address/cons/default.cc	(revision 170216)
+++ testsuite/29_atomics/atomic_address/cons/default.cc	(working copy)
@@ -1,27 +0,0 @@
-// { dg-options "-std=gnu++0x" }
-
-// Copyright (C) 2008, 2009 Free Software Foundation, Inc.
-//
-// This file is part of the GNU ISO C++ Library.  This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License along
-// with this library; see the file COPYING3.  If not see
-// <http://www.gnu.org/licenses/>.
-
-#include <atomic>
-
-int main()
-{
-  // Default constructor.
-  std::atomic_address a;
-  return 0;
-}
Index: testsuite/29_atomics/atomic_address/cons/constexpr.cc
===================================================================
--- testsuite/29_atomics/atomic_address/cons/constexpr.cc	(revision 170216)
+++ testsuite/29_atomics/atomic_address/cons/constexpr.cc	(working copy)
@@ -1,29 +0,0 @@
-// { dg-do compile }
-// { dg-options "-std=gnu++0x" }
-
-// Copyright (C) 2010 Free Software Foundation, Inc.
-//
-// This file is part of the GNU ISO C++ Library.  This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License along
-// with this library; see the file COPYING3.  If not see
-// <http://www.gnu.org/licenses/>.
-
-#include <atomic>
-#include <testsuite_common_types.h>
-
-int main()
-{
-  __gnu_test::constexpr_single_value_constructible test;
-  test.operator()<std::atomic_address, void*>();
-  return 0;
-}
Index: testsuite/29_atomics/atomic_address/cons/aggregate.cc
===================================================================
--- testsuite/29_atomics/atomic_address/cons/aggregate.cc	(revision 170216)
+++ testsuite/29_atomics/atomic_address/cons/aggregate.cc	(working copy)
@@ -1,28 +0,0 @@
-// { dg-options "-std=gnu++0x" }
-// { dg-do compile }
-
-// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
-//
-// This file is part of the GNU ISO C++ Library.  This library is free
-// software; you can redistribute it and/or modify it under the
-// terms of the GNU General Public License as published by the
-// Free Software Foundation; either version 3, or (at your option)
-// any later version.
-
-// This library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License along
-// with this library; see the file COPYING3.  If not see
-// <http://www.gnu.org/licenses/>.
-
-#include <atomic>
-#include <cstddef>
-
-int main()
-{
-  std::atomic_address a __attribute__((unused)) = { { NULL } };
-  return 0;
-}
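
Likewise, constexpr.cc exercised the constexpr single-value constructor, a
property the new __atomic_base<_PTp*> keeps (see its constexpr constructor
earlier in the patch). The practical payoff is constant initialization of
pointer atomics, with no dynamic-initialization race. A sketch of what such a
test could look like against the replacement type:

  #include <atomic>

  int slot;

  // Constant-initialized: the constexpr constructor runs at compile
  // time, so there is no runtime constructor to race against.
  std::atomic<int*> gp(&slot);

  int main()
  { return gp.load() == &slot ? 0 : 1; }
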
-
-#include <atomic>
-#include <testsuite_common_types.h>
-
-void test01()
-{
-  __gnu_test::has_trivial_cons_dtor test;
-  test.operator()<std::atomic_address>();
-}
Index: testsuite/29_atomics/headers/atomic/types_std_c++0x.cc
===================================================================
--- testsuite/29_atomics/headers/atomic/types_std_c++0x.cc	(revision 170216)
+++ testsuite/29_atomics/headers/atomic/types_std_c++0x.cc	(working copy)
@@ -1,7 +1,7 @@
 // { dg-options "-std=gnu++0x" }
 // { dg-do compile }

-// Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+// Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
 //
 // This file is part of the GNU ISO C++ Library.  This library is free
 // software; you can redistribute it and/or modify it under the
@@ -72,6 +72,4 @@
   using std::atomic_ptrdiff_t;
   using std::atomic_intmax_t;
   using std::atomic_uintmax_t;
-
-  using std::atomic_address;
 }
Index: testsuite/29_atomics/headers/atomic/macros.cc
===================================================================
--- testsuite/29_atomics/headers/atomic/macros.cc	(revision 170216)
+++ testsuite/29_atomics/headers/atomic/macros.cc	(working copy)
@@ -94,14 +94,6 @@
 # endif
 #endif

-#ifndef ATOMIC_ADDRESS_LOCK_FREE
-# error "ATOMIC_ADDRESS_LOCK_FREE must be a macro"
-# if ATOMIC_ADDRESS_LOCK_FREE != 0 \
-     && ATOMIC_ADDRESS_LOCK_FREE != 1 && ATOMIC_ADDRESS_LOCK_FREE != 2
-# error "ATOMIC_ADDRESS_LOCK_FREE must be 0, 1, or 2"
-# endif
-#endif
-
 #ifndef ATOMIC_FLAG_INIT
 #error "ATOMIC_FLAG_INIT_must_be_a_macro"
 #endif
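
With ATOMIC_ADDRESS_LOCK_FREE gone, lock-freedom for pointer atomics is
queried per object through is_lock_free() rather than through a feature macro
(the __atomic2 implementation above simply returns true). A quick check, for
completeness (illustrative program, not part of the patch):

  #include <atomic>
  #include <cstdio>

  int main()
  {
    std::atomic<void*> a(0);
    std::printf("lock-free: %d\n", static_cast<int>(a.is_lock_free()));
    return 0;
  }
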