From patchwork Mon Sep 29 16:23:52 2014 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Gleb Fotengauer-Malinovskiy X-Patchwork-Id: 394501 Return-Path: X-Original-To: incoming@patchwork.ozlabs.org Delivered-To: patchwork-incoming@bilbo.ozlabs.org Received: from sourceware.org (server1.sourceware.org [209.132.180.131]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ozlabs.org (Postfix) with ESMTPS id 91A4814010C for ; Tue, 30 Sep 2014 02:24:13 +1000 (EST) DomainKey-Signature: a=rsa-sha1; c=nofws; d=gcc.gnu.org; h=list-id :list-unsubscribe:list-archive:list-post:list-help:sender:date :from:to:cc:subject:message-id:references:mime-version :content-type:in-reply-to; q=dns; s=default; b=V3rLIZoQDWXIV+/8R 4dPJYE5zDslEVxP1QjM8611t7D1DA0CbNyWifeiUsOIxCZ64oaiJp+GdX2STZUee DR2m0rBUNtxreX6l3eSKy8CznfMP3kz9yiC8/Nf8EUEg3tA71Y9H/JEcYZ2QqpEI V0K0wb2KwTze42LGDS69CGeuF4= DKIM-Signature: v=1; a=rsa-sha1; c=relaxed; d=gcc.gnu.org; h=list-id :list-unsubscribe:list-archive:list-post:list-help:sender:date :from:to:cc:subject:message-id:references:mime-version :content-type:in-reply-to; s=default; bh=pP+tXW2fh2Udg0E5mXPawv1 vsSw=; b=EopbWWOSclnn4KhThOAU5DfGh/DdBCQ1E26s9peuQZXAg+XUKmSDfLl vUMjLD90iiDekMmHrGFSfHUt95U2PbIA0cScM6c1kSabNrYpDo6QwQstTWFSkG41 RcVSV6bVRG4JUerKt7Y7mJf3k7MYZH18olM8PG1amF/ctQACw7CI= Received: (qmail 32612 invoked by alias); 29 Sep 2014 16:24:05 -0000 Mailing-List: contact gcc-patches-help@gcc.gnu.org; run by ezmlm Precedence: bulk List-Id: List-Unsubscribe: List-Archive: List-Post: List-Help: Sender: gcc-patches-owner@gcc.gnu.org Delivered-To: mailing list gcc-patches@gcc.gnu.org Received: (qmail 32599 invoked by uid 89); 29 Sep 2014 16:24:03 -0000 Authentication-Results: sourceware.org; auth=none X-Virus-Found: No X-Spam-SWARE-Status: No, score=-2.7 required=5.0 tests=AWL, BAYES_00, RP_MATCHES_RCVD autolearn=ham version=3.3.2 X-HELO: 
pegasus3.altlinux.org Received: from pegasus3.altlinux.org (HELO pegasus3.altlinux.org) (194.107.17.103) by sourceware.org (qpsmtpd/0.93/v0.84-503-g423c35a) with ESMTP; Mon, 29 Sep 2014 16:24:00 +0000 Received: from imap.altlinux.org (imap.altlinux.org [194.107.17.38]) by pegasus3.altlinux.org (Postfix) with ESMTP id 9AF1080EB7; Mon, 29 Sep 2014 20:23:57 +0400 (MSK) Received: from glebfm.cloud.tilaa.com (glebfm.cloud.tilaa.com [84.22.96.71]) by imap.altlinux.org (Postfix) with ESMTPSA id 295681141506; Mon, 29 Sep 2014 20:23:57 +0400 (MSK) Date: Mon, 29 Sep 2014 20:23:52 +0400 From: Gleb Fotengauer-Malinovskiy To: Jakub Jelinek Cc: Torvald Riegel , Richard Henderson , gcc-patches@gcc.gnu.org Subject: Re: [PATCH] PR libitm/61164: redefinition of __always_inline Message-ID: <20140929162352.GE19878@glebfm.cloud.tilaa.com> References: <20140927120617.GA19658@glebfm.cloud.tilaa.com> <20140927170000.GB19878@glebfm.cloud.tilaa.com> <20140929085415.GB17454@tucnak.redhat.com> <20140929125326.GD19878@glebfm.cloud.tilaa.com> <20140929133825.GE17454@tucnak.redhat.com> MIME-Version: 1.0 Content-Disposition: inline In-Reply-To: <20140929133825.GE17454@tucnak.redhat.com> 2014-09-27 Gleb Fotengauer-Malinovskiy libitm/ PR libitm/61164 * local_atomic (__always_inline): Rename to... (__libitm_always_inline): ... this. --- On Mon, Sep 29, 2014 at 03:38:25PM +0200, Jakub Jelinek wrote: > Why do you want to add inline keyword to that? Some inline keywords > are implicit (methods defined inline), so there is no point adding it there. I just didn't get that redefinition of __always_inline was the source of the problem. 
local_atomic | 299 +++++++++++++++++++++++++++++------------------------------ 1 file changed, 149 insertions(+), 150 deletions(-) diff --git a/local_atomic b/local_atomic index c3e079f..552b919 100644 --- a/local_atomic +++ b/local_atomic @@ -41,8 +41,7 @@ #ifndef _GLIBCXX_ATOMIC #define _GLIBCXX_ATOMIC 1 -#undef __always_inline -#define __always_inline __attribute__((always_inline)) +#define __libitm_always_inline __attribute__((always_inline)) // #pragma GCC system_header @@ -74,7 +74,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) memory_order_seq_cst } memory_order; - inline __always_inline memory_order + inline __libitm_always_inline memory_order __calculate_memory_order(memory_order __m) noexcept { const bool __cond1 = __m == memory_order_release; @@ -84,13 +84,13 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __mo2; } - inline __always_inline void + inline __libitm_always_inline void atomic_thread_fence(memory_order __m) noexcept { __atomic_thread_fence (__m); } - inline __always_inline void + inline __libitm_always_inline void atomic_signal_fence(memory_order __m) noexcept { __atomic_thread_fence (__m); @@ -280,19 +280,19 @@ namespace std // _GLIBCXX_VISIBILITY(default) // Conversion to ATOMIC_FLAG_INIT. 
atomic_flag(bool __i) noexcept : __atomic_flag_base({ __i }) { } - __always_inline bool + __libitm_always_inline bool test_and_set(memory_order __m = memory_order_seq_cst) noexcept { return __atomic_test_and_set (&_M_i, __m); } - __always_inline bool + __libitm_always_inline bool test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_test_and_set (&_M_i, __m); } - __always_inline void + __libitm_always_inline void clear(memory_order __m = memory_order_seq_cst) noexcept { // __glibcxx_assert(__m != memory_order_consume); @@ -302,7 +302,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __atomic_clear (&_M_i, __m); } - __always_inline void + __libitm_always_inline void clear(memory_order __m = memory_order_seq_cst) volatile noexcept { // __glibcxx_assert(__m != memory_order_consume); @@ -455,7 +455,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) is_lock_free() const volatile noexcept { return __atomic_is_lock_free (sizeof (_M_i), &_M_i); } - __always_inline void + __libitm_always_inline void store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept { // __glibcxx_assert(__m != memory_order_acquire); @@ -465,7 +465,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __atomic_store_n(&_M_i, __i, __m); } - __always_inline void + __libitm_always_inline void store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile noexcept { @@ -476,7 +476,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __atomic_store_n(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type load(memory_order __m = memory_order_seq_cst) const noexcept { // __glibcxx_assert(__m != memory_order_release); @@ -485,7 +485,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_load_n(&_M_i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type load(memory_order __m = memory_order_seq_cst) const volatile noexcept { // __glibcxx_assert(__m != memory_order_release); @@ -494,21 +494,21 @@ 
namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_load_n(&_M_i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type exchange(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_exchange_n(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_exchange_n(&_M_i, __i, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__int_type& __i1, __int_type __i2, memory_order __m1, memory_order __m2) noexcept { @@ -519,7 +519,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__int_type& __i1, __int_type __i2, memory_order __m1, memory_order __m2) volatile noexcept @@ -531,7 +531,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__int_type& __i1, __int_type __i2, memory_order __m = memory_order_seq_cst) noexcept { @@ -539,7 +539,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__int_type& __i1, __int_type __i2, memory_order __m = memory_order_seq_cst) volatile noexcept { @@ -547,7 +547,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__int_type& __i1, __int_type __i2, memory_order __m1, memory_order __m2) noexcept { @@ -558,7 +558,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__int_type& 
__i1, __int_type __i2, memory_order __m1, memory_order __m2) volatile noexcept @@ -570,7 +570,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__int_type& __i1, __int_type __i2, memory_order __m = memory_order_seq_cst) noexcept { @@ -578,7 +578,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__int_type& __i1, __int_type __i2, memory_order __m = memory_order_seq_cst) volatile noexcept { @@ -586,52 +586,52 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_fetch_add(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_fetch_add(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_fetch_sub(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_fetch_sub(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_fetch_and(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_fetch_and(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type 
fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_fetch_or(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_fetch_or(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_fetch_xor(&_M_i, __i, __m); } - __always_inline __int_type + __libitm_always_inline __int_type fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_fetch_xor(&_M_i, __i, __m); } @@ -733,7 +733,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) is_lock_free() const volatile noexcept { return __atomic_is_lock_free (sizeof (_M_p), &_M_p); } - __always_inline void + __libitm_always_inline void store(__pointer_type __p, memory_order __m = memory_order_seq_cst) noexcept { @@ -744,7 +744,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __atomic_store_n(&_M_p, __p, __m); } - __always_inline void + __libitm_always_inline void store(__pointer_type __p, memory_order __m = memory_order_seq_cst) volatile noexcept { @@ -755,7 +755,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __atomic_store_n(&_M_p, __p, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type load(memory_order __m = memory_order_seq_cst) const noexcept { // __glibcxx_assert(__m != memory_order_release); @@ -764,7 +764,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_load_n(&_M_p, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type load(memory_order __m = memory_order_seq_cst) const volatile noexcept { // __glibcxx_assert(__m != memory_order_release); @@ -773,21 +773,21 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_load_n(&_M_p, __m); } - __always_inline __pointer_type + __libitm_always_inline 
__pointer_type exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_exchange_n(&_M_p, __p, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_exchange_n(&_M_p, __p, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, memory_order __m1, memory_order __m2) noexcept @@ -799,7 +799,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, memory_order __m1, memory_order __m2) volatile noexcept @@ -811,22 +811,22 @@ namespace std // _GLIBCXX_VISIBILITY(default) return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_fetch_add(&_M_p, __d, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_fetch_add(&_M_p, __d, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) noexcept { return __atomic_fetch_sub(&_M_p, __d, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile noexcept { return __atomic_fetch_sub(&_M_p, __d, __m); } @@ -870,67 +870,67 @@ namespace std // _GLIBCXX_VISIBILITY(default) bool is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); } - __always_inline void + __libitm_always_inline void store(bool __i, 
memory_order __m = memory_order_seq_cst) noexcept { _M_base.store(__i, __m); } - __always_inline void + __libitm_always_inline void store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept { _M_base.store(__i, __m); } - __always_inline bool + __libitm_always_inline bool load(memory_order __m = memory_order_seq_cst) const noexcept { return _M_base.load(__m); } - __always_inline bool + __libitm_always_inline bool load(memory_order __m = memory_order_seq_cst) const volatile noexcept { return _M_base.load(__m); } - __always_inline bool + __libitm_always_inline bool exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept { return _M_base.exchange(__i, __m); } - __always_inline bool + __libitm_always_inline bool exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return _M_base.exchange(__i, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, memory_order __m2) noexcept { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, memory_order __m2) volatile noexcept { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(bool& __i1, bool __i2, memory_order __m = memory_order_seq_cst) noexcept { return _M_base.compare_exchange_weak(__i1, __i2, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(bool& __i1, bool __i2, memory_order __m = memory_order_seq_cst) volatile noexcept { return _M_base.compare_exchange_weak(__i1, __i2, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, memory_order __m2) noexcept { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool 
compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, memory_order __m2) volatile noexcept { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(bool& __i1, bool __i2, memory_order __m = memory_order_seq_cst) noexcept { return _M_base.compare_exchange_strong(__i1, __i2, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(bool& __i1, bool __i2, memory_order __m = memory_order_seq_cst) volatile noexcept { return _M_base.compare_exchange_strong(__i1, __i2, __m); } @@ -980,11 +980,11 @@ namespace std // _GLIBCXX_VISIBILITY(default) store(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept { __atomic_store(&_M_i, &__i, _m); } - __always_inline void + __libitm_always_inline void store(_Tp __i, memory_order _m = memory_order_seq_cst) volatile noexcept { __atomic_store(&_M_i, &__i, _m); } - __always_inline _Tp + __libitm_always_inline _Tp load(memory_order _m = memory_order_seq_cst) const noexcept { _Tp tmp; @@ -992,7 +992,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return tmp; } - __always_inline _Tp + __libitm_always_inline _Tp load(memory_order _m = memory_order_seq_cst) const volatile noexcept { _Tp tmp; @@ -1000,7 +1000,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return tmp; } - __always_inline _Tp + __libitm_always_inline _Tp exchange(_Tp __i, memory_order _m = memory_order_seq_cst) noexcept { _Tp tmp; @@ -1008,7 +1008,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) return tmp; } - __always_inline _Tp + __libitm_always_inline _Tp exchange(_Tp __i, memory_order _m = memory_order_seq_cst) volatile noexcept { @@ -1017,50 +1017,50 @@ namespace std // _GLIBCXX_VISIBILITY(default) return tmp; } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, memory_order __f) noexcept { return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f); } - 
__always_inline bool + __libitm_always_inline bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, memory_order __f) volatile noexcept { return __atomic_compare_exchange(&_M_i, &__e, &__i, true, __s, __f); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __m = memory_order_seq_cst) noexcept { return compare_exchange_weak(__e, __i, __m, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return compare_exchange_weak(__e, __i, __m, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, memory_order __f) noexcept { return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, memory_order __f) volatile noexcept { return __atomic_compare_exchange(&_M_i, &__e, &__i, false, __s, __f); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __m = memory_order_seq_cst) noexcept { return compare_exchange_strong(__e, __i, __m, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __m = memory_order_seq_cst) volatile noexcept { return compare_exchange_strong(__e, __i, __m, __m); } @@ -1153,46 +1153,46 @@ namespace std // _GLIBCXX_VISIBILITY(default) is_lock_free() const volatile noexcept { return _M_b.is_lock_free(); } - __always_inline void + __libitm_always_inline void store(__pointer_type __p, memory_order __m = memory_order_seq_cst) noexcept { return _M_b.store(__p, __m); } - __always_inline void + __libitm_always_inline void store(__pointer_type __p, memory_order __m = memory_order_seq_cst) volatile noexcept { return _M_b.store(__p, __m); } - __always_inline __pointer_type + 
__libitm_always_inline __pointer_type load(memory_order __m = memory_order_seq_cst) const noexcept { return _M_b.load(__m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type load(memory_order __m = memory_order_seq_cst) const volatile noexcept { return _M_b.load(__m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst) noexcept { return _M_b.exchange(__p, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst) volatile noexcept { return _M_b.exchange(__p, __m); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, memory_order __m1, memory_order __m2) noexcept { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, memory_order __m1, memory_order __m2) volatile noexcept { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, memory_order __m = memory_order_seq_cst) noexcept { @@ -1200,7 +1200,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline bool + __libitm_always_inline bool compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, memory_order __m = memory_order_seq_cst) volatile noexcept { @@ -1208,18 +1208,18 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, memory_order __m1, memory_order __m2) noexcept { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool 
compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, memory_order __m1, memory_order __m2) volatile noexcept { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, memory_order __m = memory_order_seq_cst) noexcept { @@ -1227,7 +1227,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline bool + __libitm_always_inline bool compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, memory_order __m = memory_order_seq_cst) volatile noexcept { @@ -1235,22 +1235,22 @@ namespace std // _GLIBCXX_VISIBILITY(default) __calculate_memory_order(__m)); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) noexcept { return _M_b.fetch_add(__d, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile noexcept { return _M_b.fetch_add(__d, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) noexcept { return _M_b.fetch_sub(__d, __m); } - __always_inline __pointer_type + __libitm_always_inline __pointer_type fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile noexcept { return _M_b.fetch_sub(__d, __m); } @@ -1544,98 +1544,98 @@ namespace std // _GLIBCXX_VISIBILITY(default) // Function definitions, atomic_flag operations. 
- inline __always_inline bool + inline __libitm_always_inline bool atomic_flag_test_and_set_explicit(atomic_flag* __a, memory_order __m) noexcept { return __a->test_and_set(__m); } - inline __always_inline bool + inline __libitm_always_inline bool atomic_flag_test_and_set_explicit(volatile atomic_flag* __a, memory_order __m) noexcept { return __a->test_and_set(__m); } - inline __always_inline void + inline __libitm_always_inline void atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept { __a->clear(__m); } - inline __always_inline void + inline __libitm_always_inline void atomic_flag_clear_explicit(volatile atomic_flag* __a, memory_order __m) noexcept { __a->clear(__m); } - inline __always_inline bool + inline __libitm_always_inline bool atomic_flag_test_and_set(atomic_flag* __a) noexcept { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); } - inline __always_inline bool + inline __libitm_always_inline bool atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); } - inline __always_inline void + inline __libitm_always_inline void atomic_flag_clear(atomic_flag* __a) noexcept { atomic_flag_clear_explicit(__a, memory_order_seq_cst); } - inline __always_inline void + inline __libitm_always_inline void atomic_flag_clear(volatile atomic_flag* __a) noexcept { atomic_flag_clear_explicit(__a, memory_order_seq_cst); } // Function templates generally applicable to atomic types. 
template - __always_inline bool + __libitm_always_inline bool atomic_is_lock_free(const atomic<_ITp>* __a) noexcept { return __a->is_lock_free(); } template - __always_inline bool + __libitm_always_inline bool atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept { return __a->is_lock_free(); } template - __always_inline void + __libitm_always_inline void atomic_init(atomic<_ITp>* __a, _ITp __i) noexcept; template - __always_inline void + __libitm_always_inline void atomic_init(volatile atomic<_ITp>* __a, _ITp __i) noexcept; template - __always_inline void + __libitm_always_inline void atomic_store_explicit(atomic<_ITp>* __a, _ITp __i, memory_order __m) noexcept { __a->store(__i, __m); } template - __always_inline void + __libitm_always_inline void atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i, memory_order __m) noexcept { __a->store(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept { return __a->load(__m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_load_explicit(const volatile atomic<_ITp>* __a, memory_order __m) noexcept { return __a->load(__m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->exchange(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->exchange(__i, __m); } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2, memory_order __m1, @@ -1643,7 +1643,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* 
__a, _ITp* __i1, _ITp __i2, memory_order __m1, @@ -1651,7 +1651,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2, memory_order __m1, @@ -1659,7 +1659,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a, _ITp* __i1, _ITp __i2, memory_order __m1, @@ -1668,37 +1668,37 @@ namespace std // _GLIBCXX_VISIBILITY(default) template - __always_inline void + __libitm_always_inline void atomic_store(atomic<_ITp>* __a, _ITp __i) noexcept { atomic_store_explicit(__a, __i, memory_order_seq_cst); } template - __always_inline void + __libitm_always_inline void atomic_store(volatile atomic<_ITp>* __a, _ITp __i) noexcept { atomic_store_explicit(__a, __i, memory_order_seq_cst); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_load(const atomic<_ITp>* __a) noexcept { return atomic_load_explicit(__a, memory_order_seq_cst); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_load(const volatile atomic<_ITp>* __a) noexcept { return atomic_load_explicit(__a, memory_order_seq_cst); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_exchange(atomic<_ITp>* __a, _ITp __i) noexcept { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) noexcept { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_weak(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) noexcept { @@ -1708,7 +1708,7 @@ namespace std // 
_GLIBCXX_VISIBILITY(default) } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_weak(volatile atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) noexcept { @@ -1718,7 +1718,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_strong(atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) noexcept { @@ -1728,7 +1728,7 @@ namespace std // _GLIBCXX_VISIBILITY(default) } template - __always_inline bool + __libitm_always_inline bool atomic_compare_exchange_strong(volatile atomic<_ITp>* __a, _ITp* __i1, _ITp __i2) noexcept { @@ -1742,158 +1742,158 @@ namespace std // _GLIBCXX_VISIBILITY(default) // intergral types as specified in the standard, excluding address // types. template - __always_inline _ITp + __libitm_always_inline _ITp atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->fetch_add(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->fetch_add(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->fetch_sub(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->fetch_sub(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->fetch_and(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept { return __a->fetch_and(__i, __m); } template - __always_inline _ITp + __libitm_always_inline _ITp 
     atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept
     { return __a->fetch_or(__i, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept
     { return __a->fetch_or(__i, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept
     { return __a->fetch_xor(__i, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i, memory_order __m) noexcept
     { return __a->fetch_xor(__i, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
 
   template<typename _ITp>
-    __always_inline _ITp
+    __libitm_always_inline _ITp
     atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
     { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
 
   // Partial specializations for pointers.
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, memory_order __m) noexcept
     { return __a->fetch_add(__d, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d, memory_order __m) noexcept
     { return __a->fetch_add(__d, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
     { return __a->fetch_add(__d); }
 
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
     { return __a->fetch_add(__d); }
 
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d, memory_order __m) noexcept
     { return __a->fetch_sub(__d, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d, memory_order __m) noexcept
     { return __a->fetch_sub(__d, __m); }
 
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
     { return __a->fetch_sub(__d); }
 
   template<typename _ITp>
-    __always_inline _ITp*
+    __libitm_always_inline _ITp*
     atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
     { return __a->fetch_sub(__d); }
 
   // @} group atomics