libstdc++: Add mem_order_hle_acquire/release to atomic.h v2

Message ID 1358143565-29558-1-git-send-email-andi@firstfloor.org
State New

Commit Message

Andi Kleen Jan. 14, 2013, 6:06 a.m. UTC
From: Andi Kleen <ak@linux.intel.com>

The underlying compiler supports additional __ATOMIC_HLE_ACQUIRE/RELEASE
memmodel flags for TSX, but these were not exposed to the C++ wrapper.
Handle them there.

These are additional flags, so some of the assert checks need to mask
off the flags before checking the memory model type.

This is a reworked version of last year's patch, now using operator
overloading as suggested by Jonathan Wakely, and it includes a real
test case.

This needs the earlier __atomic_clear/store HLE fix patch for the test
case to succeed.

Passes bootstrap and testsuite on x86_64-linux.
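
To illustrate the intended use, here is a minimal lock-elision sketch
modeled on the test case below (__memory_order_hle_acquire/release are
implementation-detail names in namespace std):

  #include <atomic>

  std::atomic_flag lock_ = ATOMIC_FLAG_INIT;

  void locked_work()
  {
    // Emits an xacquire-prefixed lock operation; the CPU may elide
    // the lock under TSX HLE.
    while (lock_.test_and_set(std::memory_order_acquire
                              | std::__memory_order_hle_acquire))
      ; // spin until the flag is clear
    // ... critical section ...
    // Emits an xrelease-prefixed store, ending the elided region.
    lock_.clear(std::memory_order_release
                | std::__memory_order_hle_release);
  }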

libstdc++/:
2013-01-12  Andi Kleen  <ak@linux.intel.com>
	    Jonathan Wakely  <jwakely.gcc@gmail.com>

	PR libstdc++/55223
	* include/bits/atomic_base.h (__memory_order_modifier): Add
	__memory_order_mask, __memory_order_modifier_mask,
	__memory_order_hle_acquire, __memory_order_hle_release.
	(operator|, operator&): Add.
	(__cmpexch_failure_order): Rename to __cmpexch_failure_order2.
	(__cmpexch_failure_order): Add.
	(clear, store, load, compare_exchange_weak, compare_exchange_strong):
	Handle flags.
	* testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc:
	Add.
---
 libstdc++-v3/include/bits/atomic_base.h            |  142 +++++++++++++-------
 .../atomic_flag/test_and_set/explicit-hle.cc       |  120 +++++++++++++++++
 2 files changed, 217 insertions(+), 45 deletions(-)
 create mode 100644 libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc

Comments

Jonathan Wakely Jan. 14, 2013, 12:30 p.m. UTC | #1
On 14 January 2013 06:06, Andi Kleen wrote:
>
> The underlying compiler supports additional __ATOMIC_HLE_ACQUIRE/RELEASE
> memmodel flags for TSX, but these were not exposed to the C++ wrapper.
> Handle them there.
>
> These are additional flags, so some of the assert checks need to mask
> off the flags before checking the memory model type.
>
> This is a reworked version of last year's patch, now using operator
> overloading as suggested by Jonathan Wakely, and it includes a real
> test case.
>
> This needs the earlier __atomic_clear/store HLE fix patch for the test
> case to succeed.
>
> Passes bootstrap and testsuite on x86_64-linux.

This looks good, thanks for reworking it.

I think the new testcase should only have 2013 in the copyright dates,
and does atomic_base.h need 2013 added?

As the earlier version was first posted some time ago, I think it's OK
even though the trunk's now in release branch mode.  Please wait 24
hours in case anyone else objects; if not, it can be committed.

Thanks.
Jakub Jelinek Jan. 21, 2013, 2:05 p.m. UTC | #2
On Sun, Jan 13, 2013 at 10:06:05PM -0800, Andi Kleen wrote:
> 2013-01-12  Andi Kleen  <ak@linux.intel.com>
> 	    Jonathan Wakely  <jwakely.gcc@gmail.com>
> 
>         PR libstdc++/55223
> 	* testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc:
> 	Add.

The test is broken on i?86-linux:

> --- /dev/null
> +++ b/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc
> @@ -0,0 +1,120 @@
> +// { dg-options "-std=gnu++0x" }

1) you need // { dg-additional-options "-march=i486" { target ia32 } }
   or something similar, otherwise for a pure i386 target it will not work right

> +// { dg-do compile { target i?86-*-* x86_64-*-* } }
> +// { dg-final { scan-assembler-times "\(xacquire\|\.byte.*0xf2\)" 14 } }
> +// { dg-final { scan-assembler-times "\(xrelease\|\.byte.*0xf3\)" 14 } }

2) \.byte.*0xf needs to be replaced by \.byte\[^\n\r]*0xf
   otherwise it will happily match say
        .byte   0
        .uleb128 0x2d
        .long   0xf23
   in .debug_info

3) I guess you want to add "-g0 -fno-exceptions -fno-asynchronous-unwind-tables"
   to double check that .byte 0xf2 or .byte 0xf3 won't suddenly appear in
   .debug_* or .eh_frame sections.

	Jakub
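
Taken together, the test's DejaGnu header would then look roughly like
this (an untested sketch combining the three fixes above):

  // { dg-options "-std=gnu++0x -g0 -fno-exceptions -fno-asynchronous-unwind-tables" }
  // { dg-additional-options "-march=i486" { target ia32 } }
  // { dg-do compile { target i?86-*-* x86_64-*-* } }
  // { dg-final { scan-assembler-times "\(xacquire\|\.byte\[^\n\r]*0xf2\)" 14 } }
  // { dg-final { scan-assembler-times "\(xrelease\|\.byte\[^\n\r]*0xf3\)" 14 } }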

Patch

diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h
index 8ce5553..d69bc76 100644
--- a/libstdc++-v3/include/bits/atomic_base.h
+++ b/libstdc++-v3/include/bits/atomic_base.h
@@ -59,14 +59,41 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       memory_order_seq_cst
     } memory_order;
 
+  enum __memory_order_modifier
+    {
+      __memory_order_mask          = 0x0ffff,
+      __memory_order_modifier_mask = 0xffff0000,
+      __memory_order_hle_acquire   = 0x10000,
+      __memory_order_hle_release   = 0x20000
+    };
+
+  constexpr memory_order
+  operator|(memory_order __m, __memory_order_modifier __mod)
+  {
+    return memory_order(__m | int(__mod));
+  }
+
+  constexpr memory_order
+  operator&(memory_order __m, __memory_order_modifier __mod)
+  {
+    return memory_order(__m & int(__mod));
+  }
+
   // Drop release ordering as per [atomics.types.operations.req]/21
   constexpr memory_order
-  __cmpexch_failure_order(memory_order __m) noexcept
+  __cmpexch_failure_order2(memory_order __m) noexcept
   {
     return __m == memory_order_acq_rel ? memory_order_acquire
       : __m == memory_order_release ? memory_order_relaxed : __m;
   }
 
+  constexpr memory_order
+  __cmpexch_failure_order(memory_order __m) noexcept
+  {
+    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
+      | (__m & __memory_order_modifier_mask));
+  }
+
   inline void
   atomic_thread_fence(memory_order __m) noexcept
   { __atomic_thread_fence(__m); }
@@ -268,9 +295,10 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     clear(memory_order __m = memory_order_seq_cst) noexcept
     {
-      __glibcxx_assert(__m != memory_order_consume);
-      __glibcxx_assert(__m != memory_order_acquire);
-      __glibcxx_assert(__m != memory_order_acq_rel);
+      memory_order __b = __m & __memory_order_mask;
+      __glibcxx_assert(__b != memory_order_consume);
+      __glibcxx_assert(__b != memory_order_acquire);
+      __glibcxx_assert(__b != memory_order_acq_rel);
 
       __atomic_clear (&_M_i, __m);
     }
@@ -278,9 +306,10 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     clear(memory_order __m = memory_order_seq_cst) volatile noexcept
     {
-      __glibcxx_assert(__m != memory_order_consume);
-      __glibcxx_assert(__m != memory_order_acquire);
-      __glibcxx_assert(__m != memory_order_acq_rel);
+      memory_order __b = __m & __memory_order_mask;
+      __glibcxx_assert(__b != memory_order_consume);
+      __glibcxx_assert(__b != memory_order_acquire);
+      __glibcxx_assert(__b != memory_order_acq_rel);
 
       __atomic_clear (&_M_i, __m);
     }
@@ -431,9 +460,10 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       void
       store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
       {
-	__glibcxx_assert(__m != memory_order_acquire);
-	__glibcxx_assert(__m != memory_order_acq_rel);
-	__glibcxx_assert(__m != memory_order_consume);
+        memory_order __b = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_acquire);
+	__glibcxx_assert(__b != memory_order_acq_rel);
+	__glibcxx_assert(__b != memory_order_consume);
 
 	__atomic_store_n(&_M_i, __i, __m);
       }
@@ -442,9 +472,10 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       store(__int_type __i,
 	    memory_order __m = memory_order_seq_cst) volatile noexcept
       {
-	__glibcxx_assert(__m != memory_order_acquire);
-	__glibcxx_assert(__m != memory_order_acq_rel);
-	__glibcxx_assert(__m != memory_order_consume);
+        memory_order __b = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_acquire);
+	__glibcxx_assert(__b != memory_order_acq_rel);
+	__glibcxx_assert(__b != memory_order_consume);
 
 	__atomic_store_n(&_M_i, __i, __m);
       }
@@ -452,8 +483,9 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __int_type
       load(memory_order __m = memory_order_seq_cst) const noexcept
       {
-	__glibcxx_assert(__m != memory_order_release);
-	__glibcxx_assert(__m != memory_order_acq_rel);
+       memory_order __b = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_release);
+	__glibcxx_assert(__b != memory_order_acq_rel);
 
 	return __atomic_load_n(&_M_i, __m);
       }
@@ -461,8 +493,9 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __int_type
       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
       {
-	__glibcxx_assert(__m != memory_order_release);
-	__glibcxx_assert(__m != memory_order_acq_rel);
+        memory_order __b = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_release);
+	__glibcxx_assert(__b != memory_order_acq_rel);
 
 	return __atomic_load_n(&_M_i, __m);
       }
@@ -486,9 +519,11 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       compare_exchange_weak(__int_type& __i1, __int_type __i2,
 			    memory_order __m1, memory_order __m2) noexcept
       {
-	__glibcxx_assert(__m2 != memory_order_release);
-	__glibcxx_assert(__m2 != memory_order_acq_rel);
-	__glibcxx_assert(__m2 <= __m1);
+       memory_order __b2 = __m2 & __memory_order_mask;
+       memory_order __b1 = __m1 & __memory_order_mask;
+	__glibcxx_assert(__b2 != memory_order_release);
+	__glibcxx_assert(__b2 != memory_order_acq_rel);
+	__glibcxx_assert(__b2 <= __b1);
 
 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
       }
@@ -498,9 +533,11 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
 			    memory_order __m1,
 			    memory_order __m2) volatile noexcept
       {
-	__glibcxx_assert(__m2 != memory_order_release);
-	__glibcxx_assert(__m2 != memory_order_acq_rel);
-	__glibcxx_assert(__m2 <= __m1);
+       memory_order __b2 = __m2 & __memory_order_mask;
+       memory_order __b1 = __m1 & __memory_order_mask;
+	__glibcxx_assert(__b2 != memory_order_release);
+	__glibcxx_assert(__b2 != memory_order_acq_rel);
+	__glibcxx_assert(__b2 <= __b1);
 
 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2);
       }
@@ -525,9 +562,11 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       compare_exchange_strong(__int_type& __i1, __int_type __i2,
 			      memory_order __m1, memory_order __m2) noexcept
       {
-	__glibcxx_assert(__m2 != memory_order_release);
-	__glibcxx_assert(__m2 != memory_order_acq_rel);
-	__glibcxx_assert(__m2 <= __m1);
+        memory_order __b2 = __m2 & __memory_order_mask;
+        memory_order __b1 = __m1 & __memory_order_mask;
+	__glibcxx_assert(__b2 != memory_order_release);
+	__glibcxx_assert(__b2 != memory_order_acq_rel);
+	__glibcxx_assert(__b2 <= __b1);
 
 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
       }
@@ -537,9 +576,12 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
 			      memory_order __m1,
 			      memory_order __m2) volatile noexcept
       {
-	__glibcxx_assert(__m2 != memory_order_release);
-	__glibcxx_assert(__m2 != memory_order_acq_rel);
-	__glibcxx_assert(__m2 <= __m1);
+        memory_order __b2 = __m2 & __memory_order_mask;
+        memory_order __b1 = __m1 & __memory_order_mask;
+
+	__glibcxx_assert(__b2 != memory_order_release);
+	__glibcxx_assert(__b2 != memory_order_acq_rel);
+	__glibcxx_assert(__b2 <= __b1);
 
 	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2);
       }
@@ -726,9 +768,11 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       store(__pointer_type __p,
 	    memory_order __m = memory_order_seq_cst) noexcept
       {
-	__glibcxx_assert(__m != memory_order_acquire);
-	__glibcxx_assert(__m != memory_order_acq_rel);
-	__glibcxx_assert(__m != memory_order_consume);
+        memory_order __b = __m & __memory_order_mask;
+
+	__glibcxx_assert(__b != memory_order_acquire);
+	__glibcxx_assert(__b != memory_order_acq_rel);
+	__glibcxx_assert(__b != memory_order_consume);
 
 	__atomic_store_n(&_M_p, __p, __m);
       }
@@ -737,9 +781,10 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       store(__pointer_type __p,
 	    memory_order __m = memory_order_seq_cst) volatile noexcept
       {
-	__glibcxx_assert(__m != memory_order_acquire);
-	__glibcxx_assert(__m != memory_order_acq_rel);
-	__glibcxx_assert(__m != memory_order_consume);
+        memory_order __b = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_acquire);
+	__glibcxx_assert(__b != memory_order_acq_rel);
+	__glibcxx_assert(__b != memory_order_consume);
 
 	__atomic_store_n(&_M_p, __p, __m);
       }
@@ -747,8 +792,9 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __pointer_type
       load(memory_order __m = memory_order_seq_cst) const noexcept
       {
-	__glibcxx_assert(__m != memory_order_release);
-	__glibcxx_assert(__m != memory_order_acq_rel);
+        memory_order __b = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_release);
+	__glibcxx_assert(__b != memory_order_acq_rel);
 
 	return __atomic_load_n(&_M_p, __m);
       }
@@ -756,8 +802,9 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __pointer_type
       load(memory_order __m = memory_order_seq_cst) const volatile noexcept
       {
-	__glibcxx_assert(__m != memory_order_release);
-	__glibcxx_assert(__m != memory_order_acq_rel);
+        memory_order __b = __m & __memory_order_mask;
+	__glibcxx_assert(__b != memory_order_release);
+	__glibcxx_assert(__b != memory_order_acq_rel);
 
 	return __atomic_load_n(&_M_p, __m);
       }
@@ -782,9 +829,11 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
 			      memory_order __m1,
 			      memory_order __m2) noexcept
       {
-	__glibcxx_assert(__m2 != memory_order_release);
-	__glibcxx_assert(__m2 != memory_order_acq_rel);
-	__glibcxx_assert(__m2 <= __m1);
+        memory_order __b2 = __m2 & __memory_order_mask;
+        memory_order __b1 = __m1 & __memory_order_mask;
+	__glibcxx_assert(__b2 != memory_order_release);
+	__glibcxx_assert(__b2 != memory_order_acq_rel);
+	__glibcxx_assert(__b2 <= __b1);
 
 	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
       }
@@ -794,9 +843,12 @@  _GLIBCXX_BEGIN_NAMESPACE_VERSION
 			      memory_order __m1,
 			      memory_order __m2) volatile noexcept
       {
-	__glibcxx_assert(__m2 != memory_order_release);
-	__glibcxx_assert(__m2 != memory_order_acq_rel);
-	__glibcxx_assert(__m2 <= __m1);
+        memory_order __b2 = __m2 & __memory_order_mask;
+        memory_order __b1 = __m1 & __memory_order_mask;
+
+	__glibcxx_assert(__b2 != memory_order_release);
+	__glibcxx_assert(__b2 != memory_order_acq_rel);
+	__glibcxx_assert(__b2 <= __b1);
 
 	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2);
       }
diff --git a/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc b/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc
new file mode 100644
index 0000000..916a5e2
--- /dev/null
+++ b/libstdc++-v3/testsuite/29_atomics/atomic_flag/test_and_set/explicit-hle.cc
@@ -0,0 +1,120 @@ 
+// { dg-options "-std=gnu++0x" }
+// { dg-do compile { target i?86-*-* x86_64-*-* } }
+// { dg-final { scan-assembler-times "\(xacquire\|\.byte.*0xf2\)" 14 } }
+// { dg-final { scan-assembler-times "\(xrelease\|\.byte.*0xf3\)" 14 } }
+
+// Copyright (C) 2008, 2009, 2013 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <atomic>
+
+#define ACQ memory_order_acquire | __memory_order_hle_acquire
+#define REL memory_order_release | __memory_order_hle_release
+
+int main()
+{
+  unsigned zero, one;
+  using namespace std;
+  atomic_flag af = ATOMIC_FLAG_INIT;
+
+  if (!af.test_and_set(ACQ))
+    af.clear(REL);
+
+  atomic_uint au = ATOMIC_VAR_INIT(0);
+
+  if (au.exchange(1, ACQ))
+    au.store(0, REL);
+
+  if (au.exchange(1, ACQ))
+    au.exchange(0, REL);
+
+  zero = 0;
+  one = 1;
+  if (au.compare_exchange_weak(zero, 1, ACQ, memory_order_consume))
+    au.compare_exchange_weak(one, 0, REL, memory_order_consume);
+
+  zero = 0;
+  one = 1;
+  if (au.compare_exchange_strong(zero, 1, ACQ, memory_order_consume))
+    au.compare_exchange_strong(one, 0, REL, memory_order_consume);
+
+  if (!au.fetch_add(1, ACQ))
+    au.fetch_add(-1, REL);
+
+  if (!au.fetch_sub(1, ACQ))
+    au.fetch_sub(-1, REL);
+
+#if 0 /* broken in underlying target */
+  if (!au.fetch_and(1, ACQ))
+    au.fetch_and(-1, REL);
+
+  if (!au.fetch_or(1, ACQ))
+    au.fetch_or(-1, REL);
+
+  if (!au.fetch_xor(1, ACQ))
+    au.fetch_xor(-1, REL);
+
+  if (!au.fetch_nand(1, ACQ))
+    au.fetch_nand(-1, REL);
+#endif
+
+  volatile atomic_flag vaf = ATOMIC_FLAG_INIT;
+
+  if (!vaf.test_and_set(ACQ))
+    vaf.clear(REL);
+
+  volatile atomic_uint vau = ATOMIC_VAR_INIT(0);
+
+  if (!vau.exchange(1, ACQ))
+    vau.store(0, REL);
+
+  if (!vau.exchange(1, ACQ))
+    vau.exchange(0, REL);
+
+  zero = 0;
+  one = 1;
+  if (vau.compare_exchange_weak(zero, 1, ACQ, memory_order_consume))
+    vau.compare_exchange_weak(one, 0, REL, memory_order_consume);
+
+  zero = 0;
+  one = 1;
+  if (vau.compare_exchange_strong(zero, 1, ACQ, memory_order_consume))
+    vau.compare_exchange_strong(one, 0, REL, memory_order_consume);
+
+  if (!vau.fetch_add(1, ACQ))
+    vau.fetch_add(-1, REL);
+
+  if (!vau.fetch_sub(1, ACQ))
+    vau.fetch_sub(-1, REL);
+
+#if 0 /* broken in underlying target */
+
+  if (!vau.fetch_and(1, ACQ))
+    vau.fetch_and(-1, REL);
+
+  if (!vau.fetch_or(1, ACQ))
+    vau.fetch_or(-1, REL);
+
+  if (!vau.fetch_xor(1, ACQ))
+    vau.fetch_xor(-1, REL);
+
+  if (!vau.fetch_nand(1, ACQ))
+    vau.fetch_nand(-1, REL);
+#endif
+
+  return 0;
+}