@@ -5272,10 +5272,12 @@ expand_builtin_sync_lock_release (enum machine_mode mode, tree exp)
}
/* Given an integer representing an ``enum memmodel'', verify its
- correctness and return the memory model enum. */
+ correctness and return the memory model enum. When STORE
+ is true this is for a store only. When BARRIER is true this is for
+ a barrier. */
static enum memmodel
-get_memmodel (tree exp)
+get_memmodel (tree exp, bool store, bool barrier)
{
rtx op;
unsigned HOST_WIDE_INT val;
@@ -5289,7 +5291,7 @@ get_memmodel (tree exp)
val = INTVAL (op);
if (targetm.memmodel_check)
- val = targetm.memmodel_check (val);
+ val = targetm.memmodel_check (val, store, barrier);
else if (val & ~MEMMODEL_MASK)
{
warning (OPT_Winvalid_memory_model,
@@ -5318,7 +5320,7 @@ expand_builtin_atomic_exchange (enum machine_mode mode, tree exp, rtx target)
rtx val, mem;
enum memmodel model;
- model = get_memmodel (CALL_EXPR_ARG (exp, 2));
+ model = get_memmodel (CALL_EXPR_ARG (exp, 2), false, false);
if ((model & MEMMODEL_MASK) == MEMMODEL_CONSUME)
{
error ("invalid memory model for %<__atomic_exchange%>");
@@ -5352,8 +5354,8 @@ expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
tree weak;
bool is_weak;
- success = get_memmodel (CALL_EXPR_ARG (exp, 4));
- failure = get_memmodel (CALL_EXPR_ARG (exp, 5));
+ success = get_memmodel (CALL_EXPR_ARG (exp, 4), false, false);
+ failure = get_memmodel (CALL_EXPR_ARG (exp, 5), false, false);
if ((failure & MEMMODEL_MASK) == MEMMODEL_RELEASE
|| (failure & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
@@ -5408,7 +5410,7 @@ expand_builtin_atomic_load (enum machine_mode mode, tree exp, rtx target)
rtx mem;
enum memmodel model;
- model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1), false, false);
if ((model & MEMMODEL_MASK) == MEMMODEL_RELEASE
|| (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
{
@@ -5437,7 +5439,7 @@ expand_builtin_atomic_store (enum machine_mode mode, tree exp)
rtx mem, val;
enum memmodel model;
- model = get_memmodel (CALL_EXPR_ARG (exp, 2));
+ model = get_memmodel (CALL_EXPR_ARG (exp, 2), true, false);
if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED
&& (model & MEMMODEL_MASK) != MEMMODEL_SEQ_CST
&& (model & MEMMODEL_MASK) != MEMMODEL_RELEASE)
@@ -5477,7 +5479,7 @@ expand_builtin_atomic_fetch_op (enum machine_mode mode, tree exp, rtx target,
tree fndecl;
tree addr;
- model = get_memmodel (CALL_EXPR_ARG (exp, 2));
+ model = get_memmodel (CALL_EXPR_ARG (exp, 2), false, false);
/* Expand the operands. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
@@ -5544,7 +5546,7 @@ expand_builtin_atomic_clear (tree exp)
mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
- model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1), true, false);
if ((model & MEMMODEL_MASK) == MEMMODEL_ACQUIRE
|| (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
@@ -5553,8 +5555,6 @@ expand_builtin_atomic_clear (tree exp)
return const0_rtx;
}
- /* need target hook there to check for not hle acquire */
-
if (HAVE_atomic_clear)
{
emit_insn (gen_atomic_clear (mem, model));
@@ -5585,7 +5585,7 @@ expand_builtin_atomic_test_and_set (tree exp, rtx target)
mode = mode_for_size (BOOL_TYPE_SIZE, MODE_INT, 0);
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
- model = get_memmodel (CALL_EXPR_ARG (exp, 1));
+ model = get_memmodel (CALL_EXPR_ARG (exp, 1), false, false);
return expand_atomic_test_and_set (target, mem, model);
}
@@ -5724,7 +5724,7 @@ expand_builtin_atomic_is_lock_free (tree exp)
static void
expand_builtin_atomic_thread_fence (tree exp)
{
- enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
+ enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0), false, true);
expand_mem_thread_fence (model);
}
@@ -5735,7 +5735,7 @@ expand_builtin_atomic_thread_fence (tree exp)
static void
expand_builtin_atomic_signal_fence (tree exp)
{
- enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0));
+ enum memmodel model = get_memmodel (CALL_EXPR_ARG (exp, 0), false, true);
expand_mem_signal_fence (model);
}
@@ -42076,14 +42076,31 @@ ix86_destroy_cost_data (void *data)
free (data);
}
-/* Validate target specific memory model bits in VAL. */
+/* Validate target specific memory model bits in VAL. When
+ STORE is true this is for a store. When BARRIER is true
+ this is for a barrier. */
static unsigned HOST_WIDE_INT
-ix86_memmodel_check (unsigned HOST_WIDE_INT val)
+ix86_memmodel_check (unsigned HOST_WIDE_INT val, bool store,
+ bool barrier)
{
unsigned HOST_WIDE_INT model = val & MEMMODEL_MASK;
bool strong;
+ if (barrier && (val & (IX86_HLE_ACQUIRE|IX86_HLE_RELEASE)))
+ {
+ warning (OPT_Winvalid_memory_model,
+ "__ATOMIC_HLE_ACQUIRE or __ATOMIC_HLE_RELEASE not valid for barriers.");
+ return MEMMODEL_SEQ_CST;
+ }
+
+ if (store && (val & IX86_HLE_ACQUIRE))
+ {
+ warning (OPT_Winvalid_memory_model,
+ "__ATOMIC_HLE_ACQUIRE not valid for stores.");
+ return MEMMODEL_SEQ_CST;
+ }
+
if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
|MEMMODEL_MASK)
|| ((val & IX86_HLE_ACQUIRE) && (val & IX86_HLE_RELEASE)))
@@ -11338,9 +11338,11 @@ Address Sanitizer shadow memory address. NULL if Address Sanitizer is not
supported by the target.
@end deftypefn
-@deftypefn {Target Hook} {unsigned HOST_WIDE_INT} TARGET_MEMMODEL_CHECK (unsigned HOST_WIDE_INT @var{val})
+@deftypefn {Target Hook} {unsigned HOST_WIDE_INT} TARGET_MEMMODEL_CHECK (unsigned HOST_WIDE_INT @var{val}, bool @var{store}, bool @var{barrier})
Validate target specific memory model mask bits. When NULL no target specific
-memory model bits are allowed.
+memory model bits are allowed. When @code{STORE} is true this is for
+@code{__atomic_clear} or @code{__atomic_store}. When @code{BARRIER} is true
+this is for a barrier.
@end deftypefn
@deftypevr {Target Hook} {unsigned char} TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
@@ -11178,7 +11178,9 @@ is zero, which disables this optimization.
@hook TARGET_MEMMODEL_CHECK
Validate target specific memory model mask bits. When NULL no target specific
-memory model bits are allowed.
+memory model bits are allowed. When @code{STORE} is true this is for
+@code{__atomic_clear} or @code{__atomic_store}. When @code{BARRIER} is true
+this is for a barrier.
@end deftypefn
@hook TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
@@ -2059,7 +2059,8 @@ DEFHOOKPOD
DEFHOOK
(memmodel_check,
"",
- unsigned HOST_WIDE_INT, (unsigned HOST_WIDE_INT val), NULL)
+ unsigned HOST_WIDE_INT, (unsigned HOST_WIDE_INT val, bool store, bool barrier),
+ NULL)
/* Defines an offset bitwise ored into shifted address to get corresponding
Address Sanitizer shadow address, or -1 if Address Sanitizer is not
From: Andi Kleen <ak@linux.intel.com>

For HLE, stores are only valid with __ATOMIC_HLE_RELEASE; the middle end
didn't know this.  This adds new parameters to get_memmodel and to the
memmodel_check target hook to distinguish stores and give a warning for
acquire stores.  I also added a similar check for barriers, where HLE is
not useful at all.

Fixes one todo in the earlier HLE release patch.

gcc/:
2013-01-13  Andi Kleen  <ak@linux.intel.com>

        PR target/55948
        * builtins.c (get_memmodel): Pass store, barrier parameters to
        target hook.
        (expand_builtin_atomic_exchange,
        expand_builtin_atomic_compare_exchange,
        expand_builtin_atomic_load, expand_builtin_atomic_store,
        expand_builtin_atomic_fetch_op,
        expand_builtin_atomic_test_and_set, expand_builtin_atomic_clear,
        expand_builtin_atomic_thread_fence,
        expand_builtin_atomic_signal_fence): Pass store, barrier
        parameters to get_memmodel.
        * config/i386/i386.c (ix86_memmodel_check): Add store, barrier
        warnings.
        * doc/tm.texi (TARGET_MEMMODEL_CHECK): Add store, barrier
        parameters.
        * doc/tm.texi.in (TARGET_MEMMODEL_CHECK): Ditto.
        * target.def (memmodel_check): Ditto.
---
 gcc/builtins.c         | 30 +++++++++++++++---------------
 gcc/config/i386/i386.c | 21 +++++++++++++++++++--
 gcc/doc/tm.texi        |  6 ++++--
 gcc/doc/tm.texi.in     |  4 +++-
 gcc/target.def         |  3 ++-
 5 files changed, 43 insertions(+), 21 deletions(-)
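
For reviewers, a minimal usage sketch of what the new checks catch on x86.
This is illustrative only and not part of the patch; it assumes -mhle so
that the __ATOMIC_HLE_ACQUIRE/__ATOMIC_HLE_RELEASE macros are predefined
by the i386 backend, and the function names below are made up.

/* Illustrative sketch, not part of the patch.  Compile with -mhle.  */

void
hle_unlock (int *lock)
{
  /* The usual HLE elided unlock; still accepted unchanged.  */
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
}

void
hle_misuse (int *lock)
{
  /* HLE acquire hint on a store: now warns (-Winvalid-memory-model)
     and the memory model falls back to __ATOMIC_SEQ_CST.  */
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_ACQUIRE);

  /* HLE hints on a fence: also warned about and dropped to seq_cst,
     since elision hints make no sense on a bare barrier.  */
  __atomic_thread_fence (__ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
}

The same store check applies to __atomic_clear, and the fence check covers
both __atomic_thread_fence and __atomic_signal_fence.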