PR target/65697
* coretypes.h (MEMMODEL_SYNC, MEMMODEL_BASE_MASK): New macros.
(enum memmodel): Add SYNC_{ACQUIRE,RELEASE,SEQ_CST}.
* tree.h (memmodel_from_int, memmodel_base, is_mm_relaxed,
is_mm_consume, is_mm_acquire, is_mm_release, is_mm_acq_rel,
is_mm_seq_cst, is_mm_sync): New accessor functions.
* builtins.c (expand_builtin_sync_operation,
expand_builtin_compare_and_swap): Use MEMMODEL_SYNC_SEQ_CST.
(expand_builtin_sync_lock_release): Use MEMMODEL_SYNC_RELEASE.
(get_memmodel, expand_builtin_atomic_compare_exchange,
expand_builtin_atomic_load, expand_builtin_atomic_store,
expand_builtin_atomic_clear): Use new accessor routines.
(expand_builtin_sync_synchronize): Use MEMMODEL_SYNC_SEQ_CST.
* optabs.c (expand_compare_and_swap_loop): Use MEMMODEL_SYNC_SEQ_CST.
(maybe_emit_sync_lock_test_and_set): Use new accessors and
MEMMODEL_SYNC_ACQUIRE.
(expand_sync_lock_test_and_set): Use MEMMODEL_SYNC_ACQUIRE.
(expand_mem_thread_fence, expand_mem_signal_fence, expand_atomic_load,
expand_atomic_store): Use new accessors.
* emit-rtl.c (need_atomic_barrier_p): Add additional enum cases.
* tsan.c (instrument_builtin_call): Update the memory model range
checks to use memmodel_base and MEMMODEL_LAST.
* config/aarch64/aarch64.c (aarch64_expand_compare_and_swap): Use new
accessors.
* config/aarch64/atomics.md (atomic_load<mode>, atomic_store<mode>,
aarch64_load_exclusive<mode>, aarch64_store_exclusive<mode>,
mem_thread_fence, *dmb): Likewise.
* config/alpha/alpha.c (alpha_split_compare_and_swap,
alpha_split_compare_and_swap_12): Likewise.
* config/arm/arm.c (arm_expand_compare_and_swap,
arm_split_compare_and_swap, arm_split_atomic_op): Likewise.
* config/arm/sync.md (atomic_load<mode>, atomic_store<mode>,
atomic_loaddi): Likewise.
* config/i386/i386.c (ix86_memmodel_check): Likewise.
* config/i386/sync.md (mem_thread_fence, atomic_store<mode>): Likewise.
* config/ia64/ia64.c (ia64_expand_atomic_op): Add new memmodel cases and
use new accessors.
* config/ia64/sync.md (mem_thread_fence, atomic_load<mode>,
atomic_store<mode>, atomic_compare_and_swap<mode>,
atomic_exchange<mode>): Use new accessors.
* config/mips/mips.c (mips_process_sync_loop): Likewise.
* config/pa/pa.md (atomic_loaddi, atomic_storedi): Likewise.
* config/rs6000/rs6000.c (rs6000_pre_atomic_barrier,
rs6000_post_atomic_barrier): Add new cases.
(rs6000_expand_atomic_compare_and_swap): Use new accessors.
* config/rs6000/sync.md (mem_thread_fence): Add new cases.
(atomic_load<mode>): Add new cases and use new accessors.
(store_quadpti): Add new cases.
* config/s390/s390.md (mem_thread_fence, atomic_store<mode>): Use new
accessors.
* config/sparc/sparc.c (sparc_emit_membar_for_model): Use new accessors.
* doc/extend.texi: Update docs to indicate 16 bits are used for memory
model, not 8.
* c-family/c-common.c (get_atomic_generic_size): Use memmodel_base.
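
As background for the diff below: the model word packs everything into its
low 16 bits, with bits 0-14 carrying the base model and bit 15 the SYNC
flag.  A minimal standalone sketch of the composition (plain C mirroring
the new macros, not GCC source):

#include <stdio.h>

/* Mirror of the new coretypes.h definitions.  */
#define MEMMODEL_MASK      ((1 << 16) - 1)   /* low 16 bits hold the model */
#define MEMMODEL_SYNC      (1 << 15)         /* flags a legacy __sync op */
#define MEMMODEL_BASE_MASK (MEMMODEL_SYNC - 1)

enum memmodel {
  MEMMODEL_RELAXED = 0,
  MEMMODEL_CONSUME = 1,
  MEMMODEL_ACQUIRE = 2,
  MEMMODEL_RELEASE = 3,
  MEMMODEL_ACQ_REL = 4,
  MEMMODEL_SEQ_CST = 5,
  MEMMODEL_LAST = 6,
  MEMMODEL_SYNC_ACQUIRE = MEMMODEL_ACQUIRE | MEMMODEL_SYNC,
  MEMMODEL_SYNC_RELEASE = MEMMODEL_RELEASE | MEMMODEL_SYNC,
  MEMMODEL_SYNC_SEQ_CST = MEMMODEL_SEQ_CST | MEMMODEL_SYNC
};

int main (void)
{
  /* A SYNC variant keeps its base ordering semantics but remains
     distinguishable for targets that want a stronger sequence.  */
  enum memmodel m = MEMMODEL_SYNC_SEQ_CST;
  printf ("raw=0x%x base=%d sync=%d\n", (unsigned) m,
          m & MEMMODEL_BASE_MASK, (m & MEMMODEL_SYNC) != 0);
  /* Prints: raw=0x8005 base=5 sync=1.  */
  return 0;
}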
===================================================================
*************** enum function_class {
function_c11_misc
};
+ /* Suppose that higher bits are target dependent. */
+ #define MEMMODEL_MASK ((1<<16)-1)
+
+ /* Legacy sync operations set this upper flag in the memory model. This allows
+ targets that need to do something stronger for sync operations to
+ differentiate with their target patterns and issue a more appropriate insn
+ sequence. See bugzilla 65697 for background. */
+ #define MEMMODEL_SYNC (1<<15)
+
+ /* Memory model without SYNC bit for targets/operations that do not care. */
+ #define MEMMODEL_BASE_MASK (MEMMODEL_SYNC-1)
+
/* Memory model types for the __atomic* builtins.
This must match the order in libstdc++-v3/include/bits/atomic_base.h. */
enum memmodel
*************** enum memmodel
MEMMODEL_RELEASE = 3,
MEMMODEL_ACQ_REL = 4,
MEMMODEL_SEQ_CST = 5,
! MEMMODEL_LAST = 6
};
- /* Suppose that higher bits are target dependent. */
- #define MEMMODEL_MASK ((1<<16)-1)
-
/* Support for user-provided GGC and PCH markers. The first parameter
is a pointer to a pointer, the second a cookie. */
typedef void (*gt_pointer_operator) (void *, void *);
MEMMODEL_RELEASE = 3,
MEMMODEL_ACQ_REL = 4,
MEMMODEL_SEQ_CST = 5,
! MEMMODEL_LAST = 6,
! MEMMODEL_SYNC_ACQUIRE = MEMMODEL_ACQUIRE | MEMMODEL_SYNC,
! MEMMODEL_SYNC_RELEASE = MEMMODEL_RELEASE | MEMMODEL_SYNC,
! MEMMODEL_SYNC_SEQ_CST = MEMMODEL_SEQ_CST | MEMMODEL_SYNC
};
/* Support for user-provided GGC and PCH markers. The first parameter
is a pointer to a pointer, the second a cookie. */
typedef void (*gt_pointer_operator) (void *, void *);
===================================================================
*************** extern void assign_assembler_name_if_nee
extern void warn_deprecated_use (tree, tree);
extern void cache_integer_cst (tree);
+ /* Return the memory model from a host integer. */
+ static inline enum memmodel
+ memmodel_from_int (unsigned HOST_WIDE_INT val)
+ {
+ return (enum memmodel) (val & MEMMODEL_MASK);
+ }
+
+ /* Return the base memory model from a host integer. */
+ static inline enum memmodel
+ memmodel_base (unsigned HOST_WIDE_INT val)
+ {
+ return (enum memmodel) (val & MEMMODEL_BASE_MASK);
+ }
+
+ /* Return TRUE if the memory model is RELAXED. */
+ static inline bool
+ is_mm_relaxed (enum memmodel model)
+ {
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELAXED;
+ }
+
+ /* Return TRUE if the memory model is CONSUME. */
+ static inline bool
+ is_mm_consume (enum memmodel model)
+ {
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_CONSUME;
+ }
+
+ /* Return TRUE if the memory model is ACQUIRE. */
+ static inline bool
+ is_mm_acquire (enum memmodel model)
+ {
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQUIRE;
+ }
+
+ /* Return TRUE if the memory model is RELEASE. */
+ static inline bool
+ is_mm_release (enum memmodel model)
+ {
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_RELEASE;
+ }
+
+ /* Return TRUE if the memory model is ACQ_REL. */
+ static inline bool
+ is_mm_acq_rel (enum memmodel model)
+ {
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_ACQ_REL;
+ }
+
+ /* Return TRUE if the memory model is SEQ_CST. */
+ static inline bool
+ is_mm_seq_cst (enum memmodel model)
+ {
+ return (model & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST;
+ }
+
+ /* Return TRUE if the memory model is a SYNC variant. */
+ static inline bool
+ is_mm_sync (enum memmodel model)
+ {
+ return (model & MEMMODEL_SYNC);
+ }
+
/* Compare and hash for any structure which begins with a canonical
pointer. Assumes all pointers are interchangeable, which is sort
of already assumed by gcc elsewhere IIRC. */
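
The point of these predicates is that generic code keeps asking only about
the base model, so the SYNC bit stays invisible everywhere except where a
target explicitly tests is_mm_sync.  A standalone sketch of that behavior
(same shape as the accessors above, trimmed to two models):

#include <stdbool.h>
#include <stdio.h>

#define MEMMODEL_SYNC      (1 << 15)
#define MEMMODEL_BASE_MASK (MEMMODEL_SYNC - 1)
enum memmodel { MEMMODEL_SEQ_CST = 5,
                MEMMODEL_SYNC_SEQ_CST = MEMMODEL_SEQ_CST | MEMMODEL_SYNC };

/* Base predicate: masks the SYNC bit away before comparing.  */
static bool is_mm_seq_cst (enum memmodel m)
{ return (m & MEMMODEL_BASE_MASK) == MEMMODEL_SEQ_CST; }

/* SYNC predicate: tests only the flag bit.  */
static bool is_mm_sync (enum memmodel m)
{ return (m & MEMMODEL_SYNC) != 0; }

int main (void)
{
  printf ("%d %d\n", is_mm_seq_cst (MEMMODEL_SEQ_CST),     /* 1 */
          is_mm_seq_cst (MEMMODEL_SYNC_SEQ_CST));          /* 1 */
  printf ("%d %d\n", is_mm_sync (MEMMODEL_SEQ_CST),        /* 0 */
          is_mm_sync (MEMMODEL_SYNC_SEQ_CST));             /* 1 */
  return 0;
}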
===================================================================
*************** expand_builtin_sync_operation (machine_m
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
! return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SEQ_CST,
after);
}
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
! return expand_atomic_fetch_op (target, mem, val, code, MEMMODEL_SYNC_SEQ_CST,
after);
}
*************** expand_builtin_compare_and_swap (machine
poval = ⌖
}
if (!expand_atomic_compare_and_swap (pbool, poval, mem, old_val, new_val,
! false, MEMMODEL_SEQ_CST,
! MEMMODEL_SEQ_CST))
return NULL_RTX;
return target;
poval = ⌖
}
if (!expand_atomic_compare_and_swap (pbool, poval, mem, old_val, new_val,
! false, MEMMODEL_SYNC_SEQ_CST,
! MEMMODEL_SYNC_SEQ_CST))
return NULL_RTX;
return target;
*************** expand_builtin_sync_lock_release (machin
/* Expand the operands. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
! expand_atomic_store (mem, const0_rtx, MEMMODEL_RELEASE, true);
}
/* Given an integer representing an ``enum memmodel'', verify its
/* Expand the operands. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
! expand_atomic_store (mem, const0_rtx, MEMMODEL_SYNC_RELEASE, true);
}
/* Given an integer representing an ``enum memmodel'', verify its
*************** get_memmodel (tree exp)
return MEMMODEL_SEQ_CST;
}
! if ((INTVAL (op) & MEMMODEL_MASK) >= MEMMODEL_LAST)
{
warning (OPT_Winvalid_memory_model,
"invalid memory model argument to builtin");
return MEMMODEL_SEQ_CST;
}
! /* Should never see a user explicit SYNC memory model, so >= LAST works. */
! if (memmodel_base (val) >= MEMMODEL_LAST)
{
warning (OPT_Winvalid_memory_model,
"invalid memory model argument to builtin");
*************** expand_builtin_atomic_compare_exchange (
success = MEMMODEL_SEQ_CST;
}
! if ((failure & MEMMODEL_MASK) == MEMMODEL_RELEASE
! || (failure & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
{
warning (OPT_Winvalid_memory_model,
"invalid failure memory model for "
success = MEMMODEL_SEQ_CST;
}
! if (is_mm_release (failure) || is_mm_acq_rel (failure))
{
warning (OPT_Winvalid_memory_model,
"invalid failure memory model for "
*************** expand_builtin_atomic_load (machine_mode
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
! if ((model & MEMMODEL_MASK) == MEMMODEL_RELEASE
! || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_load%>");
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
! if (is_mm_release (model) || is_mm_acq_rel (model))
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_load%>");
*************** expand_builtin_atomic_store (machine_mod
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 2));
! if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED
! && (model & MEMMODEL_MASK) != MEMMODEL_SEQ_CST
! && (model & MEMMODEL_MASK) != MEMMODEL_RELEASE)
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
enum memmodel model;
model = get_memmodel (CALL_EXPR_ARG (exp, 2));
! if (!(is_mm_relaxed (model) || is_mm_seq_cst (model)
! || is_mm_release (model)))
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
*************** expand_builtin_atomic_clear (tree exp)
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
! if ((model & MEMMODEL_MASK) == MEMMODEL_CONSUME
! || (model & MEMMODEL_MASK) == MEMMODEL_ACQUIRE
! || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
! if (is_mm_consume (model) || is_mm_acquire (model) || is_mm_acq_rel (model))
{
warning (OPT_Winvalid_memory_model,
"invalid memory model for %<__atomic_store%>");
*************** expand_builtin_atomic_signal_fence (tree
static void
expand_builtin_sync_synchronize (void)
{
! expand_mem_thread_fence (MEMMODEL_SEQ_CST);
}
static rtx
static void
expand_builtin_sync_synchronize (void)
{
! expand_mem_thread_fence (MEMMODEL_SYNC_SEQ_CST);
}
static rtx
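
The user-visible effect of the builtins.c changes, as a compile-only
illustration (the builtins are the documented GCC ones; the wrapper names
bump_sync/bump_atomic are illustrative only): both functions reach the
same expander, but the legacy builtin now carries the SYNC flag in its
model argument, so a target pattern can issue a stronger sequence for it
while generic code, which tests only base predicates, sees no difference.

/* After this patch, the __sync form expands with MEMMODEL_SYNC_SEQ_CST
   while the __atomic form keeps plain MEMMODEL_SEQ_CST.  */
int counter;

int bump_sync (void)
{
  return __sync_fetch_and_add (&counter, 1);
}

int bump_atomic (void)
{
  return __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST);
}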
===================================================================
*************** expand_compare_and_swap_loop (rtx mem, r
success = NULL_RTX;
oldval = cmp_reg;
if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
! new_reg, false, MEMMODEL_SEQ_CST,
MEMMODEL_RELAXED))
return false;
success = NULL_RTX;
oldval = cmp_reg;
if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
! new_reg, false, MEMMODEL_SYNC_SEQ_CST,
MEMMODEL_RELAXED))
return false;
*************** maybe_emit_sync_lock_test_and_set (rtx t
exists, and the memory model is stronger than acquire, add a release
barrier before the instruction. */
! if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST
! || (model & MEMMODEL_MASK) == MEMMODEL_RELEASE
! || (model & MEMMODEL_MASK) == MEMMODEL_ACQ_REL)
expand_mem_thread_fence (model);
if (icode != CODE_FOR_nothing)
exists, and the memory model is stronger than acquire, add a release
barrier before the instruction. */
! if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
expand_mem_thread_fence (model);
if (icode != CODE_FOR_nothing)
*************** expand_sync_lock_test_and_set (rtx targe
rtx ret;
/* Try an atomic_exchange first. */
! ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE);
if (ret)
return ret;
! ret = maybe_emit_sync_lock_test_and_set (target, mem, val, MEMMODEL_ACQUIRE);
if (ret)
return ret;
rtx ret;
/* Try an atomic_exchange first. */
! ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
if (ret)
return ret;
! ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
! MEMMODEL_SYNC_ACQUIRE);
if (ret)
return ret;
*************** expand_sync_lock_test_and_set (rtx targe
/* If there are no other options, try atomic_test_and_set if the value
being stored is 1. */
if (val == const1_rtx)
! ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_ACQUIRE);
return ret;
}
/* If there are no other options, try atomic_test_and_set if the value
being stored is 1. */
if (val == const1_rtx)
! ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
return ret;
}
*************** expand_mem_thread_fence (enum memmodel m
{
if (HAVE_mem_thread_fence)
emit_insn (gen_mem_thread_fence (GEN_INT (model)));
! else if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED)
{
if (HAVE_memory_barrier)
emit_insn (gen_memory_barrier ());
{
if (HAVE_mem_thread_fence)
emit_insn (gen_mem_thread_fence (GEN_INT (model)));
! else if (!is_mm_relaxed (model))
{
if (HAVE_memory_barrier)
emit_insn (gen_memory_barrier ());
*************** expand_mem_signal_fence (enum memmodel m
{
if (HAVE_mem_signal_fence)
emit_insn (gen_mem_signal_fence (GEN_INT (model)));
! else if ((model & MEMMODEL_MASK) != MEMMODEL_RELAXED)
{
/* By default targets are coherent between a thread and the signal
handler running on the same thread. Thus this really becomes a
{
if (HAVE_mem_signal_fence)
emit_insn (gen_mem_signal_fence (GEN_INT (model)));
! else if (!is_mm_relaxed (model))
{
/* By default targets are coherent between a thread and the signal
handler running on the same thread. Thus this really becomes a
*************** expand_atomic_load (rtx target, rtx mem,
target = gen_reg_rtx (mode);
/* For SEQ_CST, emit a barrier before the load. */
! if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
expand_mem_thread_fence (model);
emit_move_insn (target, mem);
target = gen_reg_rtx (mode);
/* For SEQ_CST, emit a barrier before the load. */
! if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
emit_move_insn (target, mem);
*************** expand_atomic_store (rtx mem, rtx val, e
if (maybe_expand_insn (icode, 2, ops))
{
/* lock_release is only a release barrier. */
! if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
expand_mem_thread_fence (model);
return const0_rtx;
}
if (maybe_expand_insn (icode, 2, ops))
{
/* lock_release is only a release barrier. */
! if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
return const0_rtx;
}
*************** expand_atomic_store (rtx mem, rtx val, e
emit_move_insn (mem, val);
/* For SEQ_CST, also emit a barrier after the store. */
! if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
expand_mem_thread_fence (model);
return const0_rtx;
emit_move_insn (mem, val);
/* For SEQ_CST, also emit a barrier after the store. */
! if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
return const0_rtx;
===================================================================
*************** need_atomic_barrier_p (enum memmodel mod
case MEMMODEL_CONSUME:
return false;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
return pre;
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
return !pre;
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
return true;
default:
gcc_unreachable ();
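
Condensed, the updated decision reads as follows (a standalone sketch of
need_atomic_barrier_p with the enum inlined and the gcc_unreachable
default folded into the strong-model case):

#include <stdbool.h>

#define MEMMODEL_SYNC (1 << 15)
enum memmodel { MEMMODEL_RELAXED, MEMMODEL_CONSUME, MEMMODEL_ACQUIRE,
                MEMMODEL_RELEASE, MEMMODEL_ACQ_REL, MEMMODEL_SEQ_CST,
                MEMMODEL_LAST,
                MEMMODEL_SYNC_ACQUIRE = MEMMODEL_ACQUIRE | MEMMODEL_SYNC,
                MEMMODEL_SYNC_RELEASE = MEMMODEL_RELEASE | MEMMODEL_SYNC,
                MEMMODEL_SYNC_SEQ_CST = MEMMODEL_SEQ_CST | MEMMODEL_SYNC };

/* PRE selects the barrier before (true) or after (false) the operation:
   release-style models fence before, acquire-style models fence after,
   ACQ_REL and SEQ_CST fence on both sides.  SYNC variants follow their
   base model.  */
static bool
need_atomic_barrier_p (enum memmodel model, bool pre)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
      return false;
    case MEMMODEL_RELEASE:
    case MEMMODEL_SYNC_RELEASE:
      return pre;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_SYNC_ACQUIRE:
      return !pre;
    default:  /* MEMMODEL_ACQ_REL, MEMMODEL_SEQ_CST, MEMMODEL_SYNC_SEQ_CST.  */
      return true;
    }
}

int main (void)
{
  /* SEQ_CST needs both barriers; RELEASE only the leading one.  */
  return (need_atomic_barrier_p (MEMMODEL_SYNC_SEQ_CST, true)
          && !need_atomic_barrier_p (MEMMODEL_SYNC_RELEASE, false)) ? 0 : 1;
}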
===================================================================
*************** instrument_builtin_call (gimple_stmt_ite
case fetch_op:
last_arg = gimple_call_arg (stmt, num - 1);
if (!tree_fits_uhwi_p (last_arg)
! || tree_to_uhwi (last_arg) > MEMMODEL_SEQ_CST)
return;
gimple_call_set_fndecl (stmt, decl);
update_stmt (stmt);
case fetch_op:
last_arg = gimple_call_arg (stmt, num - 1);
if (!tree_fits_uhwi_p (last_arg)
! || memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
return;
gimple_call_set_fndecl (stmt, decl);
update_stmt (stmt);
*************** instrument_builtin_call (gimple_stmt_ite
for (j = 0; j < 6; j++)
args[j] = gimple_call_arg (stmt, j);
if (!tree_fits_uhwi_p (args[4])
! || tree_to_uhwi (args[4]) > MEMMODEL_SEQ_CST)
return;
if (!tree_fits_uhwi_p (args[5])
! || tree_to_uhwi (args[5]) > MEMMODEL_SEQ_CST)
return;
update_gimple_call (gsi, decl, 5, args[0], args[1], args[2],
args[4], args[5]);
for (j = 0; j < 6; j++)
args[j] = gimple_call_arg (stmt, j);
if (!tree_fits_uhwi_p (args[4])
! || memmodel_base (tree_to_uhwi (args[4])) >= MEMMODEL_LAST)
return;
if (!tree_fits_uhwi_p (args[5])
! || memmodel_base (tree_to_uhwi (args[5])) >= MEMMODEL_LAST)
return;
update_gimple_call (gsi, decl, 5, args[0], args[1], args[2],
args[4], args[5]);
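
These checks accept exactly the user-supplied range.  Since the SYNC bit
is never set on a model that came from source code (see the get_memmodel
comment earlier), masking with MEMMODEL_BASE_MASK before comparing
against MEMMODEL_LAST is sufficient.  A small sketch of the arithmetic
(valid_user_model is an illustrative helper, not a GCC function):

#include <stdio.h>

#define MEMMODEL_SYNC      (1 << 15)
#define MEMMODEL_BASE_MASK (MEMMODEL_SYNC - 1)
#define MEMMODEL_LAST      6

/* Same shape as the reworked checks: reject any base model past the
   end of the enum.  */
static int valid_user_model (unsigned long val)
{
  return (val & MEMMODEL_BASE_MASK) < MEMMODEL_LAST;
}

int main (void)
{
  printf ("%d\n", valid_user_model (5));   /* SEQ_CST: 1 */
  printf ("%d\n", valid_user_model (6));   /* past the enum: 0 */
  printf ("%d\n", valid_user_model (42));  /* garbage: 0 */
  return 0;
}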
===================================================================
*************** get_atomic_generic_size (location_t loc,
if (TREE_CODE (p) == INTEGER_CST)
{
int i = tree_to_uhwi (p);
! if (i < 0 || (i & MEMMODEL_MASK) >= MEMMODEL_LAST)
{
warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model argument %d of %qE", x + 1,
if (TREE_CODE (p) == INTEGER_CST)
{
int i = tree_to_uhwi (p);
! if (i < 0 || (memmodel_base (i) >= MEMMODEL_LAST))
{
warning_at (loc, OPT_Winvalid_memory_model,
"invalid memory model argument %d of %qE", x + 1,
===================================================================
*************** aarch64_expand_compare_and_swap (rtx ope
unlikely event of fail being ACQUIRE and succ being RELEASE we need to
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
! if (INTVAL (mod_f) == MEMMODEL_ACQUIRE
! && INTVAL (mod_s) == MEMMODEL_RELEASE)
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)
unlikely event of fail being ACQUIRE and succ being RELEASE we need to
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
! if (is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
! && is_mm_release (memmodel_from_int (INTVAL (mod_s))))
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)
===================================================================
***************
UNSPECV_LDA))]
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
! if (model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_RELEASE)
return "ldr<atomic_sfx>\t%<w>0, %1";
else
return "ldar<atomic_sfx>\t%<w>0, %1";
UNSPECV_LDA))]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
! if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldr<atomic_sfx>\t%<w>0, %1";
else
return "ldar<atomic_sfx>\t%<w>0, %1";
***************
UNSPECV_STL))]
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
! if (model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_ACQUIRE)
return "str<atomic_sfx>\t%<w>1, %0";
else
return "stlr<atomic_sfx>\t%<w>1, %0";
UNSPECV_STL))]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
! if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return "str<atomic_sfx>\t%<w>1, %0";
else
return "stlr<atomic_sfx>\t%<w>1, %0";
***************
UNSPECV_LX)))]
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
! if (model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_RELEASE)
return "ldxr<atomic_sfx>\t%w0, %1";
else
return "ldaxr<atomic_sfx>\t%w0, %1";
UNSPECV_LX)))]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
! if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldxr<atomic_sfx>\t%w0, %1";
else
return "ldaxr<atomic_sfx>\t%w0, %1";
***************
UNSPECV_LX))]
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
! if (model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_RELEASE)
return "ldxr\t%<w>0, %1";
else
return "ldaxr\t%<w>0, %1";
UNSPECV_LX))]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
! if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return "ldxr\t%<w>0, %1";
else
return "ldaxr\t%<w>0, %1";
***************
UNSPECV_SX))]
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[3]);
! if (model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_ACQUIRE)
return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
else
return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
UNSPECV_SX))]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[3]));
! if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return "stxr<atomic_sfx>\t%w0, %<w>2, %1";
else
return "stlxr<atomic_sfx>\t%w0, %<w>2, %1";
***************
[(match_operand:SI 0 "const_int_operand" "")]
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[0]);
! if (model != MEMMODEL_RELAXED && model != MEMMODEL_CONSUME)
emit_insn (gen_dmb (operands[0]));
DONE;
}
[(match_operand:SI 0 "const_int_operand" "")]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
! if (!(is_mm_relaxed (model) || is_mm_consume (model)))
emit_insn (gen_dmb (operands[0]));
DONE;
}
***************
UNSPEC_MB))]
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[1]);
! if (model == MEMMODEL_ACQUIRE)
return "dmb\\tishld";
else
return "dmb\\tish";
UNSPEC_MB))]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[1]));
! if (is_mm_acquire (model))
return "dmb\\tishld";
else
return "dmb\\tish";
===================================================================
*************** alpha_split_compare_and_swap (rtx operan
oldval = operands[3];
newval = operands[4];
is_weak = (operands[5] != const0_rtx);
! mod_s = (enum memmodel) INTVAL (operands[6]);
! mod_f = (enum memmodel) INTVAL (operands[7]);
mode = GET_MODE (mem);
alpha_pre_atomic_barrier (mod_s);
oldval = operands[3];
newval = operands[4];
is_weak = (operands[5] != const0_rtx);
! mod_s = memmodel_from_int (INTVAL (operands[6]));
! mod_f = memmodel_from_int (INTVAL (operands[7]));
mode = GET_MODE (mem);
alpha_pre_atomic_barrier (mod_s);
*************** alpha_split_compare_and_swap (rtx operan
emit_unlikely_jump (x, label1);
}
! if (mod_f != MEMMODEL_RELAXED)
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
! if (mod_f == MEMMODEL_RELAXED)
emit_label (XEXP (label2, 0));
}
emit_unlikely_jump (x, label1);
}
! if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
! if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
}
*************** alpha_split_compare_and_swap_12 (rtx ope
newval = operands[4];
align = operands[5];
is_weak = (operands[6] != const0_rtx);
! mod_s = (enum memmodel) INTVAL (operands[7]);
! mod_f = (enum memmodel) INTVAL (operands[8]);
scratch = operands[9];
mode = GET_MODE (orig_mem);
addr = XEXP (orig_mem, 0);
newval = operands[4];
align = operands[5];
is_weak = (operands[6] != const0_rtx);
! mod_s = memmodel_from_int (INTVAL (operands[7]));
! mod_f = memmodel_from_int (INTVAL (operands[8]));
scratch = operands[9];
mode = GET_MODE (orig_mem);
addr = XEXP (orig_mem, 0);
*************** alpha_split_compare_and_swap_12 (rtx ope
emit_unlikely_jump (x, label1);
}
! if (mod_f != MEMMODEL_RELAXED)
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
! if (mod_f == MEMMODEL_RELAXED)
emit_label (XEXP (label2, 0));
}
emit_unlikely_jump (x, label1);
}
! if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
alpha_post_atomic_barrier (mod_s);
! if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
}
===================================================================
*************** arm_expand_compare_and_swap (rtx operand
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
if (TARGET_HAVE_LDACQ
! && INTVAL (mod_f) == MEMMODEL_ACQUIRE
! && INTVAL (mod_s) == MEMMODEL_RELEASE)
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)
promote succ to ACQ_REL so that we don't lose the acquire semantics. */
if (TARGET_HAVE_LDACQ
! && is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
! && is_mm_release (memmodel_from_int (INTVAL (mod_s))))
mod_s = GEN_INT (MEMMODEL_ACQ_REL);
switch (mode)
*************** arm_split_compare_and_swap (rtx operands
oldval = operands[2];
newval = operands[3];
is_weak = (operands[4] != const0_rtx);
! mod_s = (enum memmodel) INTVAL (operands[5]);
! mod_f = (enum memmodel) INTVAL (operands[6]);
scratch = operands[7];
mode = GET_MODE (mem);
bool use_acquire = TARGET_HAVE_LDACQ
! && !(mod_s == MEMMODEL_RELAXED
! || mod_s == MEMMODEL_CONSUME
! || mod_s == MEMMODEL_RELEASE);
!
bool use_release = TARGET_HAVE_LDACQ
! && !(mod_s == MEMMODEL_RELAXED
! || mod_s == MEMMODEL_CONSUME
! || mod_s == MEMMODEL_ACQUIRE);
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
oldval = operands[2];
newval = operands[3];
is_weak = (operands[4] != const0_rtx);
! mod_s = memmodel_from_int (INTVAL (operands[5]));
! mod_f = memmodel_from_int (INTVAL (operands[6]));
scratch = operands[7];
mode = GET_MODE (mem);
bool use_acquire = TARGET_HAVE_LDACQ
! && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
! || is_mm_release (mod_s));
!
bool use_release = TARGET_HAVE_LDACQ
! && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
! || is_mm_acquire (mod_s));
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
*************** arm_split_compare_and_swap (rtx operands
emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
}
! if (mod_f != MEMMODEL_RELAXED)
emit_label (label2);
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
arm_post_atomic_barrier (mod_s);
! if (mod_f == MEMMODEL_RELAXED)
emit_label (label2);
}
emit_unlikely_jump (gen_rtx_SET (VOIDmode, pc_rtx, x));
}
! if (!is_mm_relaxed (mod_f))
emit_label (label2);
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
arm_post_atomic_barrier (mod_s);
! if (is_mm_relaxed (mod_f))
emit_label (label2);
}
*************** void
arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
rtx value, rtx model_rtx, rtx cond)
{
! enum memmodel model = (enum memmodel) INTVAL (model_rtx);
machine_mode mode = GET_MODE (mem);
machine_mode wmode = (mode == DImode ? DImode : SImode);
rtx_code_label *label;
rtx x;
bool use_acquire = TARGET_HAVE_LDACQ
! && !(model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_RELEASE);
bool use_release = TARGET_HAVE_LDACQ
! && !(model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_ACQUIRE);
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
rtx value, rtx model_rtx, rtx cond)
{
! enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
machine_mode mode = GET_MODE (mem);
machine_mode wmode = (mode == DImode ? DImode : SImode);
rtx_code_label *label;
rtx x;
bool use_acquire = TARGET_HAVE_LDACQ
! && !(is_mm_relaxed (model) || is_mm_consume (model)
! || is_mm_release (model));
bool use_release = TARGET_HAVE_LDACQ
! && !(is_mm_relaxed (model) || is_mm_consume (model)
! || is_mm_acquire (model));
/* Checks whether a barrier is needed and emits one accordingly. */
if (!(use_acquire || use_release))
===================================================================
***************
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
! if (model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_RELEASE)
return \"ldr<sync_sfx>\\t%0, %1\";
else
return \"lda<sync_sfx>\\t%0, %1\";
VUNSPEC_LDA))]
"TARGET_HAVE_LDACQ"
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
! if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
return \"ldr<sync_sfx>\\t%0, %1\";
else
return \"lda<sync_sfx>\\t%0, %1\";
***************
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
! if (model == MEMMODEL_RELAXED
! || model == MEMMODEL_CONSUME
! || model == MEMMODEL_ACQUIRE)
return \"str<sync_sfx>\t%1, %0\";
else
return \"stl<sync_sfx>\t%1, %0\";
VUNSPEC_STL))]
"TARGET_HAVE_LDACQ"
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
! if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
return \"str<sync_sfx>\t%1, %0\";
else
return \"stl<sync_sfx>\t%1, %0\";
***************
(match_operand:SI 2 "const_int_operand")] ;; model
"TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
! if (model == MEMMODEL_SEQ_CST)
expand_mem_thread_fence (model);
DONE;
})
(match_operand:SI 2 "const_int_operand")] ;; model
"TARGET_HAVE_LDREXD && ARM_DOUBLEWORD_ALIGN"
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
! if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
===================================================================
*************** ix86_destroy_cost_data (void *data)
static unsigned HOST_WIDE_INT
ix86_memmodel_check (unsigned HOST_WIDE_INT val)
{
! unsigned HOST_WIDE_INT model = val & MEMMODEL_MASK;
bool strong;
if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
static unsigned HOST_WIDE_INT
ix86_memmodel_check (unsigned HOST_WIDE_INT val)
{
! enum memmodel model = memmodel_from_int (val);
bool strong;
if (val & ~(unsigned HOST_WIDE_INT)(IX86_HLE_ACQUIRE|IX86_HLE_RELEASE
*************** ix86_memmodel_check (unsigned HOST_WIDE_
"Unknown architecture specific memory model");
return MEMMODEL_SEQ_CST;
}
! strong = (model == MEMMODEL_ACQ_REL || model == MEMMODEL_SEQ_CST);
! if (val & IX86_HLE_ACQUIRE && !(model == MEMMODEL_ACQUIRE || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_ACQUIRE not used with ACQUIRE or stronger memory model");
return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
}
! if (val & IX86_HLE_RELEASE && !(model == MEMMODEL_RELEASE || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_RELEASE not used with RELEASE or stronger memory model");
"Unknown architecture specific memory model");
return MEMMODEL_SEQ_CST;
}
! strong = (is_mm_acq_rel (model) || is_mm_seq_cst (model));
! if (val & IX86_HLE_ACQUIRE && !(is_mm_acquire (model) || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_ACQUIRE not used with ACQUIRE or stronger memory model");
return MEMMODEL_SEQ_CST | IX86_HLE_ACQUIRE;
}
! if (val & IX86_HLE_RELEASE && !(is_mm_release (model) || strong))
{
warning (OPT_Winvalid_memory_model,
"HLE_RELEASE not used with RELEASE or stronger memory model");
===================================================================
***************
[(match_operand:SI 0 "const_int_operand")] ;; model
""
{
! enum memmodel model = (enum memmodel) (INTVAL (operands[0]) & MEMMODEL_MASK);
/* Unless this is a SEQ_CST fence, the i386 memory model is strong
enough not to require barriers of any kind. */
! if (model == MEMMODEL_SEQ_CST)
{
rtx (*mfence_insn)(rtx);
rtx mem;
[(match_operand:SI 0 "const_int_operand")] ;; model
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
/* Unless this is a SEQ_CST fence, the i386 memory model is strong
enough not to require barriers of any kind. */
! if (is_mm_seq_cst (model))
{
rtx (*mfence_insn)(rtx);
rtx mem;
***************
UNSPEC_STA))]
""
{
! enum memmodel model = (enum memmodel) (INTVAL (operands[2]) & MEMMODEL_MASK);
if (<MODE>mode == DImode && !TARGET_64BIT)
{
UNSPEC_STA))]
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (<MODE>mode == DImode && !TARGET_64BIT)
{
***************
operands[1] = force_reg (<MODE>mode, operands[1]);
/* For seq-cst stores, when we lack MFENCE, use XCHG. */
! if (model == MEMMODEL_SEQ_CST && !(TARGET_64BIT || TARGET_SSE2))
{
emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
operands[0], operands[1],
operands[1] = force_reg (<MODE>mode, operands[1]);
/* For seq-cst stores, when we lack MFENCE, use XCHG. */
! if (is_mm_seq_cst (model) && !(TARGET_64BIT || TARGET_SSE2))
{
emit_insn (gen_atomic_exchange<mode> (gen_reg_rtx (<MODE>mode),
operands[0], operands[1],
***************
operands[2]));
}
/* ... followed by an MFENCE, if required. */
! if (model == MEMMODEL_SEQ_CST)
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})
operands[2]));
}
/* ... followed by an MFENCE, if required. */
! if (is_mm_seq_cst (model))
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})
===================================================================
*************** ia64_expand_atomic_op (enum rtx_code cod
{
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_memory_barrier ());
/* FALLTHRU */
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_CONSUME:
if (mode == SImode)
icode = CODE_FOR_fetchadd_acq_si;
*************** ia64_expand_atomic_op (enum rtx_code cod
icode = CODE_FOR_fetchadd_acq_di;
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
if (mode == SImode)
icode = CODE_FOR_fetchadd_rel_si;
else
*************** ia64_expand_atomic_op (enum rtx_code cod
front half of the full barrier. The end half is the cmpxchg.rel.
For relaxed and release memory models, we don't need this. But we
also don't bother trying to prevent it either. */
! gcc_assert (model == MEMMODEL_RELAXED
! || model == MEMMODEL_RELEASE
|| MEM_VOLATILE_P (mem));
old_reg = gen_reg_rtx (DImode);
front half of the full barrier. The end half is the cmpxchg.rel.
For relaxed and release memory models, we don't need this. But we
also don't bother trying to prevent it either. */
! gcc_assert (is_mm_relaxed (model) || is_mm_release (model)
|| MEM_VOLATILE_P (mem));
old_reg = gen_reg_rtx (DImode);
*************** ia64_expand_atomic_op (enum rtx_code cod
{
case MEMMODEL_RELAXED:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_CONSUME:
switch (mode)
{
*************** ia64_expand_atomic_op (enum rtx_code cod
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
switch (mode)
{
case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
===================================================================
***************
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
! if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
emit_insn (gen_memory_barrier ());
DONE;
})
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
! if (is_mm_seq_cst (memmodel_from_int (INTVAL (operands[0]))))
emit_insn (gen_memory_barrier ());
DONE;
})
***************
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
/* Unless the memory model is relaxed, we want to emit ld.acq, which
will happen automatically for volatile memories. */
! gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[1]));
emit_move_insn (operands[0], operands[1]);
DONE;
})
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
/* Unless the memory model is relaxed, we want to emit ld.acq, which
will happen automatically for volatile memories. */
! gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[1]));
emit_move_insn (operands[0], operands[1]);
DONE;
})
***************
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
/* Unless the memory model is relaxed, we want to emit st.rel, which
will happen automatically for volatile memories. */
! gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[0]));
emit_move_insn (operands[0], operands[1]);
/* Sequentially consistent stores need a subsequent MF. See
http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001952.html
for a discussion of why a MF is needed here, but not for atomic_load. */
! if (model == MEMMODEL_SEQ_CST)
emit_insn (gen_memory_barrier ());
DONE;
})
(match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
/* Unless the memory model is relaxed, we want to emit st.rel, which
will happen automatically for volatile memories. */
! gcc_assert (is_mm_relaxed (model) || MEM_VOLATILE_P (operands[0]));
emit_move_insn (operands[0], operands[1]);
/* Sequentially consistent stores need a subsequent MF. See
http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001952.html
for a discussion of why a MF is needed here, but not for atomic_load. */
! if (is_mm_seq_cst (model))
emit_insn (gen_memory_barrier ());
DONE;
})
***************
(match_operand:SI 7 "const_int_operand" "")] ;; fail model
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[6]);
rtx ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
rtx dval, eval;
(match_operand:SI 7 "const_int_operand" "")] ;; fail model
""
{
! /* No need to distinguish __sync from __atomic, so get the base value. */
! enum memmodel model = memmodel_base (INTVAL (operands[6]));
rtx ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
rtx dval, eval;
***************
(match_operand:SI 3 "const_int_operand" "")] ;; succ model
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[3]);
switch (model)
{
(match_operand:SI 3 "const_int_operand" "")] ;; succ model
""
{
! /* No need to distinguish __sync from __atomic, so get the base value. */
! enum memmodel model = memmodel_base (INTVAL (operands[3]));
switch (model)
{
===================================================================
*************** mips_process_sync_loop (rtx_insn *insn,
model = MEMMODEL_ACQUIRE;
break;
default:
! model = (enum memmodel) INTVAL (operands[memmodel_attr]);
}
mips_multi_start ();
model = MEMMODEL_ACQUIRE;
break;
default:
! model = memmodel_from_int (INTVAL (operands[memmodel_attr]));
}
mips_multi_start ();
===================================================================
***************
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
operands[1] = force_reg (SImode, XEXP (operands[1], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1], operands[2]));
! if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
expand_mem_thread_fence (model);
DONE;
})
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
operands[1] = force_reg (SImode, XEXP (operands[1], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1], operands[2]));
! if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
***************
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
operands[0] = force_reg (SImode, XEXP (operands[0], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1], operands[2]));
! if ((model & MEMMODEL_MASK) == MEMMODEL_SEQ_CST)
expand_mem_thread_fence (model);
DONE;
})
(match_operand:SI 2 "const_int_operand")] ;; model
"!TARGET_64BIT && !TARGET_SOFT_FLOAT"
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
operands[0] = force_reg (SImode, XEXP (operands[0], 0));
operands[2] = gen_reg_rtx (DImode);
expand_mem_thread_fence (model);
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1], operands[2]));
! if (is_mm_seq_cst (model))
expand_mem_thread_fence (model);
DONE;
})
===================================================================
*************** rs6000_pre_atomic_barrier (rtx mem, enum
case MEMMODEL_RELAXED:
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
*************** rs6000_post_atomic_barrier (enum memmode
case MEMMODEL_RELAXED:
case MEMMODEL_CONSUME:
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
break;
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_ACQ_REL:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_isync ());
break;
default:
*************** rs6000_expand_atomic_compare_and_swap (r
oldval = operands[3];
newval = operands[4];
is_weak = (INTVAL (operands[5]) != 0);
! mod_s = (enum memmodel) INTVAL (operands[6]);
! mod_f = (enum memmodel) INTVAL (operands[7]);
orig_mode = mode = GET_MODE (mem);
mask = shift = NULL_RTX;
oldval = operands[3];
newval = operands[4];
is_weak = (INTVAL (operands[5]) != 0);
! mod_s = memmodel_from_int (INTVAL (operands[6]));
! mod_f = memmodel_from_int (INTVAL (operands[7]));
orig_mode = mode = GET_MODE (mem);
mask = shift = NULL_RTX;
*************** rs6000_expand_atomic_compare_and_swap (r
emit_unlikely_jump (x, label1);
}
! if (mod_f != MEMMODEL_RELAXED)
emit_label (XEXP (label2, 0));
rs6000_post_atomic_barrier (mod_s);
! if (mod_f == MEMMODEL_RELAXED)
emit_label (XEXP (label2, 0));
if (shift)
emit_unlikely_jump (x, label1);
}
! if (!is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
rs6000_post_atomic_barrier (mod_s);
! if (is_mm_relaxed (mod_f))
emit_label (XEXP (label2, 0));
if (shift)
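
Summarizing the rs6000 barrier selection after the new cases (a sketch
condensing rs6000_pre_atomic_barrier and rs6000_post_atomic_barrier
above; pre_barrier/post_barrier are illustrative names; switching on the
base model subsumes the SYNC variants, which is exactly why they could be
added as plain fallthrough cases):

#include <stdio.h>

#define MEMMODEL_SYNC      (1 << 15)
#define MEMMODEL_BASE_MASK (MEMMODEL_SYNC - 1)
enum memmodel { MEMMODEL_RELAXED, MEMMODEL_CONSUME, MEMMODEL_ACQUIRE,
                MEMMODEL_RELEASE, MEMMODEL_ACQ_REL, MEMMODEL_SEQ_CST };

/* Barrier before the atomic op: lwsync for release-style models,
   hwsync for seq_cst, nothing otherwise.  */
static const char *
pre_barrier (enum memmodel model)
{
  switch (model & MEMMODEL_BASE_MASK)
    {
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:  return "lwsync";
    case MEMMODEL_SEQ_CST:  return "hwsync";
    default:                return "";   /* relaxed/consume/acquire */
    }
}

/* Barrier after the op: isync for anything with acquire semantics.  */
static const char *
post_barrier (enum memmodel model)
{
  switch (model & MEMMODEL_BASE_MASK)
    {
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:  return "isync";
    default:                return "";   /* relaxed/consume/release */
    }
}

int main (void)
{
  enum memmodel m = MEMMODEL_SEQ_CST | MEMMODEL_SYNC;  /* __sync-style op */
  printf ("pre=%s post=%s\n", pre_barrier (m), post_barrier (m));
  /* Prints: pre=hwsync post=isync.  */
  return 0;
}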
===================================================================
***************
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[0]);
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
case MEMMODEL_RELEASE:
case MEMMODEL_ACQ_REL:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
[(match_operand:SI 0 "const_int_operand" "")] ;; model
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[0]));
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
case MEMMODEL_ACQ_REL:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
***************
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
! if (model == MEMMODEL_SEQ_CST)
emit_insn (gen_hwsync ());
if (<MODE>mode != TImode)
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
! if (is_mm_seq_cst (model))
emit_insn (gen_hwsync ());
if (<MODE>mode != TImode)
***************
break;
case MEMMODEL_CONSUME:
case MEMMODEL_ACQUIRE:
+ case MEMMODEL_SYNC_ACQUIRE:
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_loadsync_<mode> (operands[0]));
break;
default:
***************
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_RELEASE:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
if (<MODE>mode == TImode && !TARGET_SYNC_TI)
FAIL;
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
switch (model)
{
case MEMMODEL_RELAXED:
break;
case MEMMODEL_RELEASE:
+ case MEMMODEL_SYNC_RELEASE:
emit_insn (gen_lwsync ());
break;
case MEMMODEL_SEQ_CST:
+ case MEMMODEL_SYNC_SEQ_CST:
emit_insn (gen_hwsync ());
break;
default:
===================================================================
***************
{
/* Unless this is a SEQ_CST fence, the s390 memory model is strong
enough not to require barriers of any kind. */
! if (INTVAL (operands[0]) == MEMMODEL_SEQ_CST)
{
rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
MEM_VOLATILE_P (mem) = 1;
{
/* Unless this is a SEQ_CST fence, the s390 memory model is strong
enough not to require barriers of any kind. */
! if (is_mm_seq_cst (memmodel_from_int (INTVAL (operands[0]))))
{
rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
MEM_VOLATILE_P (mem) = 1;
***************
(match_operand:SI 2 "const_int_operand")] ;; model
""
{
! enum memmodel model = (enum memmodel) INTVAL (operands[2]);
if (MEM_ALIGN (operands[0]) < GET_MODE_BITSIZE (GET_MODE (operands[0])))
FAIL;
(match_operand:SI 2 "const_int_operand")] ;; model
""
{
! enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (MEM_ALIGN (operands[0]) < GET_MODE_BITSIZE (GET_MODE (operands[0])))
FAIL;
***************
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1]));
else
emit_move_insn (operands[0], operands[1]);
! if (model == MEMMODEL_SEQ_CST)
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})
emit_insn (gen_atomic_storedi_1 (operands[0], operands[1]));
else
emit_move_insn (operands[0], operands[1]);
! if (is_mm_seq_cst (model))
emit_insn (gen_mem_thread_fence (operands[2]));
DONE;
})
===================================================================
*************** sparc_emit_membar_for_model (enum memmod
if (before_after & 1)
{
! if (model == MEMMODEL_RELEASE
! || model == MEMMODEL_ACQ_REL
! || model == MEMMODEL_SEQ_CST)
{
if (load_store & 1)
mm |= LoadLoad | StoreLoad;
if (before_after & 1)
{
! if (is_mm_release (model) || is_mm_acq_rel (model)
! || is_mm_seq_cst (model))
{
if (load_store & 1)
mm |= LoadLoad | StoreLoad;
*************** sparc_emit_membar_for_model (enum memmod
}
if (before_after & 2)
{
! if (model == MEMMODEL_ACQUIRE
! || model == MEMMODEL_ACQ_REL
! || model == MEMMODEL_SEQ_CST)
{
if (load_store & 1)
mm |= LoadLoad | LoadStore;
}
if (before_after & 2)
{
! if (is_mm_acquire (model) || is_mm_acq_rel (model)
! || is_mm_seq_cst (model))
{
if (load_store & 1)
mm |= LoadLoad | LoadStore;
===================================================================
*************** functions map any run-time value to @cod
than invoke a runtime library call or inline a switch statement. This is
standard compliant, safe, and the simplest approach for now.
! The memory model parameter is a signed int, but only the lower 8 bits are
reserved for the memory model. The remainder of the signed int is reserved
! for future use and should be 0. Use of the predefined atomic values
ensures proper usage.
@deftypefn {Built-in Function} @var{type} __atomic_load_n (@var{type} *ptr, int memmodel)
than invoke a runtime library call or inline a switch statement. This is
standard compliant, safe, and the simplest approach for now.
! The memory model parameter is a signed int, but only the lower 16 bits are
reserved for the memory model. The remainder of the signed int is reserved
! for target use and should be 0. Use of the predefined atomic values
ensures proper usage.
@deftypefn {Built-in Function} @var{type} __atomic_load_n (@var{type} *ptr, int memmodel)