
Fix c++/60272

Message ID 5306402A.40402@redhat.com
State New

Commit Message

Richard Henderson Feb. 20, 2014, 5:49 p.m. UTC
As described in the PR, we were performing an unconditional store back to the
EXPECT parameter.  This is fine so long as EXPECT is not in thread-shared
memory, e.g. when it is a local variable.  But the example in the PR uses
shared memory, where the extra store introduces a race condition.

I've left a note in the code about fixing this conditional store once we have
amacleod's gimple atomic stuff committed, where we'd be able to tell (at some
point during optimization) whether EXPECT is local, e.g. an SSA_NAME.

Tested on x86_64 and i686, and manually inspecting the generated code.
Any ideas how to regression test this?


r~
gcc/
	* builtins.c (expand_builtin_atomic_compare_exchange): Conditionalize
	the store back into EXPECT on failure.
libatomic/
	* cas_n.c (libat_compare_exchange): Conditionalize the store back
	to EPTR on failure.

Comments

Jakub Jelinek Feb. 20, 2014, 6:09 p.m. UTC | #1
On Thu, Feb 20, 2014 at 11:49:30AM -0600, Richard Henderson wrote:
> Tested on x86_64 and i686, and manually inspecting the generated code.
> Any ideas how to regression test this?

No idea about how to test this.

> @@ -5330,14 +5330,23 @@ expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
>    if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0)
>      is_weak = true;
>  
> +  if (target == const0_rtx)
> +    target = NULL;
>    oldval = expect;
> -  if (!expand_atomic_compare_and_swap ((target == const0_rtx ? NULL : &target),
> -				       &oldval, mem, oldval, desired,
> +
> +  if (!expand_atomic_compare_and_swap (&target, &oldval, mem, oldval, desired,

I'm wondering if this shouldn't be instead:
  oldval = NULL;
  if (!expand_atomic_compare_and_swap (&target, &oldval, mem, expected, desired,
  				       is_weak, success, failure))

because otherwise expand_atomic_compare_and_swap could in theory already
store into expect MEM, couldn't it?  I mean, it does:
  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);
so with NULL *ptarget_oval it will surely allocate a pseudo, but if
*ptarget_oval is the expected MEM, then, since expected has been forced into
a register earlier, I don't think the MEM overlaps that REG, and thus the
result can already be stored into it, leaving oldval == expect after the call.
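
Restated as a C analogy (hypothetical names; the real code is RTL):

  #include <stddef.h>

  /* The overlap check runs against the fresh copy of the input, so it
     can never reject dst == the caller's original slot, and that slot
     may end up receiving the result after all.  */
  static int *
  pick_result_slot (int *dst, int *src_copy, int *scratch)
  {
    if (dst == NULL || dst == src_copy)	/* cf. reg_overlap_mentioned_p  */
      return scratch;			/* cf. gen_reg_rtx (mode)  */
    return dst;				/* may still be the expect MEM  */
  }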

	Jakub
Richard Henderson Feb. 20, 2014, 7:22 p.m. UTC | #2
On 02/20/2014 12:09 PM, Jakub Jelinek wrote:
> On Thu, Feb 20, 2014 at 11:49:30AM -0600, Richard Henderson wrote:
>> Tested on x86_64 and i686, and manually inspecting the generated code.
>> Any ideas how to regression test this?
> 
> No idea about how to test this.
> 
>> @@ -5330,14 +5330,23 @@ expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
>>    if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0)
>>      is_weak = true;
>>  
>> +  if (target == const0_rtx)
>> +    target = NULL;
>>    oldval = expect;
>> -  if (!expand_atomic_compare_and_swap ((target == const0_rtx ? NULL : &target),
>> -				       &oldval, mem, oldval, desired,
>> +
>> +  if (!expand_atomic_compare_and_swap (&target, &oldval, mem, oldval, desired,
> 
> I'm wondering if this shouldn't be instead:
>   oldval = NULL;
>   if (!expand_atomic_compare_and_swap (&target, &oldval, mem, expected, desired,
>   				       is_weak, success, failure))
> 
> because otherwise expand_atomic_compare_and_swap could in theory already
> store into expect MEM, couldn't it?  I mean, it does:
>   /* Load expected into a register for the compare and swap.  */
>   if (MEM_P (expected))
>     expected = copy_to_reg (expected);
> 
>   /* Make sure we always have some place to put the return oldval.
>      Further, make sure that place is distinct from the input expected,
>      just in case we need that path down below.  */
>   if (ptarget_oval == NULL
>       || (target_oval = *ptarget_oval) == NULL
>       || reg_overlap_mentioned_p (expected, target_oval))
>     target_oval = gen_reg_rtx (mode);
> so with NULL *ptarget_oval it will surely allocate a pseudo, but if
> *ptarget_oval is the expected MEM, then, since expected has been forced into
> a register earlier, I don't think the MEM overlaps that REG, and thus the
> result can already be stored into it, leaving oldval == expect after the call.

I don't know of any target that actually accepts a MEM for oldval, and since
the current definition of __atomic_compare_and_swap_n takes an address for
expected, we'll always have a MEM.  So at present we'll always allocate a new
pseudo, just as if we had zeroed out oldval.
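
At the source level the builtin takes expected by address, so expansion
always sees a MEM for it; a hypothetical use:

  #include <stdbool.h>

  bool
  f (long *mem, long *expect, long desired)
  {
    return __atomic_compare_exchange_n (mem, expect, desired, false,
					__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }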

But, fair enough.  It does seem generally safer your way.
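
In source terms the conditionalized expansion behaves roughly like this
sketch (hypothetical wrapper; oldval lives in private storage):

  #include <stdbool.h>

  bool
  cas_fixed (long *mem, long *expect, long desired)
  {
    long oldval = *expect;	/* private copy; safe to store into  */
    bool ok = __atomic_compare_exchange_n (mem, &oldval, desired, false,
					   __ATOMIC_SEQ_CST,
					   __ATOMIC_SEQ_CST);
    if (!ok)
      *expect = oldval;		/* store back only on failure  */
    return ok;
  }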


r~

Patch

diff --git a/gcc/builtins.c b/gcc/builtins.c
index f5f55bf..09fefe50 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -5292,7 +5292,7 @@  static rtx
 expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp, 
 					rtx target)
 {
-  rtx expect, desired, mem, oldval;
+  rtx expect, desired, mem, oldval, label;
   enum memmodel success, failure;
   tree weak;
   bool is_weak;
@@ -5330,14 +5330,23 @@  expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
   if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0)
     is_weak = true;
 
+  if (target == const0_rtx)
+    target = NULL;
   oldval = expect;
-  if (!expand_atomic_compare_and_swap ((target == const0_rtx ? NULL : &target),
-				       &oldval, mem, oldval, desired,
+
+  if (!expand_atomic_compare_and_swap (&target, &oldval, mem, oldval, desired,
 				       is_weak, success, failure))
     return NULL_RTX;
 
-  if (oldval != expect)
-    emit_move_insn (expect, oldval);
+  /* Conditionally store back to EXPECT, lest we create a race condition
+     with an improper store to memory.  */
+  /* ??? With a rearrangement of atomics at the gimple level, we can handle
+     the normal case where EXPECT is totally private, i.e. a register.  At
+     which point the store can be unconditional.  */
+  label = gen_label_rtx ();
+  emit_cmp_and_jump_insns (target, const0_rtx, NE, NULL, VOIDmode, 1, label);
+  emit_move_insn (expect, oldval);
+  emit_label (label);
 
   return target;
 }
diff --git a/libatomic/cas_n.c b/libatomic/cas_n.c
index 39c7833..801262d 100644
--- a/libatomic/cas_n.c
+++ b/libatomic/cas_n.c
@@ -51,10 +51,9 @@  SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
 #if !DONE && N <= WORDSIZE && defined(atomic_compare_exchange_w)
 bool
 SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
-			      int smodel, int fmodel UNUSED)
+			      int smodel, int fmodel)
 {
   UWORD mask, shift, weval, woldval, wnewval, t, *wptr;
-  bool ret = false;
 
   pre_barrier (smodel);
 
@@ -82,12 +81,13 @@  SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
     }
   while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
 				     __ATOMIC_RELAXED, __ATOMIC_RELAXED));
-  ret = true;
+  post_barrier (smodel);
+  return true;
+
  failure:
   *eptr = woldval >> shift;
-
-  post_barrier (smodel);
-  return ret;
+  post_barrier (fmodel);
+  return false;
 }
 
 #define DONE 1
@@ -102,18 +102,17 @@  SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
 {
   UTYPE oldval;
   UWORD magic;
-  bool ret = false;
+  bool ret;
 
   pre_seq_barrier (smodel);
   magic = protect_start (mptr);
 
   oldval = *mptr;
-  if (oldval == *eptr)
-    {
-      *mptr = newval;
-      ret = true;
-    }
-  *eptr = oldval;
+  ret = (oldval == *eptr);
+  if (ret)
+    *mptr = newval;
+  else
+    *eptr = oldval;
 
   protect_end (mptr, magic);
   post_seq_barrier (smodel);