
[Target maintainers]: Please update libjava/sysdep/*/locks.h with new atomic builtins

Message ID CAGWvnykyN4z5pxgtNH3xZyrR8GA7S6PrDMVeM-f3VBDraQyu2A@mail.gmail.com
State New

Commit Message

David Edelsohn June 20, 2012, 1:10 p.m. UTC
Alan and I both re-implemented the locks and settled on the following
patch.  This uses the __atomic intrinsics, not the __sync intrinsics,
to avoid generating expensive instructions for a memory model that is
stricter than necessary.
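
For illustration (a hypothetical fragment, not part of the patch): the
__sync builtins imply full-barrier semantics, while the __atomic
builtins take an explicit memory-order argument, so the compiler can
emit weaker synchronization when that is all the algorithm requires:

  /* Sketch assuming GCC 4.7+ __atomic support; 'word' is hypothetical.  */
  #include <cstddef>

  static size_t word;

  bool
  cas_full_barrier (size_t old, size_t new_val)
  {
    /* Full barrier implied; on PowerPC this brackets the larx/stcx.
       loop with sync/isync.  */
    return __sync_bool_compare_and_swap (&word, old, new_val);
  }

  bool
  cas_acquire (size_t old, size_t new_val)
  {
    /* Acquire on success, relaxed on failure; no leading sync needed.  */
    return __atomic_compare_exchange_n (&word, &old, new_val, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  }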

If these intrinsics correctly represent the semantics of the libjava
barriers, this implementation probably can serve as a generic one for
targets that support the __atomic intrinsics.
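
For instance, a toy spinlock built on these primitives (a sketch only;
lock_word and the toy_* helpers are hypothetical) pairs the acquire
CAS with the release store the same way libjava's locking code does:

  #include <cstddef>

  typedef size_t obj_addr_t;

  static volatile obj_addr_t lock_word;  /* 0 = free, 1 = held */

  inline static void
  toy_lock ()
  {
    obj_addr_t old = 0;
    /* Acquire: operations in the critical section cannot float above
       a successful compare-and-swap.  */
    while (!__atomic_compare_exchange_n (&lock_word, &old, 1, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
      old = 0;  /* a failed CAS stores the observed value into old */
  }

  inline static void
  toy_unlock ()
  {
    /* Release: prior operations complete before the lock is dropped.  */
    __atomic_store_n (&lock_word, 0, __ATOMIC_RELEASE);
  }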

- David

2012-06-20  David Edelsohn  <dje.gcc@gmail.com>
            Alan Modra  <amodra@gmail.com>

        * sysdep/powerpc/locks.h (compare_and_swap): Use GCC atomic
        intrinsics.
        (release_set): Same.
        (compare_and_swap_release): Same.
        (read_barrier): Same.
        (write_barrier): Same.

Comments

Alan Modra June 20, 2012, 1:35 p.m. UTC | #1
On Wed, Jun 20, 2012 at 09:10:44AM -0400, David Edelsohn wrote:
>  inline static void
>  release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
>  {
> -  __asm__ __volatile__ ("sync" : : : "memory");
> -  *addr = new_val;
> +  __atomic_store_n(addr, val, __ATOMIC_RELEASE);

A typo seems to have crept in here.  s/val/new_val/
David Edelsohn June 20, 2012, 1:49 p.m. UTC | #2
On Wed, Jun 20, 2012 at 9:35 AM, Alan Modra <amodra@gmail.com> wrote:
> On Wed, Jun 20, 2012 at 09:10:44AM -0400, David Edelsohn wrote:
>>  inline static void
>>  release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
>>  {
>> -  __asm__ __volatile__ ("sync" : : : "memory");
>> -  *addr = new_val;
>> +  __atomic_store_n(addr, val, __ATOMIC_RELEASE);
>
> A typo seems to have crept in here.  s/val/new_val/

Fixed.

Thanks, David

Patch

Index: locks.h
===================================================================
--- locks.h	(revision 188778)
+++ locks.h	(working copy)
@@ -11,87 +11,63 @@ 
 #ifndef __SYSDEP_LOCKS_H__
 #define __SYSDEP_LOCKS_H__
 
-#ifdef __LP64__
-#define _LARX "ldarx "
-#define _STCX "stdcx. "
-#else
-#define _LARX "lwarx "
-#ifdef __PPC405__
-#define _STCX "sync; stwcx. "
-#else
-#define _STCX "stwcx. "
-#endif
-#endif
-
 typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
 				/* address.				*/
 
+// Atomically replace *addr by new_val if it was initially equal to old.
+// Return true if the comparison succeeded.
+// Assumed to have acquire semantics, i.e. later memory operations
+// cannot execute before the compare_and_swap finishes.
+
 inline static bool
-compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
+compare_and_swap (volatile obj_addr_t *addr,
+		  obj_addr_t old,
 		  obj_addr_t new_val) 
 {
-  obj_addr_t ret;
-
-  __asm__ __volatile__ (
-	   "      " _LARX "%0,0,%1 \n"
-	   "      xor. %0,%3,%0\n"
-	   "      bne $+12\n"
-	   "      " _STCX "%2,0,%1\n"
-	   "      bne- $-16\n"
-	: "=&r" (ret)
-	: "r" (addr), "r" (new_val), "r" (old)
-	: "cr0", "memory");
-
-  /* This version of __compare_and_swap is to be used when acquiring
-     a lock, so we don't need to worry about whether other memory
-     operations have completed, but we do need to be sure that any loads
-     after this point really occur after we have acquired the lock.  */
-  __asm__ __volatile__ ("isync" : : : "memory");
-  return ret == 0;
+  return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+				      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 }
 
+
+// Set *addr to new_val with release semantics, i.e. making sure
+// that prior loads and stores complete before this
+// assignment.
+
 inline static void
 release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
 {
-  __asm__ __volatile__ ("sync" : : : "memory");
-  *addr = new_val;
+  __atomic_store_n(addr, new_val, __ATOMIC_RELEASE);
 }
 
+
+// Compare_and_swap with release semantics instead of acquire semantics.
+
 inline static bool
 compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
 			  obj_addr_t new_val)
 {
-  obj_addr_t ret;
-
-  __asm__ __volatile__ ("sync" : : : "memory");
-
-  __asm__ __volatile__ (
-	   "      " _LARX "%0,0,%1 \n"
-	   "      xor. %0,%3,%0\n"
-	   "      bne $+12\n"
-	   "      " _STCX "%2,0,%1\n"
-	   "      bne- $-16\n"
-	: "=&r" (ret)
-	: "r" (addr), "r" (new_val), "r" (old)
-	: "cr0", "memory");
-
-  return ret == 0;
+  return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+				      __ATOMIC_RELEASE, __ATOMIC_RELAXED);
 }
 
+
 // Ensure that subsequent instructions do not execute on stale
 // data that was loaded from memory before the barrier.
+
 inline static void
 read_barrier ()
 {
-  __asm__ __volatile__ ("isync" : : : "memory");
+  __atomic_thread_fence (__ATOMIC_ACQUIRE);
 }
 
+
 // Ensure that prior stores to memory are completed with respect to other
 // processors.
+
 inline static void
 write_barrier ()
 {
-  __asm__ __volatile__ ("sync" : : : "memory");
+  __atomic_thread_fence (__ATOMIC_RELEASE);
 }
 
 #endif
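
As a final sketch (hypothetical, not from the thread): the acquire and
release fences that replace isync and sync pair up in the usual
publish/consume pattern, which is the property the read_barrier and
write_barrier comments describe:

  #include <cstddef>

  typedef size_t obj_addr_t;

  static int payload;
  static volatile obj_addr_t ready;

  static void
  publish (int v)
  {
    payload = v;
    __atomic_thread_fence (__ATOMIC_RELEASE);  /* write_barrier; was sync  */
    ready = 1;
  }

  static int
  consume ()
  {
    while (!ready)
      ;  /* spin until the flag is published */
    __atomic_thread_fence (__ATOMIC_ACQUIRE);  /* read_barrier; was isync  */
    return payload;
  }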