[7/7] Remove atomic_decrement/decrement_val/decrement_and_test

Message ID AM5PR0801MB166819C77A12BE32CA1AE4D783809@AM5PR0801MB1668.eurprd08.prod.outlook.com
State New
Series: [1/7] Use atomic_exchange_release/acquire

Commit Message

Wilco Dijkstra July 6, 2022, 3:20 p.m. UTC
Replace atomic_decrement, atomic_decrement_val and atomic_decrement_and_test with atomic_fetch_add_relaxed, update all callers accordingly, and remove the now-unused generic and per-architecture definitions together with the corresponding tests in misc/tst-atomic.c.

---
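Note for reviewers: the rewrite follows mechanically from the old helpers.  atomic_fetch_add_relaxed returns the value before the addition, so "decrement and test for zero" becomes a comparison of the old value against 1, and "decrement and return the new value" subtracts 1 from the old value.  A minimal sketch of the two mappings using C11 atomics (the helper names below are illustrative only; inside glibc atomic_fetch_add_relaxed plays the role of atomic_fetch_add_explicit with memory_order_relaxed):

#include <stdatomic.h>

/* Old atomic_decrement_and_test (mem): true iff the decremented value is zero.  */
static inline _Bool
decrement_and_test (atomic_int *mem)
{
  return atomic_fetch_add_explicit (mem, -1, memory_order_relaxed) == 1;
}

/* Old atomic_decrement_val (mem): return the decremented (new) value.  */
static inline int
decrement_val (atomic_int *mem)
{
  return atomic_fetch_add_explicit (mem, -1, memory_order_relaxed) - 1;
}

The first mapping is what the pt-exit.c and pthread_create.c hunks use; the second shows why "atomic_decrement_val (mem) == 0" in the nscd callers becomes "atomic_fetch_add_relaxed (mem, -1) == 1".
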

Patch

diff --git a/htl/pt-create.c b/htl/pt-create.c
index 14f02cd2b8a19e8581a170dfba2b948ef8304203..5d37edbbffb16b7cbb6db133016653227fc45f47 100644
--- a/htl/pt-create.c
+++ b/htl/pt-create.c
@@ -262,7 +262,7 @@  failed_starting:
     }
 
   __pthread_setid (pthread->thread, NULL);
-  atomic_decrement (&__pthread_total);
+  atomic_fetch_add_relaxed (&__pthread_total, -1);
 failed_sigstate:
   __pthread_sigstate_destroy (pthread);
 failed_setup:
diff --git a/htl/pt-dealloc.c b/htl/pt-dealloc.c
index c776e3471dc376977ca7058f0d143f0a842d5799..86bbb3091fbd3758b06294ca705dc774cf8bf0eb 100644
--- a/htl/pt-dealloc.c
+++ b/htl/pt-dealloc.c
@@ -33,7 +33,7 @@  extern pthread_mutex_t __pthread_free_threads_lock;
 void
 __pthread_dealloc (struct __pthread *pthread)
 {
-  if (!atomic_decrement_and_test (&pthread->nr_refs))
+  if (atomic_fetch_add_relaxed (&pthread->nr_refs, -1) != 1)
     return;
 
   /* Withdraw this thread from the thread ID lookup table.  */
diff --git a/htl/pt-exit.c b/htl/pt-exit.c
index f0759c8738e6da4c8e0ba47f6d15720203f6954e..3c0a8c52f34ed64b94ce71a0764512cb76351a3a 100644
--- a/htl/pt-exit.c
+++ b/htl/pt-exit.c
@@ -50,7 +50,7 @@  __pthread_exit (void *status)
 
   /* Decrease the number of threads.  We use an atomic operation to
      make sure that only the last thread calls `exit'.  */
-  if (atomic_decrement_and_test (&__pthread_total))
+  if (atomic_fetch_add_relaxed (&__pthread_total, -1) == 1)
     /* We are the last thread.  */
     exit (0);
 
diff --git a/include/atomic.h b/include/atomic.h
index 50778792e444238dddceb95386afd3d70450b2a1..0f31ea77ba2095ea461bf84f89c9987317f63b35 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -182,27 +182,6 @@ 
   } while (0)
 #endif
 
-#ifndef atomic_add
-# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
-#endif
-
-
-#ifndef atomic_decrement
-# define atomic_decrement(mem) atomic_add ((mem), -1)
-#endif
-
-
-#ifndef atomic_decrement_val
-# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
-#endif
-
-
-/* Subtract 1 from *MEM and return true iff it's now zero.  */
-#ifndef atomic_decrement_and_test
-# define atomic_decrement_and_test(mem) \
-  (atomic_exchange_and_add ((mem), -1) == 1)
-#endif
-
 
 /* Decrement *MEM if it is > 0, and return the old value.  */
 #ifndef atomic_decrement_if_positive
diff --git a/manual/ipc.texi b/manual/ipc.texi
index f7cbdc3e09b0b4aea9a96ddcaf571c474024cc32..be74664af9c146dd3024b080e90b7ca9834b14ef 100644
--- a/manual/ipc.texi
+++ b/manual/ipc.texi
@@ -89,7 +89,7 @@  by @theglibc{}.
 @c
 @c Given the use atomic operations this function seems
 @c to be AS-safe.  It is AC-unsafe because there is still
-@c a window between atomic_decrement and the pthread_push
+@c a window between atomic_fetch_add_relaxed and the pthread_push
 @c of the handler that undoes that operation.  A cancellation
 @c at that point would fail to remove the process from the
 @c waiters count.
diff --git a/manual/llio.texi b/manual/llio.texi
index 4e6e3fb672bb8ecbb1bb52faf3d70d9b7b33973f..1b801ee817db2935d8866894be23ffa516690ca3 100644
--- a/manual/llio.texi
+++ b/manual/llio.texi
@@ -2569,7 +2569,7 @@  aiocb64}, since the LFS transparently replaces the old interface.
 @c      lll_lock (pd->lock) @asulock @aculock
 @c      atomic_fetch_add_relaxed ok
 @c      clone ok
-@c      atomic_decrement ok
+@c      atomic_fetch_add_relaxed ok
 @c      atomic_exchange_acquire ok
 @c      lll_futex_wake ok
 @c      deallocate_stack dup
@@ -2614,7 +2614,7 @@  aiocb64}, since the LFS transparently replaces the old interface.
 @c      free @ascuheap @acsmem
 @c     libc_thread_freeres
 @c      libc_thread_subfreeres ok
-@c     atomic_decrement_and_test ok
+@c     atomic_fetch_add_relaxed ok
 @c     td_eventword ok
 @c     td_eventmask ok
 @c     atomic_compare_exchange_bool_acq ok
diff --git a/manual/memory.texi b/manual/memory.texi
index abc867eaebff732b2d7cc086d9f5e3430d425295..110e736a64c667988f4ca2fe92deb409225a4a88 100644
--- a/manual/memory.texi
+++ b/manual/memory.texi
@@ -397,7 +397,7 @@  this function is in @file{stdlib.h}.
 @c     mutex_lock (list_lock) dup @asulock @aculock
 @c     atomic_write_barrier ok
 @c     mutex_unlock (list_lock) @aculock
-@c    atomic_decrement ok
+@c    atomic_fetch_add_relaxed ok
 @c    reused_arena @asulock @aculock
 @c      reads&writes next_to_use and iterates over arena next without guards
 @c      those are harmless as long as we don't drop arenas from the
diff --git a/misc/tst-atomic.c b/misc/tst-atomic.c
index 67073c23c34126b87a6049b8d9ecd6cf2d9d23bc..2d1172a64d86cc81af49158085a84429c3a8f0dd 100644
--- a/misc/tst-atomic.c
+++ b/misc/tst-atomic.c
@@ -96,68 +96,6 @@  do_test (void)
       ret = 1;
     }
 
-  mem = 2;
-  if (atomic_exchange_and_add (&mem, 11) != 2
-      || mem != 13)
-    {
-      puts ("atomic_exchange_and_add test failed");
-      ret = 1;
-    }
-
-  mem = 2;
-  if (atomic_exchange_and_add_acq (&mem, 11) != 2
-      || mem != 13)
-    {
-      puts ("atomic_exchange_and_add test failed");
-      ret = 1;
-    }
-
-  mem = 2;
-  if (atomic_exchange_and_add_rel (&mem, 11) != 2
-      || mem != 13)
-    {
-      puts ("atomic_exchange_and_add test failed");
-      ret = 1;
-    }
-
-  mem = 17;
-  atomic_decrement (&mem);
-  if (mem != 16)
-    {
-      puts ("atomic_decrement test failed");
-      ret = 1;
-    }
-
-  if (atomic_decrement_val (&mem) != 15)
-    {
-      puts ("atomic_decrement_val test failed");
-      ret = 1;
-    }
-
-  mem = 0;
-  if (atomic_decrement_and_test (&mem)
-      || mem != -1)
-    {
-      puts ("atomic_decrement_and_test test 1 failed");
-      ret = 1;
-    }
-
-  mem = 15;
-  if (atomic_decrement_and_test (&mem)
-      || mem != 14)
-    {
-      puts ("atomic_decrement_and_test test 2 failed");
-      ret = 1;
-    }
-
-  mem = 1;
-  if (! atomic_decrement_and_test (&mem)
-      || mem != 0)
-    {
-      puts ("atomic_decrement_and_test test 3 failed");
-      ret = 1;
-    }
-
   mem = 1;
   if (atomic_decrement_if_positive (&mem) != 1
       || mem != 0)
diff --git a/nptl/cond-perf.c b/nptl/cond-perf.c
index 9c9488e2743f17eb4beeaadf45ef83a38a9f9bf9..baf69e6d34fdb3ffef97b32fb9db422077a0359b 100644
--- a/nptl/cond-perf.c
+++ b/nptl/cond-perf.c
@@ -24,7 +24,7 @@  cons (void *arg)
 
   do
     {
-      if (atomic_decrement_and_test (&ntogo))
+      if (atomic_fetch_add_relaxed (&ntogo, -1) == 1)
 	{
 	  pthread_mutex_lock (&mut2);
 	  alldone = true;
diff --git a/nptl/nptl_setxid.c b/nptl/nptl_setxid.c
index 3b7e2d434abe8a15145349d1a08a4e706061c74d..301809d200d1ac6c89fbf100e553c5713f326ed7 100644
--- a/nptl/nptl_setxid.c
+++ b/nptl/nptl_setxid.c
@@ -88,7 +88,7 @@  __nptl_setxid_sighandler (int sig, siginfo_t *si, void *ctx)
   self->setxid_futex = 1;
   futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);
 
-  if (atomic_decrement_val (&xidcmd->cntr) == 0)
+  if (atomic_fetch_add_relaxed (&xidcmd->cntr, -1) == 1)
     futex_wake ((unsigned int *) &xidcmd->cntr, 1, FUTEX_PRIVATE);
 }
 libc_hidden_def (__nptl_setxid_sighandler)
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 6542c3183eda33b145aa80adcb620c7a19450aa2..ad84eeda2362e2a6b9e0f8d85911afa52d3ce373 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -489,7 +489,7 @@  start_thread (void *arg)
      the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
   atomic_fetch_or_acquire (&pd->cancelhandling, 1 << EXITING_BIT);
 
-  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
+  if (__glibc_unlikely (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) == 1))
     /* This was the last thread.  */
     exit (0);
 
@@ -861,7 +861,7 @@  __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
 	 NOTES above).  */
 
       /* Oops, we lied for a second.  */
-      atomic_decrement (&__nptl_nthreads);
+      atomic_fetch_add_relaxed (&__nptl_nthreads, -1);
 
       /* Free the resources.  */
       __nptl_deallocate_stack (pd);
diff --git a/nscd/nscd-client.h b/nscd/nscd-client.h
index fee2a15dcc323258cecd45c5fb8e472da9161792..ca9e6def1a88ff14c5e8b39f0e236aa8a30f95ae 100644
--- a/nscd/nscd-client.h
+++ b/nscd/nscd-client.h
@@ -421,7 +421,7 @@  __nscd_drop_map_ref (struct mapped_database *map, int *gc_cycle)
 	  return -1;
 	}
 
-      if (atomic_decrement_val (&map->counter) == 0)
+      if (atomic_fetch_add_relaxed (&map->counter, -1) == 1)
 	__nscd_unmap (map);
     }
 
diff --git a/nscd/nscd_getai.c b/nscd/nscd_getai.c
index a99a4d8142b89521f3ca1759f42d5d88e1103f61..8e4650ebaedc6d79bd5ccda6721551b0227dbbb5 100644
--- a/nscd/nscd_getai.c
+++ b/nscd/nscd_getai.c
@@ -198,7 +198,7 @@  __nscd_getai (const char *key, struct nscd_ai_result **result, int *h_errnop)
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
diff --git a/nscd/nscd_getgr_r.c b/nscd/nscd_getgr_r.c
index 3636c031ec9403c7b447ebc52eecb2c60d74740f..bde3b588a01274730e1b1d817dfff62fd4f60c15 100644
--- a/nscd/nscd_getgr_r.c
+++ b/nscd/nscd_getgr_r.c
@@ -312,7 +312,7 @@  nscd_getgr_r (const char *key, size_t keylen, request_type type,
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
diff --git a/nscd/nscd_gethst_r.c b/nscd/nscd_gethst_r.c
index 9becb620335d14d7becebe6c29ad96cc4e7463ee..31d13580a11e64792d6f02ecd7437884d4436eb3 100644
--- a/nscd/nscd_gethst_r.c
+++ b/nscd/nscd_gethst_r.c
@@ -440,7 +440,7 @@  nscd_gethst_r (const char *key, size_t keylen, request_type type,
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
diff --git a/nscd/nscd_getpw_r.c b/nscd/nscd_getpw_r.c
index 20986f44332fca42d2f43794b4c365f6b3968cae..82fdd17d8c39287a9d450ff25802116028dc9475 100644
--- a/nscd/nscd_getpw_r.c
+++ b/nscd/nscd_getpw_r.c
@@ -225,7 +225,7 @@  nscd_getpw_r (const char *key, size_t keylen, request_type type,
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
diff --git a/nscd/nscd_getserv_r.c b/nscd/nscd_getserv_r.c
index 42b875d024c674a6c46dd28be8308a8d5d650ca6..de843b33631e2c57ac4e0192c6f1027c586cf329 100644
--- a/nscd/nscd_getserv_r.c
+++ b/nscd/nscd_getserv_r.c
@@ -365,7 +365,7 @@  nscd_getserv_r (const char *crit, size_t critlen, const char *proto,
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
diff --git a/nscd/nscd_helper.c b/nscd/nscd_helper.c
index d3e05e272a854e13c0e4d18594ea57336b8db2bf..fc41bfdb6eebb880d6132ea5cf409ca657570f82 100644
--- a/nscd/nscd_helper.c
+++ b/nscd/nscd_helper.c
@@ -390,7 +390,7 @@  __nscd_get_mapping (request_type type, const char *key,
   struct mapped_database *oldval = *mappedp;
   *mappedp = result;
 
-  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
+  if (oldval != NULL && atomic_fetch_add_relaxed (&oldval->counter, -1) == 1)
     __nscd_unmap (oldval);
 
   return result;
diff --git a/nscd/nscd_initgroups.c b/nscd/nscd_initgroups.c
index dfce76a06090d5bd2869f8a239bdef51866bd854..47b6deb0699eaa984691388d146ae5e01ddd83ab 100644
--- a/nscd/nscd_initgroups.c
+++ b/nscd/nscd_initgroups.c
@@ -166,7 +166,7 @@  __nscd_getgrouplist (const char *user, gid_t group, long int *size,
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
diff --git a/nscd/nscd_netgroup.c b/nscd/nscd_netgroup.c
index 7e51dd3d941e383ab97bc0f70dd4dda65c340e0b..11b7f3214c2fb08f2e08df86f0d984e7bfc9bda2 100644
--- a/nscd/nscd_netgroup.c
+++ b/nscd/nscd_netgroup.c
@@ -148,7 +148,7 @@  __nscd_setnetgrent (const char *group, struct __netgrent *datap)
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
@@ -272,7 +272,7 @@  __nscd_innetgr (const char *netgroup, const char *host, const char *user,
       if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
 	{
 	  /* nscd is just running gc now.  Disable using the mapping.  */
-	  if (atomic_decrement_val (&mapped->counter) == 0)
+	  if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
 	    __nscd_unmap (mapped);
 	  mapped = NO_MAPPING;
 	}
diff --git a/sysdeps/aarch64/atomic-machine.h b/sysdeps/aarch64/atomic-machine.h
index 459deeec18713a374bbe186bf84b0c366896a8d7..a7a600c86fa8ac6496d04a36c779542f76e7d7c9 100644
--- a/sysdeps/aarch64/atomic-machine.h
+++ b/sysdeps/aarch64/atomic-machine.h
@@ -103,28 +103,6 @@ 
   __atomic_val_bysize (__arch_compare_and_exchange_val, int,    \
                        mem, new, old, __ATOMIC_RELEASE)
 
-/* Atomically add value and return the previous (unincremented) value.  */
-
-# define __arch_exchange_and_add_8_int(mem, value, model)	\
-  __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_16_int(mem, value, model)	\
-  __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_32_int(mem, value, model)	\
-  __atomic_fetch_add (mem, value, model)
-
-#  define __arch_exchange_and_add_64_int(mem, value, model)	\
-  __atomic_fetch_add (mem, value, model)
-
-# define atomic_exchange_and_add_acq(mem, value)			\
-  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,	\
-		       __ATOMIC_ACQUIRE)
-
-# define atomic_exchange_and_add_rel(mem, value)			\
-  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,	\
-		       __ATOMIC_RELEASE)
-
 /* Barrier macro. */
 #define atomic_full_barrier() __sync_synchronize()
 
diff --git a/sysdeps/m68k/m680x0/m68020/atomic-machine.h b/sysdeps/m68k/m680x0/m68020/atomic-machine.h
index 6e454c5ceddd4e82b88b71203a94549bbb20ed20..8460fb61072dce030957b029d7c180de13089481 100644
--- a/sysdeps/m68k/m680x0/m68020/atomic-machine.h
+++ b/sysdeps/m68k/m680x0/m68020/atomic-machine.h
@@ -124,66 +124,3 @@ 
 			   : "memory");					      \
        }								      \
      __result; })
-
-#define atomic_add(mem, value) \
-  (void) ({ if (sizeof (*(mem)) == 1)					      \
-	      __asm __volatile ("add%.b %1,%0"				      \
-				: "+m" (*(mem))				      \
-				: "id" (value));			      \
-	    else if (sizeof (*(mem)) == 2)				      \
-	      __asm __volatile ("add%.w %1,%0"				      \
-				: "+m" (*(mem))				      \
-				: "id" (value));			      \
-	    else if (sizeof (*(mem)) == 4)				      \
-	      __asm __volatile ("add%.l %1,%0"				      \
-				: "+m" (*(mem))				      \
-				: "id" (value));			      \
-	    else							      \
-	      {								      \
-		__typeof (mem) __memp = (mem);				      \
-		__typeof (*(mem)) __oldval = *__memp;			      \
-		__typeof (*(mem)) __temp;				      \
-		__asm __volatile ("1: move%.l %0,%1;"			      \
-				  "   move%.l %R0,%R1;"			      \
-				  "   add%.l %R2,%R1;"			      \
-				  "   addx%.l %2,%1;"			      \
-				  "   cas2%.l %0:%R0,%1:%R1,(%3):(%4);"	      \
-				  "   jbne 1b"				      \
-				  : "=d" (__oldval), "=&d" (__temp)	      \
-				  : "d" ((__typeof (*(mem))) (value)),	      \
-				    "r" (__memp), "r" ((char *) __memp + 4),  \
-				    "0" (__oldval)			      \
-				  : "memory");				      \
-	      }								      \
-	    })
-
-#define atomic_decrement_and_test(mem) \
-  ({ char __result;							      \
-     if (sizeof (*(mem)) == 1)						      \
-       __asm __volatile ("subq%.b %#1,%1; seq %0"			      \
-			 : "=dm" (__result), "+m" (*(mem)));		      \
-     else if (sizeof (*(mem)) == 2)					      \
-       __asm __volatile ("subq%.w %#1,%1; seq %0"			      \
-			 : "=dm" (__result), "+m" (*(mem)));		      \
-     else if (sizeof (*(mem)) == 4)					      \
-       __asm __volatile ("subq%.l %#1,%1; seq %0"			      \
-			 : "=dm" (__result), "+m" (*(mem)));		      \
-     else								      \
-       {								      \
-	 __typeof (mem) __memp = (mem);					      \
-	 __typeof (*(mem)) __oldval = *__memp;				      \
-	 __typeof (*(mem)) __temp;					      \
-	 __asm __volatile ("1: move%.l %1,%2;"				      \
-			   "   move%.l %R1,%R2;"			      \
-			   "   subq%.l %#1,%R2;"			      \
-			   "   subx%.l %5,%2;"				      \
-			   "   seq %0;"					      \
-			   "   cas2%.l %1:%R1,%2:%R2,(%3):(%4);"	      \
-			   "   jbne 1b"					      \
-			   : "=&dm" (__result), "=d" (__oldval),	      \
-			     "=&d" (__temp)				      \
-			   : "r" (__memp), "r" ((char *) __memp + 4),	      \
-			     "d" (0), "1" (__oldval)			      \
-			   : "memory");					      \
-       }								      \
-     __result; })
diff --git a/sysdeps/microblaze/atomic-machine.h b/sysdeps/microblaze/atomic-machine.h
index 4f94632c5184d3154b7b3b12cd9369d25008c2d8..5781b4440bf22a747fb90c4e7cd5476f14fb8573 100644
--- a/sysdeps/microblaze/atomic-machine.h
+++ b/sysdeps/microblaze/atomic-machine.h
@@ -172,42 +172,3 @@ 
        abort ();                                                               \
     __result;                                                                  \
   })
-
-#define __arch_atomic_decrement_val_32(mem)                                    \
-  ({                                                                           \
-    __typeof (*(mem)) __val;                                                   \
-    int test;                                                                  \
-    __asm __volatile (                                                         \
-                "   addc    r0, r0, r0;"                                       \
-                "1: lwx     %0, %3, r0;"                                       \
-                "   addic   %1, r0, 0;"                                        \
-                "   bnei    %1, 1b;"                                           \
-                "   rsubi   %0, %0, 1;"                                        \
-                "   swx     %0, %3, r0;"                                       \
-                "   addic   %1, r0, 0;"                                        \
-                "   bnei    %1, 1b;"                                           \
-                    : "=&r" (__val),                                           \
-                    "=&r" (test),                                              \
-                    "=m" (*mem)                                                \
-                    : "r" (mem),                                               \
-                    "m" (*mem)                                                 \
-                    : "cc", "memory");                                         \
-    __val;                                                                     \
-  })
-
-#define __arch_atomic_decrement_val_64(mem)                                    \
-  (abort (), (__typeof (*mem)) 0)
-
-#define atomic_decrement_val(mem)                                              \
-  ({                                                                           \
-    __typeof (*(mem)) __result;                                                \
-    if (sizeof (*(mem)) == 4)                                                  \
-      __result = __arch_atomic_decrement_val_32 (mem);                         \
-    else if (sizeof (*(mem)) == 8)                                             \
-      __result = __arch_atomic_decrement_val_64 (mem);                         \
-    else                                                                       \
-       abort ();                                                               \
-    __result;                                                                  \
-  })
-
-#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
diff --git a/sysdeps/nptl/libc_start_call_main.h b/sysdeps/nptl/libc_start_call_main.h
index a9e85f2b098dfc6790d719bba9662e428ab237e6..c10a16b2c4a61e74f6bf18396935b87208df4f7e 100644
--- a/sysdeps/nptl/libc_start_call_main.h
+++ b/sysdeps/nptl/libc_start_call_main.h
@@ -65,7 +65,7 @@  __libc_start_call_main (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL),
       /* One less thread.  Decrement the counter.  If it is zero we
          terminate the entire process.  */
       result = 0;
-      if (! atomic_decrement_and_test (&__nptl_nthreads))
+      if (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) != 1)
         /* Not much left to do but to exit the thread, not the process.  */
 	while (1)
 	  INTERNAL_SYSCALL_CALL (exit, 0);
diff --git a/sysdeps/powerpc/atomic-machine.h b/sysdeps/powerpc/atomic-machine.h
index 10d94d6cbf0497f8d2b0f431efd9f79fb9b127e1..f2114322f53699009aea29b9503492b1d5a03e2e 100644
--- a/sysdeps/powerpc/atomic-machine.h
+++ b/sysdeps/powerpc/atomic-machine.h
@@ -151,19 +151,6 @@ 
     __val;								      \
   })
 
-#define __arch_atomic_decrement_val_32(mem) \
-  ({									      \
-    __typeof (*(mem)) __val;						      \
-    __asm __volatile ("1:	lwarx	%0,0,%2\n"			      \
-		      "		subi	%0,%0,1\n"			      \
-		      "		stwcx.	%0,0,%2\n"			      \
-		      "		bne-	1b"				      \
-		      : "=&b" (__val), "=m" (*mem)			      \
-		      : "b" (mem), "m" (*mem)				      \
-		      : "cr0", "memory");				      \
-    __val;								      \
-  })
-
 #define __arch_atomic_decrement_if_positive_32(mem) \
   ({ int __val, __tmp;							      \
      __asm __volatile ("1:	lwarx	%0,0,%3\n"			      \
@@ -261,21 +248,6 @@ 
     __result;								      \
   })
 
-#define atomic_decrement_val(mem) \
-  ({									      \
-    __typeof (*(mem)) __result;						      \
-    if (sizeof (*(mem)) == 4)						      \
-      __result = __arch_atomic_decrement_val_32 (mem);			      \
-    else if (sizeof (*(mem)) == 8)					      \
-      __result = __arch_atomic_decrement_val_64 (mem);			      \
-    else 								      \
-       abort ();							      \
-    __result;								      \
-  })
-
-#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
-
-
 /* Decrement *MEM if it is > 0, and return the old value.  */
 #define atomic_decrement_if_positive(mem) \
   ({ __typeof (*(mem)) __result;					      \
diff --git a/sysdeps/unix/sysv/linux/sh/atomic-machine.h b/sysdeps/unix/sysv/linux/sh/atomic-machine.h
index 6d4f8bdf1a9a96153c86c7a7fc9a796bc4feac7b..582d67db61e89d654862d9e15665f2fec94a1202 100644
--- a/sysdeps/unix/sysv/linux/sh/atomic-machine.h
+++ b/sysdeps/unix/sysv/linux/sh/atomic-machine.h
@@ -150,104 +150,3 @@ 
 	 (void) __value; \
        } \
      __result; })
-
-#define atomic_add(mem, value) \
-  (void) ({ __typeof (*(mem)) __tmp, __value = (value); \
-	    if (sizeof (*(mem)) == 1) \
-	      __asm __volatile ("\
-		mova 1f,r0\n\
-		mov r15,r1\n\
-		.align 2\n\
-		mov #(0f-1f),r15\n\
-	     0: mov.b @%1,r2\n\
-		add %0,r2\n\
-		mov.b r2,@%1\n\
-	     1: mov r1,r15"\
-		: "=&r" (__tmp) : "u" (mem), "0" (__value) \
-		: "r0", "r1", "r2", "memory"); \
-	    else if (sizeof (*(mem)) == 2) \
-	      __asm __volatile ("\
-		mova 1f,r0\n\
-		mov r15,r1\n\
-		.align 2\n\
-		mov #(0f-1f),r15\n\
-	     0: mov.w @%1,r2\n\
-		add %0,r2\n\
-		mov.w r2,@%1\n\
-	     1: mov r1,r15"\
-		: "=&r" (__tmp) : "u" (mem), "0" (__value) \
-		: "r0", "r1", "r2", "memory"); \
-	    else if (sizeof (*(mem)) == 4) \
-	      __asm __volatile ("\
-		mova 1f,r0\n\
-		mov r15,r1\n\
-		.align 2\n\
-		mov #(0f-1f),r15\n\
-	     0: mov.l @%1,r2\n\
-		add %0,r2\n\
-		mov.l r2,@%1\n\
-	     1: mov r1,r15"\
-		: "=&r" (__tmp) : "u" (mem), "0" (__value) \
-		: "r0", "r1", "r2", "memory"); \
-	    else \
-	      { \
-		__typeof (*(mem)) oldval; \
-		__typeof (mem) memp = (mem); \
-		do \
-		  oldval = *memp; \
-		while (__arch_compare_and_exchange_val_64_acq \
-			(memp, oldval + __value, oldval) == oldval); \
-		(void) __value; \
-	      } \
-	    })
-
-#define atomic_add_zero(mem, value) \
-  ({ unsigned char __result; \
-     __typeof (*(mem)) __tmp, __value = (value); \
-     if (sizeof (*(mem)) == 1) \
-       __asm __volatile ("\
-	  mova 1f,r0\n\
-	  mov r15,r1\n\
-	  .align 2\n\
-	  mov #(0f-1f),r15\n\
-       0: mov.b @%2,r2\n\
-	  add %1,r2\n\
-	  mov.b r2,@%2\n\
-       1: mov r1,r15\n\
-	  tst r2,r2\n\
-	  movt %0"\
-	: "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
-	: "r0", "r1", "r2", "t", "memory"); \
-     else if (sizeof (*(mem)) == 2) \
-       __asm __volatile ("\
-	  mova 1f,r0\n\
-	  mov r15,r1\n\
-	  .align 2\n\
-	  mov #(0f-1f),r15\n\
-       0: mov.w @%2,r2\n\
-	  add %1,r2\n\
-	  mov.w r2,@%2\n\
-       1: mov r1,r15\n\
-	  tst r2,r2\n\
-	  movt %0"\
-	: "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
-	: "r0", "r1", "r2", "t", "memory"); \
-     else if (sizeof (*(mem)) == 4) \
-       __asm __volatile ("\
-	  mova 1f,r0\n\
-	  mov r15,r1\n\
-	  .align 2\n\
-	  mov #(0f-1f),r15\n\
-       0: mov.l @%2,r2\n\
-	  add %1,r2\n\
-	  mov.l r2,@%2\n\
-       1: mov r1,r15\n\
-	  tst r2,r2\n\
-	  movt %0"\
-	: "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
-	: "r0", "r1", "r2", "t", "memory"); \
-     else \
-       abort (); \
-     __result; })
-
-#define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1)
diff --git a/sysdeps/x86/atomic-machine.h b/sysdeps/x86/atomic-machine.h
index 2f180abe9e682f9e64b471d091e455e192b156ba..2e06877034def3cc3c1cecb128cb770ac02acd78 100644
--- a/sysdeps/x86/atomic-machine.h
+++ b/sysdeps/x86/atomic-machine.h
@@ -30,9 +30,6 @@ 
 #ifdef __x86_64__
 # define __HAVE_64B_ATOMICS		1
 # define SP_REG				"rsp"
-# define SEG_REG			"fs"
-# define BR_CONSTRAINT			"q"
-# define IBR_CONSTRAINT			"iq"
 #else
 /* Since the Pentium, i386 CPUs have supported 64-bit atomics, but the
    i386 psABI supplement provides only 4-byte alignment for uint64_t
@@ -40,9 +37,6 @@ 
    atomics on this platform.  */
 # define __HAVE_64B_ATOMICS		0
 # define SP_REG				"esp"
-# define SEG_REG			"gs"
-# define BR_CONSTRAINT			"r"
-# define IBR_CONSTRAINT			"ir"
 #endif
 #define ATOMIC_EXCHANGE_USES_CAS	0
 
@@ -51,122 +45,6 @@ 
 #define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
   (! __sync_bool_compare_and_swap (mem, oldval, newval))
 
-
-#ifdef __x86_64__
-# define do_exchange_and_add_val_64_acq(pfx, mem, value) 0
-# define do_add_val_64_acq(pfx, mem, value) do { } while (0)
-#else
-/* XXX We do not really need 64-bit compare-and-exchange.  At least
-   not in the moment.  Using it would mean causing portability
-   problems since not many other 32-bit architectures have support for
-   such an operation.  So don't define any code for now.  If it is
-   really going to be used the code below can be used on Intel Pentium
-   and later, but NOT on i486.  */
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval)	      \
-  ({ __typeof (*mem) ret = *(mem);					      \
-     __atomic_link_error ();						      \
-     ret = (newval);							      \
-     ret = (oldval);							      \
-     ret; })
-
-# define do_exchange_and_add_val_64_acq(pfx, mem, value) \
-  ({ __typeof (value) __addval = (value);				      \
-     __typeof (*mem) __result;						      \
-     __typeof (mem) __memp = (mem);					      \
-     __typeof (*mem) __tmpval;						      \
-     __result = *__memp;						      \
-     do									      \
-       __tmpval = __result;						      \
-     while ((__result = pfx##_compare_and_exchange_val_64_acq		      \
-	     (__memp, __result + __addval, __result)) == __tmpval);	      \
-     __result; })
-
-# define do_add_val_64_acq(pfx, mem, value) \
-  {									      \
-    __typeof (value) __addval = (value);				      \
-    __typeof (mem) __memp = (mem);					      \
-    __typeof (*mem) __oldval = *__memp;					      \
-    __typeof (*mem) __tmpval;						      \
-    do									      \
-      __tmpval = __oldval;						      \
-    while ((__oldval = pfx##_compare_and_exchange_val_64_acq		      \
-	    (__memp, __oldval + __addval, __oldval)) == __tmpval);	      \
-  }
-#endif
-
-
-#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
-  ({ __typeof (*mem) __result;						      \
-     __typeof (value) __addval = (value);				      \
-     if (sizeof (*mem) == 1)						      \
-       __asm __volatile (lock "xaddb %b0, %1"				      \
-			 : "=q" (__result), "=m" (*mem)			      \
-			 : "0" (__addval), "m" (*mem));			      \
-     else if (sizeof (*mem) == 2)					      \
-       __asm __volatile (lock "xaddw %w0, %1"				      \
-			 : "=r" (__result), "=m" (*mem)			      \
-			 : "0" (__addval), "m" (*mem));			      \
-     else if (sizeof (*mem) == 4)					      \
-       __asm __volatile (lock "xaddl %0, %1"				      \
-			 : "=r" (__result), "=m" (*mem)			      \
-			 : "0" (__addval), "m" (*mem));			      \
-     else if (__HAVE_64B_ATOMICS)					      \
-       __asm __volatile (lock "xaddq %q0, %1"				      \
-			 : "=r" (__result), "=m" (*mem)			      \
-			 : "0" ((int64_t) cast_to_integer (__addval)),     \
-			   "m" (*mem));					      \
-     else								      \
-       __result = do_exchange_and_add_val_64_acq (pfx, (mem), __addval);      \
-     __result; })
-
-#define atomic_exchange_and_add(mem, value) \
-  __sync_fetch_and_add (mem, value)
-
-#define __arch_decrement_body(lock, pfx, mem) \
-  do {									      \
-    if (sizeof (*mem) == 1)						      \
-      __asm __volatile (lock "decb %b0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
-    else if (sizeof (*mem) == 2)					      \
-      __asm __volatile (lock "decw %w0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
-    else if (sizeof (*mem) == 4)					      \
-      __asm __volatile (lock "decl %0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
-    else if (__HAVE_64B_ATOMICS)					      \
-      __asm __volatile (lock "decq %q0"					      \
-			: "=m" (*mem)					      \
-			: "m" (*mem));					      \
-    else								      \
-      do_add_val_64_acq (pfx, mem, -1);					      \
-  } while (0)
-
-#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)
-
-#define atomic_decrement_and_test(mem) \
-  ({ unsigned char __result;						      \
-     if (sizeof (*mem) == 1)						      \
-       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1"		      \
-			 : "=m" (*mem), "=qm" (__result)		      \
-			 : "m" (*mem));					      \
-     else if (sizeof (*mem) == 2)					      \
-       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1"		      \
-			 : "=m" (*mem), "=qm" (__result)		      \
-			 : "m" (*mem));					      \
-     else if (sizeof (*mem) == 4)					      \
-       __asm __volatile (LOCK_PREFIX "decl %0; sete %1"			      \
-			 : "=m" (*mem), "=qm" (__result)		      \
-			 : "m" (*mem));					      \
-     else								      \
-       __asm __volatile (LOCK_PREFIX "decq %q0; sete %1"		      \
-			 : "=m" (*mem), "=qm" (__result)		      \
-			 : "m" (*mem));					      \
-     __result; })
-
-
 /* We don't use mfence because it is supposedly slower due to having to
    provide stronger guarantees (e.g., regarding self-modifying code).  */
 #define atomic_full_barrier() \