diff mbox series

Implement libc_once_retry for atomic initialization with allocation

Message ID 6e4b6b22-3f38-f798-f2e0-eb22311ed6b5@redhat.com
State New
Headers show
Series Implement libc_once_retry for atomic initialization with allocation | expand

Commit Message

Florian Weimer Jan. 4, 2018, 3:09 p.m. UTC
I'd appreciate if we could add this even during the freeze because it 
helps me to write a bug fix (but I guess I could use __libc_once there).

Torvald, does the algorithm look okay to you?

Thanks,
Florian

Comments

Andreas Schwab Jan. 4, 2018, 3:15 p.m. UTC | #1
On Jan 04 2018, Florian Weimer <fweimer@redhat.com> wrote:

> +   static inline struct foo *
> +   get_foo (void)
> +   {
> +     return __libc_once_retry (&global_foo, allocate_foo, free_foo, NULL);

s/__//

Andreas.
Florian Weimer Jan. 4, 2018, 3:16 p.m. UTC | #2
On 01/04/2018 04:15 PM, Andreas Schwab wrote:
> On Jan 04 2018, Florian Weimer <fweimer@redhat.com> wrote:
> 
>> +   static inline struct foo *
>> +   get_foo (void)
>> +   {
>> +     return __libc_once_retry (&global_foo, allocate_foo, free_foo, NULL);
> 
> s/__//

Thanks, fixed locally.

Florian
Torvald Riegel Jan. 4, 2018, 5:15 p.m. UTC | #3
On Thu, 2018-01-04 at 16:09 +0100, Florian Weimer wrote:
> Subject: [PATCH] Implement libc_once_retry for atomic initialization with allocation

Can you find a different name for it?  It has similarities to a generic
libc_once, but the pattern is more special.

Do you need it often enough to make it worth breaking it out?

> diff --git a/include/atomic.h b/include/atomic.h
> index 6af07dba58..91e176b040 100644
> --- a/include/atomic.h
> 
> +++ b/include/atomic.h

I don't think this should go into atomic.h because this is where we
really handle the basics of synchronization.  This is a synchronization
helper, and doesn't need to be touched by someone porting glibc to
another arch.

> @@ -826,4 +826,58 @@ void __atomic_link_error (void);
>  # error ATOMIC_EXCHANGE_USES_CAS has to be defined.
>  #endif
>  
> +/* Slow path for libc_once_retry; see below.  */
> 
> +void *__libc_once_retry_slow (void **__place,
> 
> +			      void *(*__allocate) (void *__closure),
> 
> +			      void (*__deallocate) (void *__closure,
> 
> +						    void *__ptr),
> 
> +			      void *__closure);
> 
> +
> 
> +/* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
> 
> +   return *PLACE.  Otherwise, call ALLOCATE (CLOSURE).  If that
> 
> +   returns NULL, return NULL.  Otherwise, atomically replace *PLACE
> 
> +   with PTR, the result of the ALLOCATE call (with acquire-release

That doesn't quite match what you implement.  First, you don't replace
it unconditionally, but use a CAS (you can't just throw in an
"atomically" here because the allocations are mixed in and they aren't).
Second, you use the weak CAS but the loop includes alloc/dealloc; see
below.

> +   MO).  If *PLACE was updated concurrently, call DEALLOCATE (CLOSURE,
> 
> +   PTR) to undo the effect of allocate, and return the new value of
> 
> +   *PLACE.  If DEALLOCATE is NULL, call the free (PTR) instead.

s/the //

> 
> +
> 
> +   It is expected that callers define an inline helper function
> 
> +   function which adds type safety, like this.

s/function function which/function that/
s/./:/

> 
> +
> 
> +   struct foo { ... };
> 
> +   struct foo *global_foo;
> 
> +   static void *allocate_foo (void *closure);
> 
> +   static void *deallocate_foo (void *closure, void *ptr);
> 
> +
> 
> +   static inline struct foo *
> 
> +   get_foo (void)
> 
> +   {
> 
> +     return __libc_once_retry (&global_foo, allocate_foo, free_foo, NULL);
> 
> +   }
> 
> +
> 
> +   Usage of this function looks like this:
> 
> +
> 
> +   struct foo *local_foo = get_foo ();
> 
> +   if (local_foo == NULL)
> 
> +      report_allocation_failure ();
> 
> +
> 
> +   Compare to __libc_once, __libc_once_retry has the advantage that it

Compare_d_

> +   does not need separate space for a control variable, and that it is
> 
> +   safe with regards to cancellation and other forms of exception
> 
> +   handling if the provided callback functions are safe.  */
> 
> +static inline void *
> 
> +libc_once_retry (void **__place, void *(*__allocate) (void *__closure),
> 
> +		 void (*__deallocate) (void *__closure, void *__ptr),
> 
> +		 void *__closure)
> 
> +{
> 
> +  /* Synchronizes with the release-store CAS in

s/release-store/release MO/

> diff --git a/misc/libc_once_retry.c b/misc/libc_once_retry.c
> new file mode 100644
> index 0000000000..ecd352e2a3
> --- /dev/null
> 
> +++ b/misc/libc_once_retry.c
> 
> @@ -0,0 +1,55 @@
> +/* Concurrent initialization of a pointer.
> 
> +   Copyright (C) 2018 Free Software Foundation, Inc.
> 
> +   This file is part of the GNU C Library.
> 
> +
> 
> +   The GNU C Library is free software; you can redistribute it and/or
> 
> +   modify it under the terms of the GNU Lesser General Public
> 
> +   License as published by the Free Software Foundation; either
> 
> +   version 2.1 of the License, or (at your option) any later version.
> 
> +
> 
> +   The GNU C Library is distributed in the hope that it will be useful,
> 
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> 
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> 
> +   Lesser General Public License for more details.
> 
> +
> 
> +   You should have received a copy of the GNU Lesser General Public
> 
> +   License along with the GNU C Library; if not, see
> 
> +   <http://www.gnu.org/licenses/>.  */
> 
> +
> 
> +#include <atomic.h>
> 
> +#include <stdlib.h>
> 
> +
> 
> +void *
> 
> +__libc_once_retry_slow (void **place, void *(*allocate) (void *closure),
> 
> +                        void (*deallocate) (void *closure, void *ptr),
> 
> +                        void *closure)
> 
> +{
> 
> +  void *result;
> 
> +
> 
> +  do
> 
> +    {
> 
> +      result = allocate (closure);
> 
> +      if (result == NULL)
> 
> +        return NULL;
> 
> +
> 
> +      /* Synchronizes with the acquire MO load in
> 
> +         __libc_once_retry.  */
> 
> +      void *expected = NULL;
> 
> +      if (atomic_compare_exchange_weak_release (place, &expected, result))
> 
> +        return result;

It seems you're looking for a strong CAS here, so why don't you make a
loop around the weak CAS?  Using an acq_rel CAS, this is simpler.

You'd avoid having more than one allocate/deallocate pair, which might
be unexpected given that your documentation of the function's
semantics doesn't make that clear (and it might perhaps matter for
custom alloc/dealloc functions).
Florian Weimer Jan. 4, 2018, 5:44 p.m. UTC | #4
On 01/04/2018 06:15 PM, Torvald Riegel wrote:
> On Thu, 2018-01-04 at 16:09 +0100, Florian Weimer wrote:
>> Subject: [PATCH] Implement libc_once_retry for atomic initialization with allocation
> 
> Can you find a different name for it?  It has similarities to a generic
> libc_once, but the pattern is more special.

libc_concurrent_init?

> Do you need it often enough to make it worth breaking it out?

I think it's worthwhile to separate the concurrency review from the 
application logic.

>> diff --git a/include/atomic.h b/include/atomic.h
>> index 6af07dba58..91e176b040 100644
>> --- a/include/atomic.h
>>
>> +++ b/include/atomic.h
> 
> I don't think this should go into atomic.h because this is where we
> really handle the basics of synchronization.  This is a synchronization
> helper, and doesn't need to be touched by someone porting glibc to
> another arch.

What would be a good place instead?  A new header file?

>> +/* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
>>
>> +   return *PLACE.  Otherwise, call ALLOCATE (CLOSURE).  If that
>>
>> +   returns NULL, return NULL.  Otherwise, atomically replace *PLACE
>>
>> +   with PTR, the result of the ALLOCATE call (with acquire-release
> 
> That doesn't quite match what you implement.  First, you don't replace
> it unconditionally, but use a CAS (you can't just throw in an
> "atomically" here because the allocations are mixed in and they aren't).
> Second, you use the weak CAS but the loop includes alloc/dealloc; see
> below.

Hmm, right, that is unnecessary.  I have updated the comment in the 
attached patch.

/* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
    return *PLACE.  Otherwise, call ALLOCATE (CLOSURE), yielding
    RESULT.  If RESULT equals NULL, return NULL.  Otherwise, atomically
    replace the NULL value in *PLACE with the RESULT value.  If it
    turns out that *PLACE was updated concurrently, call DEALLOCATE
    (CLOSURE, RESULT) to undo the effect of ALLOCATE, and return the
    new value of *PLACE (after an acquire MO load).  If DEALLOCATE is
    NULL, call free (RESULT) instead.

    The net effect is that if libc_once_retry returns a non-NULL value,
    the caller can assume that pointed-to data has been initialized
    according to the ALLOCATE function.

>> +  do
>>
>> +    {
>>
>> +      result = allocate (closure);
>>
>> +      if (result == NULL)
>>
>> +        return NULL;
>>
>> +
>>
>> +      /* Synchronizes with the acquire MO load in
>>
>> +         __libc_once_retry.  */
>>
>> +      void *expected = NULL;
>>
>> +      if (atomic_compare_exchange_weak_release (place, &expected, result))
>>
>> +        return result;
> 
> It seems you're looking for a strong CAS here, so why don't you make a
> loop around the weak CAS?  Using an acq_rel CAS, this is simpler.

It's the only CAS we have: weak with relaxed MO on failure.  I need 
strong with acquire MO on failure, and currently, the only way to get 
that is with the loop and the additional load.  Sorry.

> You'd avoid having more than one allocate/deallocate pair, which might
> be unexpected given that your documentation of the function's
> semantics doesn't make that clear (and it might perhaps matter for
> custom alloc/dealloc functions).

Oh, good point.  I changed the implementation in the attached patch.

Thanks,
Florian
Subject: [PATCH] Implement libc_once_retry for atomic initialization with allocation
To: libc-alpha@sourceware.org

2018-01-04  Florian Weimer  <fweimer@redhat.com>

	* include/atomic.h (__libc_once_retry_slow): Declare.
	(libc_once_retry): Define.
	* misc/libc_once_retry.c: New file.
	* misc/tst-libc_once_retry.c: Likewise.
	* misc/Makefile (routines): Add libc_once_retry.
	(tests-internal): Add tst-libc_once_retry.
	* misc/Versions (GLIBC_PRIVATE): Add __libc_once_retry_slow.

diff --git a/include/atomic.h b/include/atomic.h
index 6af07dba58..ec996cd224 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -826,4 +826,63 @@ void __atomic_link_error (void);
 # error ATOMIC_EXCHANGE_USES_CAS has to be defined.
 #endif
 
+/* Slow path for libc_once_retry; see below.  */
+void *__libc_once_retry_slow (void **__place,
+			      void *(*__allocate) (void *__closure),
+			      void (*__deallocate) (void *__closure,
+						    void *__ptr),
+			      void *__closure);
+
+/* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
+   return *PLACE.  Otherwise, call ALLOCATE (CLOSURE), yielding
+   RESULT.  If RESULT equals NULL, return NULL.  Otherwise, atomically
+   replace the NULL value in *PLACE with the RESULT value.  If it
+   turns out that *PLACE was updated concurrently, call DEALLOCATE
+   (CLOSURE, RESULT) to undo the effect of ALLOCATE, and return the
+   new value of *PLACE (after an acquire MO load).  If DEALLOCATE is
+   NULL, call free (RESULT) instead.
+
+   The net effect is that if libc_once_retry returns a non-NULL value,
+   the caller can assume that pointed-to data has been initialized
+   according to the ALLOCATE function.
+
+   It is expected that callers define an inline helper function which
+   adds type safety, like this.
+
+   struct foo { ... };
+   struct foo *global_foo;
+   static void *allocate_foo (void *closure);
+   static void *deallocate_foo (void *closure, void *ptr);
+
+   static inline struct foo *
+   get_foo (void)
+   {
+     return libc_once_retry (&global_foo, allocate_foo, free_foo, NULL);
+   }
+
+   Usage of this function looks like this:
+
+   struct foo *local_foo = get_foo ();
+   if (local_foo == NULL)
+      report_allocation_failure ();
+
+   Compared to __libc_once, __libc_once_retry has the advantage that
+   it does not need separate space for a control variable, and that it
+   is safe with regards to cancellation and other forms of exception
+   handling if the provided callback functions are safe.  */
+static inline void *
+libc_once_retry (void **__place, void *(*__allocate) (void *__closure),
+		 void (*__deallocate) (void *__closure, void *__ptr),
+		 void *__closure)
+{
+  /* Synchronizes with the release MO CAS in
+     __libc_once_retry_slow.  */
+  void *__result = atomic_load_acquire (__place);
+  if (__result != NULL)
+    return __result;
+  else
+    return __libc_once_retry_slow (__place, __allocate, __deallocate,
+				   __closure);
+}
+
 #endif	/* atomic.h */
diff --git a/misc/Makefile b/misc/Makefile
index a5076b3672..7b1314d01b 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -70,7 +70,8 @@ routines := brk sbrk sstk ioctl \
 	    getloadavg getclktck \
 	    fgetxattr flistxattr fremovexattr fsetxattr getxattr \
 	    listxattr lgetxattr llistxattr lremovexattr lsetxattr \
-	    removexattr setxattr getauxval ifunc-impl-list makedev
+	    removexattr setxattr getauxval ifunc-impl-list makedev \
+	    libc_once_retry
 
 generated += tst-error1.mtrace tst-error1-mem.out
 
@@ -84,7 +85,7 @@ tests := tst-dirname tst-tsearch tst-fdset tst-efgcvt tst-mntent tst-hsearch \
 	 tst-preadvwritev tst-preadvwritev64 tst-makedev tst-empty \
 	 tst-preadvwritev2 tst-preadvwritev64v2
 
-tests-internal := tst-atomic tst-atomic-long
+tests-internal := tst-atomic tst-atomic-long tst-libc_once_retry
 tests-static := tst-empty
 
 ifeq ($(run-built-tests),yes)
diff --git a/misc/Versions b/misc/Versions
index bfbda505e4..a129e90fc0 100644
--- a/misc/Versions
+++ b/misc/Versions
@@ -165,5 +165,6 @@ libc {
     __tdelete; __tfind; __tsearch; __twalk;
     __mmap; __munmap; __mprotect;
     __sched_get_priority_min; __sched_get_priority_max;
+    __libc_once_retry_slow;
   }
 }
diff --git a/misc/libc_once_retry.c b/misc/libc_once_retry.c
new file mode 100644
index 0000000000..91c486975a
--- /dev/null
+++ b/misc/libc_once_retry.c
@@ -0,0 +1,57 @@
+/* Concurrent initialization of a pointer.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <atomic.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+void *
+__libc_once_retry_slow (void **place, void *(*allocate) (void *closure),
+                        void (*deallocate) (void *closure, void *ptr),
+                        void *closure)
+{
+  void *result = allocate (closure);
+  if (result == NULL)
+    return NULL;
+
+  while (true)
+    {
+      /* Synchronizes with the acquire MO load in
+         __libc_once_retry.  */
+      void *expected = NULL;
+      if (atomic_compare_exchange_weak_release (place, &expected, result))
+        return result;
+
+      /* The failed CAS has relaxed MO semantics, so perform another
+         acquire MO load.  */
+      void *other_result = atomic_load_acquire (place);
+      if (other_result == NULL)
+        /* Spurious failure.  Try again.  */
+        continue;
+
+      /* We lost the race.  Free what we allocated and return the
+         other result.  */
+      if (deallocate == NULL)
+        free (result);
+      else
+        deallocate (closure, result);
+      return other_result;
+    }
+
+  return result;
+}
diff --git a/misc/tst-libc_once_retry.c b/misc/tst-libc_once_retry.c
new file mode 100644
index 0000000000..9b2bbd187c
--- /dev/null
+++ b/misc/tst-libc_once_retry.c
@@ -0,0 +1,175 @@
+/* Test the libc_once_retry function.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <atomic.h>
+#include <string.h>
+#include <support/check.h>
+#include <support/support.h>
+
+/* Allocate a new string.  */
+static void *
+allocate_string (void *closure)
+{
+  return xstrdup (closure);
+}
+
+/* Allocation and deallocation functions which are not expected to be
+   called.  */
+
+static void *
+allocate_not_called (void *closure)
+{
+  FAIL_EXIT1 ("allocation function called unexpectedly (%p)", closure);
+}
+
+static void
+deallocate_not_called (void *closure, void *ptr)
+{
+  FAIL_EXIT1 ("deallocate function called unexpectedly (%p, %p)",
+              closure, ptr);
+}
+
+/* Counter for various function calls.  */
+static int function_called;
+
+/* An allocation function which returns NULL and records that it has
+   been called.  */
+static void *
+allocate_return_null (void *closure)
+{
+  /* The function should only be called once.  */
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  return NULL;
+}
+
+
+/* The following is used to check the retry logic, by causing a fake
+   race condition.  */
+static void *fake_race_place;
+static char fake_race_region[3]; /* To obtain unique addresses.  */
+
+static void *
+fake_race_allocate (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread.  */
+  fake_race_place = &fake_race_region[1];
+  return &fake_race_region[2];
+}
+
+static void
+fake_race_deallocate (void *closure, void *ptr)
+{
+  /* Check that the pointer returned from fake_race_allocate is
+     deallocated (and not the one stored in fake_race_place).  */
+  TEST_VERIFY (ptr == &fake_race_region[2]);
+
+  TEST_VERIFY (fake_race_place == &fake_race_region[1]);
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 1);
+  ++function_called;
+}
+
+/* Similar to fake_race_allocate, but expects to be paired with free
+   as the deallocation function.  */
+static void *
+fake_race_allocate_for_free (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread.  */
+  fake_race_place = &fake_race_region[1];
+  return xstrdup ("to be freed");
+}
+
+static int
+do_test (void)
+{
+  /* Simple allocation.  */
+  void *place1 = NULL;
+  char *string1 = libc_once_retry (&place1, allocate_string,
+                                   deallocate_not_called,
+                                   (char *) "test string 1");
+  TEST_VERIFY_EXIT (string1 != NULL);
+  TEST_VERIFY (strcmp ("test string 1", string1) == 0);
+  /* Second call returns the first pointer, without calling any
+     callbacks.  */
+  TEST_VERIFY (string1
+               == libc_once_retry (&place1, allocate_not_called,
+                                   deallocate_not_called,
+                                   (char *) "test string 1a"));
+
+  /* Difference place should result in another call.  */
+  void *place2 = NULL;
+  char *string2 = libc_once_retry (&place2, allocate_string,
+                                   deallocate_not_called,
+                                   (char *) "test string 2");
+  TEST_VERIFY_EXIT (string2 != NULL);
+  TEST_VERIFY (strcmp ("test string 2", string2) == 0);
+  TEST_VERIFY (string1 != string2);
+
+  /* Check error reporting (NULL return value from the allocation
+     function).  */
+  void *place3 = NULL;
+  char *string3 = libc_once_retry (&place3, allocate_return_null,
+                                   deallocate_not_called, NULL);
+  TEST_VERIFY (string3 == NULL);
+  TEST_COMPARE (function_called, 1);
+
+  /* Check that the deallocation function is called if the race is
+     lost.  */
+  function_called = 0;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate,
+                                fake_race_deallocate,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 2);
+  function_called = 3;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate,
+                                fake_race_deallocate,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  /* Similar, but this time rely on that free is called.  */
+  function_called = 0;
+  fake_race_place = NULL;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate_for_free,
+                                NULL,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 1);
+  function_called = 3;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate_for_free,
+                                NULL,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  return 0;
+}
+
+#include <support/test-driver.c>
Torvald Riegel Jan. 4, 2018, 6:33 p.m. UTC | #5
On Thu, 2018-01-04 at 18:44 +0100, Florian Weimer wrote:
> On 01/04/2018 06:15 PM, Torvald Riegel wrote:
> > On Thu, 2018-01-04 at 16:09 +0100, Florian Weimer wrote:
> >> Subject: [PATCH] Implement libc_once_retry for atomic initialization with allocation
> > 
> > Can you find a different name for it?  It has similarities to a generic
> > libc_once, but the pattern is more special.
> 
> libc_concurrent_init?

Maybe sth like libc_allocate_and_init_once, or just libc_init_once?  One
can do without allocation if setting DEALLOCATE to an empty function,
but the parameters really suggest that this is about allocation.

> > Do you need it often enough to make it worth breaking it out?
> 
> I think it's worthwhile to separate the concurrency review from the 
> application logic.

That can be helpful, but making a generic helper for special cases (if
it's indeed one...) also has costs.

> >> diff --git a/include/atomic.h b/include/atomic.h
> >> index 6af07dba58..91e176b040 100644
> >> --- a/include/atomic.h
> >>
> >> +++ b/include/atomic.h
> > 
> > I don't think this should go into atomic.h because this is where we
> > really handle the basics of synchronization.  This is a synchronization
> > helper, and doesn't need to be touched by someone porting glibc to
> > another arch.
> 
> What would be a good place instead?  A new header file?

I think so.  I'm not too keen on fine-granular headers usually, but
atomics and what we build on top of them is worth separating IMO.

> >> +/* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
> >>
> >> +   return *PLACE.  Otherwise, call ALLOCATE (CLOSURE).  If that
> >>
> >> +   returns NULL, return NULL.  Otherwise, atomically replace *PLACE
> >>
> >> +   with PTR, the result of the ALLOCATE call (with acquire-release
> > 
> > That doesn't quite match what you implement.  First, you don't replace
> > it unconditionally, but use a CAS (you can't just throw in an
> > "atomically" here because the allocations are mixed in and they aren't).
> > Second, you use the weak CAS but the loop includes alloc/dealloc; see
> > below.
> 
> Hmm, right, that is unnecessary.  I have updated the comment in the 
> attached patch.
> 
> /* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
>     return *PLACE.  Otherwise, call ALLOCATE (CLOSURE), yielding
>     RESULT.  If RESULT equals NULL, return NULL.  Otherwise, atomically
>     replace the NULL value in *PLACE with the RESULT value.  If it

You don't replace it atomically though, you really do run a CAS that
ensures that you replace iff *PLACE is still NULL. 

>     turns out that *PLACE was updated concurrently, call DEALLOCATE
>     (CLOSURE, RESULT) to undo the effect of ALLOCATE, and return the
>     new value of *PLACE (after an acquire MO load).  If DEALLOCATE is
>     NULL, call free (RESULT) instead.
> 
>     The net effect is that if libc_once_retry returns a non-NULL value,
>     the caller can assume that pointed-to data has been initialized
>     according to the ALLOCATE function.

That's a useful addition, and might even be the first sentence :)

> >> +  do
> >>
> >> +    {
> >>
> >> +      result = allocate (closure);
> >>
> >> +      if (result == NULL)
> >>
> >> +        return NULL;
> >>
> >> +
> >>
> >> +      /* Synchronizes with the acquire MO load in
> >>
> >> +         __libc_once_retry.  */
> >>
> >> +      void *expected = NULL;
> >>
> >> +      if (atomic_compare_exchange_weak_release (place, &expected, result))
> >>
> >> +        return result;
> > 
> > It seems you're looking for a strong CAS here, so why don't you make a
> > loop around the weak CAS?  Using an acq_rel CAS, this is simpler.
> 
> It's the only CAS we have: weak with relaxed MO on failure.  I need 
> strong with acquire MO on failure, and currently, the only way to get 
> that is with the loop and the additional load.  Sorry.

That's fine.  The strong CAS is really simple to do with a weak CAS if
one ignores back-off.
Dmitry V. Levin Jan. 4, 2018, 10:59 p.m. UTC | #6
On Thu, Jan 04, 2018 at 04:09:43PM +0100, Florian Weimer wrote:
> I'd appreciate if we could add this even during the freeze because it 
> helps me to write a bug fix (but I guess I could use __libc_once there).

Yes, I think we can add a symbol to GLIBC_PRIVATE during soft freeze.
Florian Weimer Jan. 5, 2018, 11:08 a.m. UTC | #7
On 01/04/2018 07:33 PM, Torvald Riegel wrote:

> Maybe sth like libc_allocate_and_init_once, or just libc_init_once?  One
> can do without allocation if setting DEALLOCATE to an empty function,
> but the parameters really suggest that this is about allocation.

If DEALLOCATE does nothing, you can just use a simple atomic store. 8-) 
(Which is why I used NULL to denote free, not a nop.)

>> /* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
>>      return *PLACE.  Otherwise, call ALLOCATE (CLOSURE), yielding
>>      RESULT.  If RESULT equals NULL, return NULL.  Otherwise, atomically
>>      replace the NULL value in *PLACE with the RESULT value.  If it
> 
> You don't replace it atomically though, you really do run a CAS that
> ensures that you replace iff *PLACE is still NULL.

Please have a look at the updated documentation in the attached patch.

Thanks,
Florian
Subject: [PATCH] Implement allocate_once for atomic initialization with allocation
To: libc-alpha@sourceware.org

2018-01-04  Florian Weimer  <fweimer@redhat.com>

	Implement allocate_once.
	* include/allocate_once.h: New file.
	* misc/allocate_once.c: Likewise.
	* misc/tst-allocate_once.c: Likewise.
	* misc/Makefile (routines): Add allocate_once.
	(tests-internal): Add tst-allocate_once.
	* misc/Versions (GLIBC_PRIVATE): Add __libc_allocate_once_slow.

diff --git a/include/allocate_once.h b/include/allocate_once.h
new file mode 100644
index 0000000000..af07f9b614
--- /dev/null
+++ b/include/allocate_once.h
@@ -0,0 +1,88 @@
+/* Allocate and initialize an object once, in a thread-safe fashion.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _ALLOCATE_ONCE_H
+#define _ALLOCATE_ONCE_H
+
+#include <atomic.h>
+
+/* Slow path for allocate_once; see below.  */
+void *__libc_allocate_once_slow (void **__place,
+                                 void *(*__allocate) (void *__closure),
+                                 void (*__deallocate) (void *__closure,
+                                                       void *__ptr),
+                                 void *__closure);
+
+/* Return a pointer to an allocated and initialized data structure.
+   If this function returns a non-NULL value, the caller can assume
+   that pointed-to data has been initialized according to the ALLOCATE
+   function.
+
+   It is expected that callers define an inline helper function which
+   adds type safety, like this.
+
+   struct foo { ... };
+   struct foo *global_foo;
+   static void *allocate_foo (void *closure);
+   static void deallocate_foo (void *closure, void *ptr);
+
+   static inline struct foo *
+   get_foo (void)
+   {
+     return allocate_once (&global_foo, allocate_foo, deallocate_foo, NULL);
+   }
+
+   (Note that the global_foo variable is initialized to zero.)
+   Usage of this helper function looks like this:
+
+   struct foo *local_foo = get_foo ();
+   if (local_foo == NULL)
+      report_allocation_failure ();
+
+   allocate_once first performs an acquire MO load on *PLACE.  If the
+   result is not null, it is returned.  Otherwise, ALLOCATE (CLOSURE)
+   is called, yielding a value RESULT.  If RESULT equals NULL,
+   allocate_once returns NULL.  Otherwise, the function uses CAS to
+   update the NULL value in *PLACE with the RESULT value.  If it turns
+   out that *PLACE was updated concurrently, allocate_once calls
+   DEALLOCATE (CLOSURE, RESULT) to undo the effect of ALLOCATE, and
+   returns the new value of *PLACE (after an acquire MO load).  If
+   DEALLOCATE is NULL, free (RESULT) is called instead.
+
+   Compared to __libc_once, allocate_once has the advantage that it
+   does not need separate space for a control variable, and that it is
+   safe with regards to cancellation and other forms of exception
+   handling if the supplied callback functions are safe in that
+   regard.  allocate_once passes a closure parameter to the allocation
+   function, too.  */
+static inline void *
+allocate_once (void **__place, void *(*__allocate) (void *__closure),
+               void (*__deallocate) (void *__closure, void *__ptr),
+               void *__closure)
+{
+  /* Synchronizes with the release MO CAS in
+     __libc_allocate_once_slow.  */
+  void *__result = atomic_load_acquire (__place);
+  if (__result != NULL)
+    return __result;
+  else
+    return __libc_allocate_once_slow (__place, __allocate, __deallocate,
+                                      __closure);
+}
+
+#endif /* _ALLOCATE_ONCE_H */
diff --git a/misc/Makefile b/misc/Makefile
index a5076b3672..1fccb729a5 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -70,7 +70,8 @@ routines := brk sbrk sstk ioctl \
 	    getloadavg getclktck \
 	    fgetxattr flistxattr fremovexattr fsetxattr getxattr \
 	    listxattr lgetxattr llistxattr lremovexattr lsetxattr \
-	    removexattr setxattr getauxval ifunc-impl-list makedev
+	    removexattr setxattr getauxval ifunc-impl-list makedev \
+	    allocate_once
 
 generated += tst-error1.mtrace tst-error1-mem.out
 
@@ -84,7 +85,7 @@ tests := tst-dirname tst-tsearch tst-fdset tst-efgcvt tst-mntent tst-hsearch \
 	 tst-preadvwritev tst-preadvwritev64 tst-makedev tst-empty \
 	 tst-preadvwritev2 tst-preadvwritev64v2
 
-tests-internal := tst-atomic tst-atomic-long
+tests-internal := tst-atomic tst-atomic-long tst-allocate_once
 tests-static := tst-empty
 
 ifeq ($(run-built-tests),yes)
diff --git a/misc/Versions b/misc/Versions
index bfbda505e4..900e4ffb79 100644
--- a/misc/Versions
+++ b/misc/Versions
@@ -165,5 +165,6 @@ libc {
     __tdelete; __tfind; __tsearch; __twalk;
     __mmap; __munmap; __mprotect;
     __sched_get_priority_min; __sched_get_priority_max;
+    __libc_allocate_once_slow;
   }
 }
diff --git a/misc/allocate_once.c b/misc/allocate_once.c
new file mode 100644
index 0000000000..9353bd828f
--- /dev/null
+++ b/misc/allocate_once.c
@@ -0,0 +1,58 @@
+/* Concurrent allocation and initialization of a pointer.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <atomic.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
+void *
+__libc_allocate_once_slow (void **place, void *(*allocate) (void *closure),
+                           void (*deallocate) (void *closure, void *ptr),
+                           void *closure)
+{
+  /* Slow path of allocate_once: allocate a candidate value exactly
+     once and publish it; the loop below retries only the CAS.  */
+  void *result = allocate (closure);
+  if (result == NULL)
+    /* Report the allocation failure to the caller.  */
+    return NULL;
+
+  /* This loop implements a strong CAS on *place, with acquire-release
+     MO semantics, from a weak CAS with relaxed-release MO.  */
+  while (true)
+    {
+      /* Synchronizes with the acquire MO load in allocate_once.  */
+      void *expected = NULL;
+      if (atomic_compare_exchange_weak_release (place, &expected, result))
+        return result;
+
+      /* The failed CAS has relaxed MO semantics, so perform another
+         acquire MO load.  */
+      void *other_result = atomic_load_acquire (place);
+      if (other_result == NULL)
+        /* Spurious failure.  Try again.  */
+        continue;
+
+      /* We lost the race.  Free what we allocated and return the
+         other result.  */
+      if (deallocate == NULL)
+        free (result);
+      else
+        deallocate (closure, result);
+      return other_result;
+    }
+  /* Not reached: every path inside the loop returns.  */
+}
diff --git a/misc/tst-allocate_once.c b/misc/tst-allocate_once.c
new file mode 100644
index 0000000000..2d18aa04ab
--- /dev/null
+++ b/misc/tst-allocate_once.c
@@ -0,0 +1,175 @@
+/* Test the allocate_once function.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <allocate_once.h>
+#include <string.h>
+#include <support/check.h>
+#include <support/support.h>
+
+/* Allocation callback: duplicate the string passed via CLOSURE.  */
+static void *
+allocate_string (void *closure)
+{
+  char *copy = xstrdup (closure);
+  return copy;
+}
+
+/* Allocation and deallocation functions which are not expected to be
+   called.  */
+
+/* Must never run: passed where no allocation is expected; aborts the
+   test if invoked.  */
+static void *
+allocate_not_called (void *closure)
+{
+  FAIL_EXIT1 ("allocation function called unexpectedly (%p)", closure);
+}
+
+/* Must never run: passed where no deallocation is expected; aborts
+   the test if invoked.  */
+static void
+deallocate_not_called (void *closure, void *ptr)
+{
+  FAIL_EXIT1 ("deallocate function called unexpectedly (%p, %p)",
+              closure, ptr);
+}
+
+/* Counter for various function calls.  */
+static int function_called;
+
+/* An allocation function which returns NULL and records that it has
+   been called.  */
+static void *
+allocate_return_null (void *closure)
+{
+  /* The function should only be called once.  */
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* NULL signals allocation failure to allocate_once.  */
+  return NULL;
+}
+
+
+/* The following is used to check the retry logic, by causing a fake
+   race condition.  */
+static void *fake_race_place;
+static char fake_race_region[3]; /* To obtain unique addresses.  */
+
+static void *
+fake_race_allocate (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread: storing into the place from
+     inside the callback makes the later CAS fail, so the deallocate
+     callback must then run.  */
+  fake_race_place = &fake_race_region[1];
+  return &fake_race_region[2];
+}
+
+/* Deallocation callback paired with fake_race_allocate; expected to
+   run exactly once, after the simulated lost race.  */
+static void
+fake_race_deallocate (void *closure, void *ptr)
+{
+  /* Check that the pointer returned from fake_race_allocate is
+     deallocated (and not the one stored in fake_race_place).  */
+  TEST_VERIFY (ptr == &fake_race_region[2]);
+
+  TEST_VERIFY (fake_race_place == &fake_race_region[1]);
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 1);
+  ++function_called;
+}
+
+/* Similar to fake_race_allocate, but expects to be paired with free
+   as the deallocation function.  */
+static void *
+fake_race_allocate_for_free (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread.  */
+  fake_race_place = &fake_race_region[1];
+  /* Heap-allocated so that the implicit free on the lost race is
+     valid.  */
+  return xstrdup ("to be freed");
+}
+
+static int
+do_test (void)
+{
+  /* Simple allocation.  */
+  void *place1 = NULL;
+  char *string1 = allocate_once (&place1, allocate_string,
+                                 deallocate_not_called,
+                                 (char *) "test string 1");
+  TEST_VERIFY_EXIT (string1 != NULL);
+  TEST_VERIFY (strcmp ("test string 1", string1) == 0);
+  /* Second call returns the first pointer, without calling any
+     callbacks.  */
+  TEST_VERIFY (string1
+               == allocate_once (&place1, allocate_not_called,
+                                 deallocate_not_called,
+                                 (char *) "test string 1a"));
+
+  /* A different place should result in another call.  */
+  void *place2 = NULL;
+  char *string2 = allocate_once (&place2, allocate_string,
+                                 deallocate_not_called,
+                                 (char *) "test string 2");
+  TEST_VERIFY_EXIT (string2 != NULL);
+  TEST_VERIFY (strcmp ("test string 2", string2) == 0);
+  TEST_VERIFY (string1 != string2);
+
+  /* Check error reporting (NULL return value from the allocation
+     function).  */
+  void *place3 = NULL;
+  char *string3 = allocate_once (&place3, allocate_return_null,
+                                 deallocate_not_called, NULL);
+  TEST_VERIFY (string3 == NULL);
+  TEST_COMPARE (function_called, 1);
+
+  /* Check that the deallocation function is called if the race is
+     lost.  */
+  function_called = 0;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate,
+                              fake_race_deallocate,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 2);
+  /* A second call must return the stored pointer without invoking the
+     callbacks again.  */
+  function_called = 3;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate,
+                              fake_race_deallocate,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  /* Similar, but this time rely on free being called.  */
+  function_called = 0;
+  fake_race_place = NULL;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate_for_free,
+                              NULL,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 1);
+  function_called = 3;
+  TEST_VERIFY (allocate_once (&fake_race_place,
+                              fake_race_allocate_for_free,
+                              NULL,
+                              &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  return 0;
+}
+
+#include <support/test-driver.c>
diff mbox series

Patch

Subject: [PATCH] Implement libc_once_retry for atomic initialization with allocation
To: libc-alpha@sourceware.org

2018-01-04  Florian Weimer  <fweimer@redhat.com>

	* include/atomic.h (__libc_once_retry_slow): Declare.
	(libc_once_retry): Define.
	* misc/libc_once_retry.c: New file.
	* misc/tst-libc_once_retry.c: Likewise.
	* misc/Makefile (routines): Add libc_once_retry.
	(tests-internal): Add tst-libc_once_retry.
	* misc/Versions (GLIBC_PRIVATE): Add __libc_once_retry_slow.

diff --git a/include/atomic.h b/include/atomic.h
index 6af07dba58..91e176b040 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -826,4 +826,58 @@  void __atomic_link_error (void);
 # error ATOMIC_EXCHANGE_USES_CAS has to be defined.
 #endif
 
+/* Slow path for libc_once_retry; see below.  */
+void *__libc_once_retry_slow (void **__place,
+			      void *(*__allocate) (void *__closure),
+			      void (*__deallocate) (void *__closure,
+						    void *__ptr),
+			      void *__closure);
+
+/* Perform an acquire MO load on *PLACE.  If *PLACE is not NULL,
+   return *PLACE.  Otherwise, call ALLOCATE (CLOSURE).  If that
+   returns NULL, return NULL.  Otherwise, atomically replace *PLACE
+   with PTR, the result of the ALLOCATE call (with acquire-release
+   MO).  If *PLACE was updated concurrently, call DEALLOCATE (CLOSURE,
+   PTR) to undo the effect of allocate, and return the new value of
+   *PLACE.  If DEALLOCATE is NULL, call the free (PTR) instead.
+
+
+   It is expected that callers define an inline helper function
+
+   struct foo { ... };
+   struct foo *global_foo;
+   static void *allocate_foo (void *closure);
+   static void deallocate_foo (void *closure, void *ptr);
+
+   static inline struct foo *
+   get_foo (void)
+   {
+     return libc_once_retry (&global_foo, allocate_foo, deallocate_foo, NULL);
+   }
+
+   Usage of this function looks like this:
+
+   struct foo *local_foo = get_foo ();
+   if (local_foo == NULL)
+      report_allocation_failure ();
+
+   Compared to __libc_once, libc_once_retry has the advantage that it
+   does not need separate space for a control variable, and that it is
+   safe with regards to cancellation and other forms of exception
+   handling if the provided callback functions are safe.  */
+/* Fast path: one acquire MO load; only an unset *PLACE goes through
+   the out-of-line slow path.  */
+static inline void *
+libc_once_retry (void **__place, void *(*__allocate) (void *__closure),
+		 void (*__deallocate) (void *__closure, void *__ptr),
+		 void *__closure)
+{
+  /* Synchronizes with the release-store CAS in
+     __libc_once_retry_slow.  */
+  void *__result = atomic_load_acquire (__place);
+  if (__result == NULL)
+    __result = __libc_once_retry_slow (__place, __allocate, __deallocate,
+				       __closure);
+  return __result;
+}
+
 #endif	/* atomic.h */
diff --git a/misc/Makefile b/misc/Makefile
index a5076b3672..7b1314d01b 100644
--- a/misc/Makefile
+++ b/misc/Makefile
@@ -70,7 +70,8 @@  routines := brk sbrk sstk ioctl \
 	    getloadavg getclktck \
 	    fgetxattr flistxattr fremovexattr fsetxattr getxattr \
 	    listxattr lgetxattr llistxattr lremovexattr lsetxattr \
-	    removexattr setxattr getauxval ifunc-impl-list makedev
+	    removexattr setxattr getauxval ifunc-impl-list makedev \
+	    libc_once_retry
 
 generated += tst-error1.mtrace tst-error1-mem.out
 
@@ -84,7 +85,7 @@  tests := tst-dirname tst-tsearch tst-fdset tst-efgcvt tst-mntent tst-hsearch \
 	 tst-preadvwritev tst-preadvwritev64 tst-makedev tst-empty \
 	 tst-preadvwritev2 tst-preadvwritev64v2
 
-tests-internal := tst-atomic tst-atomic-long
+tests-internal := tst-atomic tst-atomic-long tst-libc_once_retry
 tests-static := tst-empty
 
 ifeq ($(run-built-tests),yes)
diff --git a/misc/Versions b/misc/Versions
index bfbda505e4..a129e90fc0 100644
--- a/misc/Versions
+++ b/misc/Versions
@@ -165,5 +165,6 @@  libc {
     __tdelete; __tfind; __tsearch; __twalk;
     __mmap; __munmap; __mprotect;
     __sched_get_priority_min; __sched_get_priority_max;
+    __libc_once_retry_slow;
   }
 }
diff --git a/misc/libc_once_retry.c b/misc/libc_once_retry.c
new file mode 100644
index 0000000000..ecd352e2a3
--- /dev/null
+++ b/misc/libc_once_retry.c
@@ -0,0 +1,55 @@ 
+/* Concurrent initialization of a pointer.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <atomic.h>
+#include <stdlib.h>
+
+void *
+__libc_once_retry_slow (void **place, void *(*allocate) (void *closure),
+                        void (*deallocate) (void *closure, void *ptr),
+                        void *closure)
+{
+  /* Perform the allocation exactly once.  The loop below retries only
+     the CAS, so a spurious weak-CAS failure no longer causes a
+     wasteful deallocate/allocate round trip.  */
+  void *result = allocate (closure);
+  if (result == NULL)
+    /* Report the allocation failure to the caller.  */
+    return NULL;
+
+  /* This loop turns the weak CAS with relaxed failure MO into a
+     strong CAS with acquire-release MO on *place.  */
+  for (;;)
+    {
+      /* Synchronizes with the acquire MO load in libc_once_retry.  */
+      void *expected = NULL;
+      if (atomic_compare_exchange_weak_release (place, &expected, result))
+        return result;
+
+      /* The failed CAS has relaxed MO semantics, so perform another
+         acquire MO load.  */
+      void *other_result = atomic_load_acquire (place);
+      if (other_result == NULL)
+        /* Spurious failure.  Try the CAS again.  */
+        continue;
+
+      /* We lost the race.  Undo our allocation and return the value
+         installed concurrently.  */
+      if (deallocate == NULL)
+        free (result);
+      else
+        deallocate (closure, result);
+      return other_result;
+    }
+}
diff --git a/misc/tst-libc_once_retry.c b/misc/tst-libc_once_retry.c
new file mode 100644
index 0000000000..5dedd21964
--- /dev/null
+++ b/misc/tst-libc_once_retry.c
@@ -0,0 +1,175 @@ 
+/* Test the libc_once_retry function.
+   Copyright (C) 2018 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <atomic.h>
+#include <string.h>
+#include <support/check.h>
+#include <support/support.h>
+
+/* Allocate a new string.  */
+static void *
+allocate_string (void *closure)
+{
+  /* The closure is the string to duplicate.  */
+  return xstrdup (closure);
+}
+
+/* Allocation and deallocation functions which are not expected to be
+   called.  */
+
+/* Must never run: passed where no allocation is expected; aborts the
+   test if invoked.  */
+static void *
+allocate_not_called (void *closure)
+{
+  FAIL_EXIT1 ("allocation function called unexpectedly (%p)", closure);
+}
+
+/* Must never run: passed where no deallocation is expected; aborts
+   the test if invoked.  */
+static void
+deallocate_not_called (void *closure, void *ptr)
+{
+  FAIL_EXIT1 ("deallocate function called unexpectedly (%p, %p)",
+              closure, ptr);
+}
+
+/* Counter for various function calls.  */
+static int function_called;
+
+/* An allocation function which returns NULL and records that it has
+   been called.  */
+static void *
+allocate_return_null (void *closure)
+{
+  /* The function should only be called once.  */
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* NULL signals allocation failure to libc_once_retry.  */
+  return NULL;
+}
+
+
+/* The following is used to check the retry logic, by causing a fake
+   race condition.  */
+static void *fake_race_place;
+static char fake_race_region[3]; /* To obtain unique addresses.  */
+
+static void *
+fake_race_allocate (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread: storing into the place from
+     inside the callback makes the later CAS fail, so the deallocate
+     callback must then run.  */
+  fake_race_place = &fake_race_region[1];
+  return &fake_race_region[2];
+}
+
+/* Deallocation callback paired with fake_race_allocate; expected to
+   run exactly once, after the simulated lost race.  */
+static void
+fake_race_deallocate (void *closure, void *ptr)
+{
+  /* Check that the pointer returned from fake_race_allocate is
+     deallocated (and not the one stored in fake_race_place).  */
+  TEST_VERIFY (ptr == &fake_race_region[2]);
+
+  TEST_VERIFY (fake_race_place == &fake_race_region[1]);
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 1);
+  ++function_called;
+}
+
+/* Similar to fake_race_allocate, but expects to be paired with free
+   as the deallocation function.  */
+static void *
+fake_race_allocate_for_free (void *closure)
+{
+  TEST_VERIFY (closure == &fake_race_region[0]);
+  TEST_COMPARE (function_called, 0);
+  ++function_called;
+  /* Fake allocation by another thread.  */
+  fake_race_place = &fake_race_region[1];
+  /* Heap-allocated so that the implicit free on the lost race is
+     valid.  */
+  return xstrdup ("to be freed");
+}
+
+static int
+do_test (void)
+{
+  /* Simple allocation.  */
+  void *place1 = NULL;
+  char *string1 = libc_once_retry (&place1, allocate_string,
+                                   deallocate_not_called,
+                                   (char *) "test string 1");
+  TEST_VERIFY_EXIT (string1 != NULL);
+  TEST_VERIFY (strcmp ("test string 1", string1) == 0);
+  /* Second call returns the first pointer, without calling any
+     callbacks.  */
+  TEST_VERIFY (string1
+               == libc_once_retry (&place1, allocate_not_called,
+                                   deallocate_not_called,
+                                   (char *) "test string 1a"));
+
+  /* A different place should result in another call.  */
+  void *place2 = NULL;
+  char *string2 = libc_once_retry (&place2, allocate_string,
+                                   deallocate_not_called,
+                                   (char *) "test string 2");
+  TEST_VERIFY_EXIT (string2 != NULL);
+  TEST_VERIFY (strcmp ("test string 2", string2) == 0);
+  TEST_VERIFY (string1 != string2);
+
+  /* Check error reporting (NULL return value from the allocation
+     function).  */
+  void *place3 = NULL;
+  char *string3 = libc_once_retry (&place3, allocate_return_null,
+                                   deallocate_not_called, NULL);
+  TEST_VERIFY (string3 == NULL);
+  TEST_COMPARE (function_called, 1);
+
+  /* Check that the deallocation function is called if the race is
+     lost.  */
+  function_called = 0;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate,
+                                fake_race_deallocate,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 2);
+  /* A second call must return the stored pointer without invoking the
+     callbacks again.  */
+  function_called = 3;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate,
+                                fake_race_deallocate,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  /* Similar, but this time rely on free being called.  */
+  function_called = 0;
+  fake_race_place = NULL;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate_for_free,
+                                NULL,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 1);
+  function_called = 3;
+  TEST_VERIFY (libc_once_retry (&fake_race_place,
+                                fake_race_allocate_for_free,
+                                NULL,
+                                &fake_race_region[0])
+               == &fake_race_region[1]);
+  TEST_COMPARE (function_called, 3);
+
+  return 0;
+}
+
+#include <support/test-driver.c>