
[v3] Fix double-checked locking in _res_hconf_reorder_addrs [BZ #19074]

Message ID 561BD573.40003@redhat.com
State New

Commit Message

Florian Weimer Oct. 12, 2015, 3:44 p.m. UTC
On 10/08/2015 05:14 PM, Torvald Riegel wrote:
> And now with actual comments on the patch...

Thanks.  I tried to address your comments.  I'm attaching the new version.

Florian

Comments

Torvald Riegel Oct. 14, 2015, 12:11 p.m. UTC | #1
On Mon, 2015-10-12 at 17:44 +0200, Florian Weimer wrote:
> diff --git a/resolv/res_hconf.c b/resolv/res_hconf.c
> index 692d948..00b620d 100644
> --- a/resolv/res_hconf.c
> +++ b/resolv/res_hconf.c
> @@ -45,6 +45,7 @@
>  #include "ifreq.h"
>  #include "res_hconf.h"
>  #include <wchar.h>
> +#include <atomic.h>
>  
>  #if IS_IN (libc)
>  # define fgets_unlocked __fgets_unlocked
> @@ -391,9 +392,14 @@ _res_hconf_reorder_addrs (struct hostent *hp)
>  {
>  #if defined SIOCGIFCONF && defined SIOCGIFNETMASK
>    int i, j;
> -  /* Number of interfaces.  */
> +  /* Number of interfaces.  Also serves as a flag for the
> +     double-checked locking idiom.  */
>    static int num_ifs = -1;
> -  /* We need to protect the dynamic buffer handling.  */
> +  /* Local copy of num_ifs, for non-atomic access.  */
> +  int num_ifs_local;
> +  /* We need to protect the dynamic buffer handling.  The lock is only
> +     acquired during initialization.  Afterwards, a positive num_ifs
> +     value indicates initialization.  */

I'd say "completed initialization".

>    __libc_lock_define_initialized (static, lock);
>  
>    /* Only reorder if we're supposed to.  */
> @@ -404,7 +410,10 @@ _res_hconf_reorder_addrs (struct hostent *hp)
>    if (hp->h_addrtype != AF_INET)
>      return;
>  
> -  if (num_ifs <= 0)
> +  /* This load synchronizes with the release MO store in the
> +     initialization block below.  */
> +  num_ifs_local = atomic_load_acquire (&num_ifs);
> +  if (num_ifs_local <= 0)
>      {
>        struct ifreq *ifr, *cur_ifr;
>        int sd, num, i;
> @@ -421,9 +430,18 @@ _res_hconf_reorder_addrs (struct hostent *hp)
>        /* Get lock.  */
>        __libc_lock_lock (lock);
>  
> -      /* Recheck, somebody else might have done the work by now.  */
> -      if (num_ifs <= 0)
> +      /* Recheck, somebody else might have done the work by now.  A
> +	 relaxed load is sufficient because we have the lock, and
> +	 num_ifs is only updated under the lock.  */

That's not really the reason why the relaxed MO load is okay.  You only
cite the concurrent updates to num_ifs, but the memory order here does
not change how we relate to those updates.  Memory orders are about
ordering, so the comment needs a justification in terms of ordering, at
least informally.  I would instead just refer to point (3) below.
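
Written out standalone, the idiom looks roughly like this (a sketch
using C11 <stdatomic.h> and a pthread mutex instead of glibc's
atomic.h and __libc_lock wrappers; all names are invented for
illustration):

#include <pthread.h>
#include <stdatomic.h>

/* -1: not initialized, 0: initialized but empty, >0: entry count.  */
static atomic_int init_count = -1;
static int entries[8];                  /* Stands in for ifaddrs.  */
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static int
discover_entries (int *out)
{
  /* Pretend initialization found two entries.  */
  out[0] = 10;
  out[1] = 20;
  return 2;
}

static void
use_entries (const int *e, int n)
{
  (void) e;
  (void) n;
}

void
reorder (void)
{
  /* First check, outside the lock.  The acquire load pairs with the
     release store below: if we observe a positive count, the writes
     to entries are visible (case (2)).  */
  int local = atomic_load_explicit (&init_count, memory_order_acquire);
  if (local <= 0)
    {
      pthread_mutex_lock (&init_lock);
      /* Second check, under the lock.  Relaxed is enough: if another
         thread already initialized, it did so before releasing the
         lock we now hold, so the lock orders its writes before our
         reads (case (3)).  */
      local = atomic_load_explicit (&init_count, memory_order_relaxed);
      if (local <= 0)
        {
          int n = discover_entries (entries);
          /* Publish.  Release ensures the writes to entries
             happen-before any acquire load that observes n > 0.  */
          atomic_store_explicit (&init_count, n, memory_order_release);
          local = n;
        }
      pthread_mutex_unlock (&init_lock);
    }
  if (local == 0)
    return;                             /* Initialized, but empty.  */
  /* Case (1): if we initialized ourselves, sequenced-before suffices.  */
  use_entries (entries, local);
}
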

> +      num_ifs_local = atomic_load_relaxed (&num_ifs);
> +      if (num_ifs_local <= 0)
>  	{
> +	  /* This is the only block which writes to num_ifs.  It can
> +	     be executed several times (sequentially) if
> +	     initialization does not yield any interfaces, and num_ifs
> +	     remains zero.  However, once we stored a positive value
> +	     in num_ifs below, this block cannot be entered again due
> +	     to the condition above.  */
>  	  int new_num_ifs = 0;
>  
>  	  /* Get a list of interfaces.  */
> @@ -472,7 +490,14 @@ _res_hconf_reorder_addrs (struct hostent *hp)
>  	  /* Release lock, preserve error value, and close socket.  */
>  	  errno = save;
>  
> -	  num_ifs = new_num_ifs;
> +	  /* Advertise successful initialization if new_num_ifs is
> +	     positive (and no updates to ifaddrs are permitted after
> +	     that).  Otherwise, num_ifs remains unchanged, at zero.
> +	     This store synchronizes with the initial acquire MO
> +	     load.  */
> +	  atomic_store_release (&num_ifs, new_num_ifs);
> +	  /* Keep the local copy current, to save another load.  */
> +	  num_ifs_local = new_num_ifs;
>  	}
>  
>        __libc_lock_unlock (lock);
> @@ -480,15 +505,43 @@ _res_hconf_reorder_addrs (struct hostent *hp)
>        __close (sd);
>      }
>  
> -  if (num_ifs == 0)
> +  /* num_ifs_local cannot be negative because the if statement above
> +     covered this case.  It can still be zero if we just performed
> +     initialization, but could not find any interfaces.  */
> +  if (num_ifs_local == 0)
>      return;
>  
> +  /* The code below accesses ifaddrs, so we need to ensure that the
> +     initialization happens-before this point.
> +
> +     The actual initialization is sequenced-before the release store
> +     to num_ifs, and sequenced-before the end of the critical section.
> +
> +     This means there are three possible executions:
> +
> +     (1) The thread that initialized the data also uses it, so
> +         sequenced-before is sufficient to ensure happens-before.
> +
> +     (2) The release MO store of num_ifs synchronizes-with the acquire
> +         MO load, and the acquire MO load is sequenced before the use
> +         of the initialized data below.
> +
> +     (3) We enter the critical section, and the relaxed MO load of
> +         num_fis yields a positive value.  The write to ifaddrs is

s/num_fis/num_ifs/ and ...

> +         sequenced-before leaving the critical section.  Leaving the
> +         critical section happens-before we entered the critical
> +         section ourselves, which means that the the write to ifaddrs

s/the the/the/.  Sorry for not spotting these earlier :)

> +         happens-before this point.
> +
> +     Consequently, all potential writes to ifaddrs (and the data it
> +     points to) happens-before this point.  */
> +
>    /* Find an address for which we have a direct connection.  */
>    for (i = 0; hp->h_addr_list[i]; ++i)
>      {
>        struct in_addr *haddr = (struct in_addr *) hp->h_addr_list[i];
>  
> -      for (j = 0; j < num_ifs; ++j)
> +      for (j = 0; j < num_ifs_local; ++j)
>  	{
>  	  u_int32_t if_addr    = ifaddrs[j].u.ipv4.addr;
>  	  u_int32_t if_netmask = ifaddrs[j].u.ipv4.mask;

Okay with the changes above.  Thanks!
Florian Weimer Oct. 14, 2015, 1:07 p.m. UTC | #2
On 10/14/2015 02:11 PM, Torvald Riegel wrote:
>> > -      /* Recheck, somebody else might have done the work by now.  */
>> > -      if (num_ifs <= 0)
>> > +      /* Recheck, somebody else might have done the work by now.  A
>> > +	 relaxed load is sufficient because we have the lock, and
>> > +	 num_ifs is only updated under the lock.  */

> That's not really the reason why the relaxed MO load is okay.  You cite
> just concurrent updates to num_ifs, but the relation to these is not
> affected by the MO here.  The MOs are about ordering, so you'd need a
> reason related to ordering here, informally.
> I would instead just refer to point (3) below.

Right, we don't strictly need an atomic load here, but you wanted one
in case of future changes.

I'll commit this:

      /* Recheck, somebody else might have done the work by now.  No
	 ordering is required for the load because we have the lock,
	 and num_ifs is only updated under the lock.  Also see (3) in
	 the analysis below.  */
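
For contrast, the acquire MO load on the fast path is the one whose
memory order actually carries the weight.  A made-up fragment (not
part of the patch, hypothetical names) of what a relaxed load there
would permit:

#include <stdatomic.h>

static int shared_data;          /* Stands in for ifaddrs.  */
static atomic_int ready = -1;    /* Stands in for num_ifs.  */

int
broken_fast_path (void)
{
  /* Hypothetical bug: a relaxed load can observe ready > 0 without
     ordering the initializer's writes to shared_data before our read,
     so we could return stale data despite the flag looking "done".  */
  if (atomic_load_explicit (&ready, memory_order_relaxed) > 0)
    return shared_data;
  return -1;
}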

I've incorporated your other changes.

Thanks,
Florian

Patch

2015-10-12  Florian Weimer  <fweimer@redhat.com>
 
	[BZ #19074]
	* resolv/res_hconf.c (_res_hconf_reorder_addrs): Use atomics to
	load and store num_ifs.

diff --git a/NEWS b/NEWS
index a0a91b5..805c7b9 100644
--- a/NEWS
+++ b/NEWS
@@ -49,7 +49,7 @@  Version 2.22
   18533, 18534, 18536, 18539, 18540, 18542, 18544, 18545, 18546, 18547,
   18549, 18553, 18557, 18558, 18569, 18583, 18585, 18586, 18592, 18593,
   18594, 18602, 18612, 18613, 18619, 18633, 18635, 18641, 18643, 18648,
-  18657, 18676, 18694, 18696, 18887.
+  18657, 18676, 18694, 18696, 18887, 19074.
 
 * Cache information can be queried via sysconf() function on s390 e.g. with
   _SC_LEVEL1_ICACHE_SIZE as argument.
diff --git a/resolv/res_hconf.c b/resolv/res_hconf.c
index 692d948..00b620d 100644
--- a/resolv/res_hconf.c
+++ b/resolv/res_hconf.c
@@ -45,6 +45,7 @@ 
 #include "ifreq.h"
 #include "res_hconf.h"
 #include <wchar.h>
+#include <atomic.h>
 
 #if IS_IN (libc)
 # define fgets_unlocked __fgets_unlocked
@@ -391,9 +392,14 @@  _res_hconf_reorder_addrs (struct hostent *hp)
 {
 #if defined SIOCGIFCONF && defined SIOCGIFNETMASK
   int i, j;
-  /* Number of interfaces.  */
+  /* Number of interfaces.  Also serves as a flag for the
+     double-checked locking idiom.  */
   static int num_ifs = -1;
-  /* We need to protect the dynamic buffer handling.  */
+  /* Local copy of num_ifs, for non-atomic access.  */
+  int num_ifs_local;
+  /* We need to protect the dynamic buffer handling.  The lock is only
+     acquired during initialization.  Afterwards, a positive num_ifs
+     value indicates initialization.  */
   __libc_lock_define_initialized (static, lock);
 
   /* Only reorder if we're supposed to.  */
@@ -404,7 +410,10 @@  _res_hconf_reorder_addrs (struct hostent *hp)
   if (hp->h_addrtype != AF_INET)
     return;
 
-  if (num_ifs <= 0)
+  /* This load synchronizes with the release MO store in the
+     initialization block below.  */
+  num_ifs_local = atomic_load_acquire (&num_ifs);
+  if (num_ifs_local <= 0)
     {
       struct ifreq *ifr, *cur_ifr;
       int sd, num, i;
@@ -421,9 +430,18 @@  _res_hconf_reorder_addrs (struct hostent *hp)
       /* Get lock.  */
       __libc_lock_lock (lock);
 
-      /* Recheck, somebody else might have done the work by now.  */
-      if (num_ifs <= 0)
+      /* Recheck, somebody else might have done the work by now.  A
+	 relaxed load is sufficient because we have the lock, and
+	 num_ifs is only updated under the lock.  */
+      num_ifs_local = atomic_load_relaxed (&num_ifs);
+      if (num_ifs_local <= 0)
 	{
+	  /* This is the only block which writes to num_ifs.  It can
+	     be executed several times (sequentially) if
+	     initialization does not yield any interfaces, and num_ifs
+	     remains zero.  However, once we stored a positive value
+	     in num_ifs below, this block cannot be entered again due
+	     to the condition above.  */
 	  int new_num_ifs = 0;
 
 	  /* Get a list of interfaces.  */
@@ -472,7 +490,14 @@  _res_hconf_reorder_addrs (struct hostent *hp)
 	  /* Release lock, preserve error value, and close socket.  */
 	  errno = save;
 
-	  num_ifs = new_num_ifs;
+	  /* Advertise successful initialization if new_num_ifs is
+	     positive (and no updates to ifaddrs are permitted after
+	     that).  Otherwise, num_ifs remains unchanged, at zero.
+	     This store synchronizes with the initial acquire MO
+	     load.  */
+	  atomic_store_release (&num_ifs, new_num_ifs);
+	  /* Keep the local copy current, to save another load.  */
+	  num_ifs_local = new_num_ifs;
 	}
 
       __libc_lock_unlock (lock);
@@ -480,15 +505,43 @@  _res_hconf_reorder_addrs (struct hostent *hp)
       __close (sd);
     }
 
-  if (num_ifs == 0)
+  /* num_ifs_local cannot be negative because the if statement above
+     covered this case.  It can still be zero if we just performed
+     initialization, but could not find any interfaces.  */
+  if (num_ifs_local == 0)
     return;
 
+  /* The code below accesses ifaddrs, so we need to ensure that the
+     initialization happens-before this point.
+
+     The actual initialization is sequenced-before the release store
+     to num_ifs, and sequenced-before the end of the critical section.
+
+     This means there are three possible executions:
+
+     (1) The thread that initialized the data also uses it, so
+         sequenced-before is sufficient to ensure happens-before.
+
+     (2) The release MO store of num_ifs synchronizes-with the acquire
+         MO load, and the acquire MO load is sequenced before the use
+         of the initialized data below.
+
+     (3) We enter the critical section, and the relaxed MO load of
+         num_fis yields a positive value.  The write to ifaddrs is
+         sequenced-before leaving the critical section.  Leaving the
+         critical section happens-before we entered the critical
+         section ourselves, which means that the the write to ifaddrs
+         happens-before this point.
+
+     Consequently, all potential writes to ifaddrs (and the data it
+     points to) happens-before this point.  */
+
   /* Find an address for which we have a direct connection.  */
   for (i = 0; hp->h_addr_list[i]; ++i)
     {
       struct in_addr *haddr = (struct in_addr *) hp->h_addr_list[i];
 
-      for (j = 0; j < num_ifs; ++j)
+      for (j = 0; j < num_ifs_local; ++j)
 	{
 	  u_int32_t if_addr    = ifaddrs[j].u.ipv4.addr;
 	  u_int32_t if_netmask = ifaddrs[j].u.ipv4.mask;
-- 
2.4.3