diff mbox series

[v3,15/32] elf: Remove run-time-writable fields from struct link_map_private

Message ID 7c52d4742aa9032d9b6fca119c06a3372c7edbfc.1701944612.git.fweimer@redhat.com
State New
Headers show
Series RELRO linkmaps | expand

Commit Message

Florian Weimer Dec. 7, 2023, 10:32 a.m. UTC
And introduce struct link_map_rw.

These fields are written during run-time relocation (for lazy binding)
or during dlopen, so they are difficult to handle efficiently with
otherwise read-only link maps.  Moving them into a separate allocation
makes it possible to keep these fields read-write while the rest of the link
map is read-only.
---
 elf/circleload1.c               |  4 +-
 elf/dl-call_fini.c              |  2 +-
 elf/dl-close.c                  | 33 ++++++++--------
 elf/dl-deps.c                   | 14 +++----
 elf/dl-find_object.c            |  2 +-
 elf/dl-fini.c                   |  6 +--
 elf/dl-init.c                   |  4 +-
 elf/dl-lookup.c                 | 47 ++++++++++++-----------
 elf/dl-object.c                 |  6 +++
 elf/dl-open.c                   | 30 +++++++--------
 elf/dl-sort-maps.c              | 13 ++++---
 elf/dl-support.c                |  1 +
 elf/get-dynamic-info.h          |  2 +-
 elf/loadtest.c                  |  8 ++--
 elf/neededtest.c                |  5 +--
 elf/neededtest2.c               |  4 +-
 elf/neededtest3.c               |  4 +-
 elf/neededtest4.c               |  4 +-
 elf/rtld.c                      |  5 ++-
 elf/unload.c                    |  5 +--
 elf/unload2.c                   |  5 +--
 include/link.h                  | 67 ++++++++++++++++++++-------------
 stdlib/cxa_thread_atexit_impl.c |  4 +-
 sysdeps/x86/dl-prop.h           |  2 +-
 24 files changed, 151 insertions(+), 126 deletions(-)

Comments

Joseph Myers Feb. 23, 2024, 12:09 a.m. UTC | #1
On Thu, 7 Dec 2023, Florian Weimer wrote:

> -	  unsigned int max
> -	    = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
> +	  unsigned int max;
> +	  if (undef_map->l_rw->l_reldepsmax > 0)
> +	    max = undef_map->l_rw->l_reldepsmax;
> +	  else
> +	    max = 10;

Any particular reason for the change of logic here (removing the "* 2")?

> @@ -1122,8 +1125,6 @@ rtld_setup_main_map (struct link_map_private *main_map)
>    main_map->l_map_end = 0;
>    /* Perhaps the executable has no PT_LOAD header entries at all.  */
>    main_map->l_map_start = ~0;
> -  /* And it was opened directly.  */
> -  ++main_map->l_direct_opencount;
>    main_map->l_contiguous = 1;

I suppose this value of l_direct_opencount doesn't get used, so it doesn't 
matter that it's no longer incremented here?

> +  /* Reference count for dlopen/dlclose.  See the l_direct_opencount
> +     accessor function below.  */
> +  unsigned int l_direct_opencount;

I don't see any such accessor function in this patch.
diff mbox series

Patch

diff --git a/elf/circleload1.c b/elf/circleload1.c
index dcf04bc25a..6252bd2961 100644
--- a/elf/circleload1.c
+++ b/elf/circleload1.c
@@ -29,8 +29,8 @@  check_loaded_objects (const char **loaded)
   for (lm = MAPS; lm != NULL; lm = l_next (lm))
     {
       if (lm->l_public.l_name && lm->l_public.l_name[0])
-	printf(" %s, count = %d\n", lm->l_public.l_name,
-	       (int) lm->l_direct_opencount);
+	printf(" %s, count = %u\n",
+	       lm->l_public.l_name, lm->l_rw->l_direct_opencount);
       if (lm->l_type == lt_loaded && lm->l_public.l_name)
 	{
 	  int match = 0;
diff --git a/elf/dl-call_fini.c b/elf/dl-call_fini.c
index a9d60e9803..7c5d7e02c9 100644
--- a/elf/dl-call_fini.c
+++ b/elf/dl-call_fini.c
@@ -30,7 +30,7 @@  _dl_call_fini (void *closure_map)
                       map->l_public.l_name, map->l_ns);
 
   /* Make sure nothing happens if we are called twice.  */
-  map->l_init_called = 0;
+  map->l_rw->l_init_called = 0;
 
   ElfW(Dyn) *fini_array = map->l_info[DT_FINI_ARRAY];
   if (fini_array != NULL)
diff --git a/elf/dl-close.c b/elf/dl-close.c
index 7222b21cf0..1af60845f5 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -109,23 +109,23 @@  void
 _dl_close_worker (struct link_map_private *map, bool force)
 {
   /* One less direct use.  */
-  --map->l_direct_opencount;
+  --map->l_rw->l_direct_opencount;
 
   /* If _dl_close is called recursively (some destructor call dlclose),
      just record that the parent _dl_close will need to do garbage collection
      again and return.  */
   static enum { not_pending, pending, rerun } dl_close_state;
 
-  if (map->l_direct_opencount > 0 || map->l_type != lt_loaded
+  if (map->l_rw->l_direct_opencount > 0 || map->l_type != lt_loaded
       || dl_close_state != not_pending)
     {
-      if (map->l_direct_opencount == 0 && map->l_type == lt_loaded)
+      if (map->l_rw->l_direct_opencount == 0 && map->l_type == lt_loaded)
 	dl_close_state = rerun;
 
       /* There are still references to this object.  Do nothing more.  */
       if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
 	_dl_debug_printf ("\nclosing file=%s; direct_opencount=%u\n",
-			  map->l_public.l_name, map->l_direct_opencount);
+			  map->l_public.l_name, map->l_rw->l_direct_opencount);
 
       return;
     }
@@ -175,11 +175,11 @@  _dl_close_worker (struct link_map_private *map, bool force)
 
       /* Check whether this object is still used.  */
       if (l->l_type == lt_loaded
-	  && l->l_direct_opencount == 0
-	  && !l->l_nodelete_active
+	  && l->l_rw->l_direct_opencount == 0
+	  && !l->l_rw->l_nodelete_active
 	  /* See CONCURRENCY NOTES in cxa_thread_atexit_impl.c to know why
 	     acquire is sufficient and correct.  */
-	  && atomic_load_acquire (&l->l_tls_dtor_count) == 0
+	  && atomic_load_acquire (&l->l_rw->l_tls_dtor_count) == 0
 	  && !l->l_map_used)
 	continue;
 
@@ -217,10 +217,10 @@  _dl_close_worker (struct link_map_private *map, bool force)
 	    }
 	}
       /* And the same for relocation dependencies.  */
-      if (l->l_reldeps != NULL)
-	for (unsigned int j = 0; j < l->l_reldeps->act; ++j)
+      if (l->l_rw->l_reldeps != NULL)
+	for (unsigned int j = 0; j < l->l_rw->l_reldeps->act; ++j)
 	  {
-	    struct link_map_private *jmap = l->l_reldeps->list[j];
+	    struct link_map_private *jmap = l->l_rw->l_reldeps->list[j];
 
 	    if (jmap->l_idx != IDX_STILL_USED)
 	      {
@@ -255,12 +255,12 @@  _dl_close_worker (struct link_map_private *map, bool force)
 
       if (!imap->l_map_used)
 	{
-	  assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
+	  assert (imap->l_type == lt_loaded && !imap->l_rw->l_nodelete_active);
 
 	  /* Call its termination function.  Do not do it for
 	     half-cooked objects.  Temporarily disable exception
 	     handling, so that errors are fatal.  */
-	  if (imap->l_init_called)
+	  if (imap->l_rw->l_init_called)
 	    _dl_catch_exception (NULL, _dl_call_fini, imap);
 
 #ifdef SHARED
@@ -507,7 +507,7 @@  _dl_close_worker (struct link_map_private *map, bool force)
 	      if (GL(dl_tls_dtv_slotinfo_list) != NULL
 		  && ! remove_slotinfo (imap->l_tls_modid,
 					GL(dl_tls_dtv_slotinfo_list), 0,
-					imap->l_init_called))
+					imap->l_rw->l_init_called))
 		/* All dynamically loaded modules with TLS are unloaded.  */
 		/* Can be read concurrently.  */
 		atomic_store_relaxed (&GL(dl_tls_max_dtv_idx),
@@ -663,7 +663,8 @@  _dl_close_worker (struct link_map_private *map, bool force)
 	  if (imap->l_origin != (char *) -1)
 	    free ((char *) imap->l_origin);
 
-	  free (imap->l_reldeps);
+	  free (imap->l_rw->l_reldeps);
+	  free (imap->l_rw);
 
 	  /* Print debugging message.  */
 	  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
@@ -769,7 +770,7 @@  _dl_close (void *_map)
      before we took the lock. There is no way to detect this (see below)
      so we proceed assuming this isn't the case.  First see whether we
      can remove the object at all.  */
-  if (__glibc_unlikely (map->l_nodelete_active))
+  if (__glibc_unlikely (map->l_rw->l_nodelete_active))
     {
       /* Nope.  Do nothing.  */
       __rtld_lock_unlock_recursive (GL(dl_load_lock));
@@ -786,7 +787,7 @@  _dl_close (void *_map)
      should be a detectable case and given that dlclose should be threadsafe
      we need this to be a reliable detection.
      This is bug 20990. */
-  if (__builtin_expect (map->l_direct_opencount, 1) == 0)
+  if (__builtin_expect (map->l_rw->l_direct_opencount, 1) == 0)
     {
       __rtld_lock_unlock_recursive (GL(dl_load_lock));
       _dl_signal_error (0, map->l_public.l_name, NULL,
diff --git a/elf/dl-deps.c b/elf/dl-deps.c
index c730713167..1e759fd895 100644
--- a/elf/dl-deps.c
+++ b/elf/dl-deps.c
@@ -483,20 +483,20 @@  _dl_map_object_deps (struct link_map_private *map,
 
   /* Maybe we can remove some relocation dependencies now.  */
   struct link_map_reldeps *l_reldeps = NULL;
-  if (map->l_reldeps != NULL)
+  if (map->l_rw->l_reldeps != NULL)
     {
       for (i = 0; i < nlist; ++i)
 	map->l_searchlist.r_list[i]->l_reserved = 1;
 
       /* Avoid removing relocation dependencies of the main binary.  */
       map->l_reserved = 0;
-      struct link_map_private **list = &map->l_reldeps->list[0];
-      for (i = 0; i < map->l_reldeps->act; ++i)
+      struct link_map_private **list = &map->l_rw->l_reldeps->list[0];
+      for (i = 0; i < map->l_rw->l_reldeps->act; ++i)
 	if (list[i]->l_reserved)
 	  {
 	    /* Need to allocate new array of relocation dependencies.  */
 	    l_reldeps = malloc (sizeof (*l_reldeps)
-				+ map->l_reldepsmax
+				+ map->l_rw->l_reldepsmax
 				  * sizeof (struct link_map_private *));
 	    if (l_reldeps == NULL)
 	      /* Bad luck, keep the reldeps duplicated between
@@ -507,7 +507,7 @@  _dl_map_object_deps (struct link_map_private *map,
 		unsigned int j = i;
 		memcpy (&l_reldeps->list[0], &list[0],
 			i * sizeof (struct link_map_private *));
-		for (i = i + 1; i < map->l_reldeps->act; ++i)
+		for (i = i + 1; i < map->l_rw->l_reldeps->act; ++i)
 		  if (!list[i]->l_reserved)
 		    l_reldeps->list[j++] = list[i];
 		l_reldeps->act = j;
@@ -552,8 +552,8 @@  _dl_map_object_deps (struct link_map_private *map,
   if (l_reldeps != NULL)
     {
       atomic_write_barrier ();
-      void *old_l_reldeps = map->l_reldeps;
-      map->l_reldeps = l_reldeps;
+      void *old_l_reldeps = map->l_rw->l_reldeps;
+      map->l_rw->l_reldeps = l_reldeps;
       _dl_scope_free (old_l_reldeps);
     }
   if (old_l_initfini != NULL)
diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index 5042b0a8c1..f81351b0ef 100644
--- a/elf/dl-find_object.c
+++ b/elf/dl-find_object.c
@@ -508,7 +508,7 @@  _dlfo_process_initial (void)
       if (l != main_map && l == l->l_real)
         {
           /* lt_library link maps are implicitly NODELETE.  */
-          if (l->l_type == lt_library || l->l_nodelete_active)
+          if (l->l_type == lt_library || l->l_rw->l_nodelete_active)
             {
               if (_dlfo_nodelete_mappings != NULL)
                 /* Second pass only.  */
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 2e10f1b0b6..5c78159fee 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -82,7 +82,7 @@  _dl_fini (void)
 
 		/* Bump l_direct_opencount of all objects so that they
 		   are not dlclose()ed from underneath us.  */
-		++l->l_direct_opencount;
+		++l->l_rw->l_direct_opencount;
 	      }
 	  assert (ns != LM_ID_BASE || i == nloaded);
 	  assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
@@ -109,7 +109,7 @@  _dl_fini (void)
 	    {
 	      struct link_map_private *l = maps[i];
 
-	      if (l->l_init_called)
+	      if (l->l_rw->l_init_called)
 		{
 		  _dl_call_fini (l);
 #ifdef SHARED
@@ -119,7 +119,7 @@  _dl_fini (void)
 		}
 
 	      /* Correct the previous increment.  */
-	      --l->l_direct_opencount;
+	      --l->l_rw->l_direct_opencount;
 	    }
 
 #ifdef SHARED
diff --git a/elf/dl-init.c b/elf/dl-init.c
index b220ca9239..0e44e6c24a 100644
--- a/elf/dl-init.c
+++ b/elf/dl-init.c
@@ -34,13 +34,13 @@  call_init (struct link_map_private *l, int argc, char **argv, char **env)
      need relocation.)  */
   assert (l->l_relocated || l->l_type == lt_executable);
 
-  if (l->l_init_called)
+  if (l->l_rw->l_init_called)
     /* This object is all done.  */
     return;
 
   /* Avoid handling this constructor again in case we have a circular
      dependency.  */
-  l->l_init_called = 1;
+  l->l_rw->l_init_called = 1;
 
   /* Check for object which constructors we do not run here.  */
   if (__builtin_expect (l->l_public.l_name[0], 'a') == '\0'
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index d3c705811c..1cfaedbd4e 100644
--- a/elf/dl-lookup.c
+++ b/elf/dl-lookup.c
@@ -175,9 +175,9 @@  static void
 mark_nodelete (struct link_map_private *map, int flags)
 {
   if (flags & DL_LOOKUP_FOR_RELOCATE)
-    map->l_nodelete_pending = true;
+    map->l_rw->l_nodelete_pending = true;
   else
-    map->l_nodelete_active = true;
+    map->l_rw->l_nodelete_active = true;
 }
 
 /* Return true if MAP is marked as NODELETE according to the lookup
@@ -187,8 +187,8 @@  is_nodelete (struct link_map_private *map, int flags)
 {
   /* Non-pending NODELETE always counts.  Pending NODELETE only counts
      during initial relocation processing.  */
-  return map->l_nodelete_active
-    || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_nodelete_pending);
+  return map->l_rw->l_nodelete_active
+    || ((flags & DL_LOOKUP_FOR_RELOCATE) && map->l_rw->l_nodelete_pending);
 }
 
 /* Utility function for do_lookup_x. Lookup an STB_GNU_UNIQUE symbol
@@ -535,7 +535,7 @@  add_dependency (struct link_map_private *undef_map,
     return 0;
 
   struct link_map_reldeps *l_reldeps
-    = atomic_forced_read (undef_map->l_reldeps);
+    = atomic_forced_read (undef_map->l_rw->l_reldeps);
 
   /* Make sure l_reldeps is read before l_initfini.  */
   atomic_read_barrier ();
@@ -595,22 +595,23 @@  add_dependency (struct link_map_private *undef_map,
 
       /* Redo the l_reldeps check if undef_map's l_reldeps changed in
 	 the mean time.  */
-      if (undef_map->l_reldeps != NULL)
+      if (undef_map->l_rw->l_reldeps != NULL)
 	{
-	  if (undef_map->l_reldeps != l_reldeps)
+	  if (undef_map->l_rw->l_reldeps != l_reldeps)
 	    {
-	      struct link_map_private **list = &undef_map->l_reldeps->list[0];
-	      l_reldepsact = undef_map->l_reldeps->act;
+	      struct link_map_private **list
+		= &undef_map->l_rw->l_reldeps->list[0];
+	      l_reldepsact = undef_map->l_rw->l_reldeps->act;
 	      for (i = 0; i < l_reldepsact; ++i)
 		if (list[i] == map)
 		  goto out_check;
 	    }
-	  else if (undef_map->l_reldeps->act > l_reldepsact)
+	  else if (undef_map->l_rw->l_reldeps->act > l_reldepsact)
 	    {
 	      struct link_map_private **list
-		= &undef_map->l_reldeps->list[0];
+		= &undef_map->l_rw->l_reldeps->list[0];
 	      i = l_reldepsact;
-	      l_reldepsact = undef_map->l_reldeps->act;
+	      l_reldepsact = undef_map->l_rw->l_reldeps->act;
 	      for (; i < l_reldepsact; ++i)
 		if (list[i] == map)
 		  goto out_check;
@@ -666,14 +667,17 @@  marking %s [%lu] as NODELETE due to reference from %s [%lu]\n",
 	}
 
       /* Add the reference now.  */
-      if (__glibc_unlikely (l_reldepsact >= undef_map->l_reldepsmax))
+      if (__glibc_unlikely (l_reldepsact >= undef_map->l_rw->l_reldepsmax))
 	{
 	  /* Allocate more memory for the dependency list.  Since this
 	     can never happen during the startup phase we can use
 	     `realloc'.  */
 	  struct link_map_reldeps *newp;
-	  unsigned int max
-	    = undef_map->l_reldepsmax ? undef_map->l_reldepsmax * 2 : 10;
+	  unsigned int max;
+	  if (undef_map->l_rw->l_reldepsmax > 0)
+	    max = undef_map->l_rw->l_reldepsmax * 2;
+	  else
+	    max = 10;
 
 #ifdef RTLD_PREPARE_FOREIGN_CALL
 	  RTLD_PREPARE_FOREIGN_CALL;
@@ -701,23 +705,24 @@  marking %s [%lu] as NODELETE due to memory allocation failure\n",
 	  else
 	    {
 	      if (l_reldepsact)
-		memcpy (&newp->list[0], &undef_map->l_reldeps->list[0],
+		memcpy (&newp->list[0],
+			&undef_map->l_rw->l_reldeps->list[0],
 			l_reldepsact * sizeof (struct link_map_private *));
 	      newp->list[l_reldepsact] = map;
 	      newp->act = l_reldepsact + 1;
 	      atomic_write_barrier ();
-	      void *old = undef_map->l_reldeps;
-	      undef_map->l_reldeps = newp;
-	      undef_map->l_reldepsmax = max;
+	      void *old = undef_map->l_rw->l_reldeps;
+	      undef_map->l_rw->l_reldeps = newp;
+	      undef_map->l_rw->l_reldepsmax = max;
 	      if (old)
 		_dl_scope_free (old);
 	    }
 	}
       else
 	{
-	  undef_map->l_reldeps->list[l_reldepsact] = map;
+	  undef_map->l_rw->l_reldeps->list[l_reldepsact] = map;
 	  atomic_write_barrier ();
-	  undef_map->l_reldeps->act = l_reldepsact + 1;
+	  undef_map->l_rw->l_reldeps->act = l_reldepsact + 1;
 	}
 
       /* Display information if we are debugging.  */
diff --git a/elf/dl-object.c b/elf/dl-object.c
index 3e06e22ab2..c6c0f7824b 100644
--- a/elf/dl-object.c
+++ b/elf/dl-object.c
@@ -94,6 +94,12 @@  _dl_new_object (char *realname, const char *libname, int type,
 		+ sizeof (*newname) + libname_len, 1);
   if (new == NULL)
     return NULL;
+  new->l_rw = calloc (1, sizeof (*new->l_rw));
+  if (new->l_rw == NULL)
+    {
+      free (new);
+      return NULL;
+    }
 
   new->l_real = new;
   new->l_symbolic_searchlist.r_list
diff --git a/elf/dl-open.c b/elf/dl-open.c
index 542889a6b8..306cdcc6ac 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -263,7 +263,7 @@  resize_scopes (struct link_map_private *new)
 
       /* If the initializer has been called already, the object has
 	 not been loaded here and now.  */
-      if (imap->l_init_called && imap->l_type == lt_loaded)
+      if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
 	{
 	  if (scope_has_map (imap, new))
 	    /* Avoid duplicates.  */
@@ -327,7 +327,7 @@  update_scopes (struct link_map_private *new)
       struct link_map_private *imap = new->l_searchlist.r_list[i];
       int from_scope = 0;
 
-      if (imap->l_init_called && imap->l_type == lt_loaded)
+      if (imap->l_rw->l_init_called && imap->l_type == lt_loaded)
 	{
 	  if (scope_has_map (imap, new))
 	    /* Avoid duplicates.  */
@@ -368,7 +368,7 @@  resize_tls_slotinfo (struct link_map_private *new)
 
       /* Only add TLS memory if this object is loaded now and
 	 therefore is not yet initialized.  */
-      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
+      if (! imap->l_rw->l_init_called && imap->l_tls_blocksize > 0)
 	{
 	  _dl_add_to_slotinfo (imap, false);
 	  any_tls = true;
@@ -390,7 +390,7 @@  update_tls_slotinfo (struct link_map_private *new)
 
       /* Only add TLS memory if this object is loaded now and
 	 therefore is not yet initialized.  */
-      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
+      if (! imap->l_rw->l_init_called && imap->l_tls_blocksize > 0)
 	{
 	  _dl_add_to_slotinfo (imap, true);
 
@@ -415,7 +415,7 @@  TLS generation counter wrapped!  Please report this."));
       struct link_map_private *imap = new->l_searchlist.r_list[i];
 
       if (imap->l_need_tls_init
-	  && ! imap->l_init_called
+	  && ! imap->l_rw->l_init_called
 	  && imap->l_tls_blocksize > 0)
 	{
 	  /* For static TLS we have to allocate the memory here and
@@ -451,7 +451,7 @@  activate_nodelete (struct link_map_private *new)
      NODELETE status for objects outside the local scope.  */
   for (struct link_map_private *l = GL (dl_ns)[new->l_ns]._ns_loaded;
        l != NULL; l = l_next (l))
-    if (l->l_nodelete_pending)
+    if (l->l_rw->l_nodelete_pending)
       {
 	if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
 	  _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
@@ -460,11 +460,11 @@  activate_nodelete (struct link_map_private *new)
 	/* The flag can already be true at this point, e.g. a signal
 	   handler may have triggered lazy binding and set NODELETE
 	   status immediately.  */
-	l->l_nodelete_active = true;
+	l->l_rw->l_nodelete_active = true;
 
 	/* This is just a debugging aid, to indicate that
 	   activate_nodelete has run for this map.  */
-	l->l_nodelete_pending = false;
+	l->l_rw->l_nodelete_pending = false;
       }
 }
 
@@ -503,7 +503,7 @@  _dl_open_relocate_one_object (struct dl_open_args *args, struct r_debug *r,
 	  _dl_start_profile ();
 
 	  /* Prevent unloading the object.  */
-	  GL(dl_profile_map)->l_nodelete_active = true;
+	  GL(dl_profile_map)->l_rw->l_nodelete_active = true;
 	}
     }
   else
@@ -591,7 +591,7 @@  dl_open_worker_begin (void *a)
     return;
 
   /* This object is directly loaded.  */
-  ++new->l_direct_opencount;
+  ++new->l_rw->l_direct_opencount;
 
   /* It was already open.  */
   if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
@@ -600,7 +600,7 @@  dl_open_worker_begin (void *a)
       if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
 	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
 			  new->l_public.l_name, new->l_ns,
-			  new->l_direct_opencount);
+			  new->l_rw->l_direct_opencount);
 
       /* If the user requested the object to be in the global
 	 namespace but it is not so far, prepare to add it now.  This
@@ -613,10 +613,10 @@  dl_open_worker_begin (void *a)
       if (__glibc_unlikely (mode & RTLD_NODELETE))
 	{
 	  if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
-	      && !new->l_nodelete_active)
+	      && !new->l_rw->l_nodelete_active)
 	    _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
 			      new->l_public.l_name, new->l_ns);
-	  new->l_nodelete_active = true;
+	  new->l_rw->l_nodelete_active = true;
 	}
 
       /* Finalize the addition to the global scope.  */
@@ -633,7 +633,7 @@  dl_open_worker_begin (void *a)
   /* Schedule NODELETE marking for the directly loaded object if
      requested.  */
   if (__glibc_unlikely (mode & RTLD_NODELETE))
-    new->l_nodelete_pending = true;
+    new->l_rw->l_nodelete_pending = true;
 
   /* Load that object's dependencies.  */
   _dl_map_object_deps (new, NULL, 0, 0,
@@ -838,7 +838,7 @@  dl_open_worker (void *a)
   if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
     _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
 		      new->l_public.l_name, new->l_ns,
-		      new->l_direct_opencount);
+		      new->l_rw->l_direct_opencount);
 }
 
 void *
diff --git a/elf/dl-sort-maps.c b/elf/dl-sort-maps.c
index bcc49fa0e9..e3a547e4da 100644
--- a/elf/dl-sort-maps.c
+++ b/elf/dl-sort-maps.c
@@ -87,10 +87,11 @@  _dl_sort_maps_original (struct link_map_private **maps, unsigned int nmaps,
 		  goto next;
 		}
 
-	  if (__glibc_unlikely (for_fini && maps[k]->l_reldeps != NULL))
+	  if (__glibc_unlikely (for_fini && maps[k]->l_rw->l_reldeps != NULL))
 	    {
-	      unsigned int m = maps[k]->l_reldeps->act;
-	      struct link_map_private **relmaps = &maps[k]->l_reldeps->list[0];
+	      unsigned int m = maps[k]->l_rw->l_reldeps->act;
+	      struct link_map_private **relmaps
+		= &maps[k]->l_rw->l_reldeps->list[0];
 
 	      /* Look through the relocation dependencies of the object.  */
 	      while (m-- > 0)
@@ -153,15 +154,15 @@  dfs_traversal (struct link_map_private ***rpo, struct link_map_private *map,
 	}
     }
 
-  if (__glibc_unlikely (do_reldeps != NULL && map->l_reldeps != NULL))
+  if (__glibc_unlikely (do_reldeps != NULL && map->l_rw->l_reldeps != NULL))
     {
       /* Indicate that we encountered relocation dependencies during
 	 traversal.  */
       *do_reldeps = true;
 
-      for (int m = map->l_reldeps->act - 1; m >= 0; m--)
+      for (int m = map->l_rw->l_reldeps->act - 1; m >= 0; m--)
 	{
-	  struct link_map_private *dep = map->l_reldeps->list[m];
+	  struct link_map_private *dep = map->l_rw->l_reldeps->list[m];
 	  if (dep->l_visited == 0
 	      && dep->l_main_map == 0)
 	    dfs_traversal (rpo, dep, do_reldeps);
diff --git a/elf/dl-support.c b/elf/dl-support.c
index 3648dd4d05..9c422baa9b 100644
--- a/elf/dl-support.c
+++ b/elf/dl-support.c
@@ -81,6 +81,7 @@  int _dl_bind_not;
 static struct link_map_private _dl_main_map =
   {
     .l_public = { .l_name = (char *) "", },
+    .l_rw = &(struct link_map_rw) { },
     .l_real = &_dl_main_map,
     .l_ns = LM_ID_BASE,
     .l_libname = &(struct libname_list) { .name = "", .dont_free = 1 },
diff --git a/elf/get-dynamic-info.h b/elf/get-dynamic-info.h
index 066395eab5..ce5f40834b 100644
--- a/elf/get-dynamic-info.h
+++ b/elf/get-dynamic-info.h
@@ -163,7 +163,7 @@  elf_get_dynamic_info (struct link_map_private *l, bool bootstrap,
 	{
 	  l->l_flags_1 = info[VERSYMIDX (DT_FLAGS_1)]->d_un.d_val;
 	  if (l->l_flags_1 & DF_1_NODELETE)
-	    l->l_nodelete_pending = true;
+	    l->l_rw->l_nodelete_pending = true;
 
 	  /* Only DT_1_SUPPORTED_MASK bits are supported, and we would like
 	     to assert this, but we can't. Users have been setting
diff --git a/elf/loadtest.c b/elf/loadtest.c
index ca7b634347..15355dd8f9 100644
--- a/elf/loadtest.c
+++ b/elf/loadtest.c
@@ -77,9 +77,9 @@  static const struct
     {								\
       for (map = MAPS; map != NULL; map = l_next (map))		\
 	if (map->l_type == lt_loaded)				\
-	  printf ("name = \"%s\", direct_opencount = %d\n",	\
+	  printf ("name = \"%s\", direct_opencount = %u\n",	\
 		  map->l_public.l_name,				\
-		  (int) map->l_direct_opencount);		\
+		  map->l_rw->l_direct_opencount);		\
       fflush (stdout);						\
     }								\
   while (0)
@@ -191,8 +191,8 @@  main (int argc, char *argv[])
   for (map = MAPS; map != NULL; map = l_next (map))
     if (map->l_type == lt_loaded)
       {
-	printf ("name = \"%s\", direct_opencount = %d\n",
-		map->l_public.l_name, (int) map->l_direct_opencount);
+	printf ("name = \"%s\", direct_opencount = %u\n",
+		map->l_public.l_name, map->l_rw->l_direct_opencount);
 	result = 1;
       }
 
diff --git a/elf/neededtest.c b/elf/neededtest.c
index 1fce50b81a..7a555f7780 100644
--- a/elf/neededtest.c
+++ b/elf/neededtest.c
@@ -29,9 +29,8 @@  check_loaded_objects (const char **loaded)
   for (lm = MAPS; lm; lm = l_next (lm))
     {
       if (lm->l_public.l_name && lm->l_public.l_name[0])
-	printf(" %s, count = %d\n",
-	       lm->l_public.l_name,
-	       (int) lm->l_direct_opencount);
+	printf(" %s, count = %u\n",
+	       lm->l_public.l_name, lm->l_rw->l_direct_opencount);
       if (lm->l_type == lt_loaded && lm->l_public.l_name)
 	{
 	  int match = 0;
diff --git a/elf/neededtest2.c b/elf/neededtest2.c
index 00b5dd0cb1..c5ae7dbe71 100644
--- a/elf/neededtest2.c
+++ b/elf/neededtest2.c
@@ -29,8 +29,8 @@  check_loaded_objects (const char **loaded)
   for (lm = MAPS; lm; lm = l_next (lm))
     {
       if (lm->l_public.l_name && lm->l_public.l_name[0])
-	printf(" %s, count = %d\n",
-	       lm->l_public.l_name, (int) lm->l_direct_opencount);
+	printf(" %s, count = %u\n",
+	       lm->l_public.l_name, lm->l_rw->l_direct_opencount);
       if (lm->l_type == lt_loaded && lm->l_public.l_name)
 	{
 	  int match = 0;
diff --git a/elf/neededtest3.c b/elf/neededtest3.c
index cb625649fa..a32547a646 100644
--- a/elf/neededtest3.c
+++ b/elf/neededtest3.c
@@ -29,8 +29,8 @@  check_loaded_objects (const char **loaded)
   for (lm = MAPS; lm; lm = l_next (lm))
     {
       if (lm->l_public.l_name && lm->l_public.l_name[0])
-	printf(" %s, count = %d\n",
-	       lm->l_public.l_name, (int) lm->l_direct_opencount);
+	printf(" %s, count = %u\n",
+	       lm->l_public.l_name, lm->l_rw->l_direct_opencount);
       if (lm->l_type == lt_loaded && lm->l_public.l_name)
 	{
 	  int match = 0;
diff --git a/elf/neededtest4.c b/elf/neededtest4.c
index 9f5d5fcbc3..58dc13f015 100644
--- a/elf/neededtest4.c
+++ b/elf/neededtest4.c
@@ -29,8 +29,8 @@  check_loaded_objects (const char **loaded)
   for (lm = MAPS; lm; lm = l_next (lm))
     {
       if (lm->l_public.l_name && lm->l_public.l_name[0])
-	printf(" %s, count = %d\n",
-	       lm->l_public.l_name, (int) lm->l_direct_opencount);
+	printf(" %s, count = %u\n",
+	       lm->l_public.l_name, lm->l_rw->l_direct_opencount);
       if (lm->l_type == lt_loaded && lm->l_public.l_name)
 	{
 	  int match = 0;
diff --git a/elf/rtld.c b/elf/rtld.c
index 92d8fa6fd4..8e1cc38800 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -458,6 +458,9 @@  _dl_start_final (void *arg, struct dl_start_final_info *info)
      interfere with __rtld_static_init.  */
   GLRO (dl_find_object) = &_dl_find_object;
 
+  static struct link_map_rw rtld_map_rw;
+  GL (dl_rtld_map).l_rw = &rtld_map_rw;
+
   /* If it hasn't happen yet record the startup time.  */
   rtld_timer_start (&start_time);
 #if !defined DONT_USE_BOOTSTRAP_MAP
@@ -1122,8 +1125,6 @@  rtld_setup_main_map (struct link_map_private *main_map)
   main_map->l_map_end = 0;
   /* Perhaps the executable has no PT_LOAD header entries at all.  */
   main_map->l_map_start = ~0;
-  /* And it was opened directly.  */
-  ++main_map->l_direct_opencount;
   main_map->l_contiguous = 1;
 
   /* A PT_LOAD segment at an unexpected address will clear the
diff --git a/elf/unload.c b/elf/unload.c
index ab27d9da4a..b86aee4702 100644
--- a/elf/unload.c
+++ b/elf/unload.c
@@ -14,9 +14,8 @@ 
 #define OUT \
   for (map = MAPS; map != NULL; map = l_next (map))			      \
     if (map->l_type == lt_loaded)					      \
-      printf ("name = \"%s\", direct_opencount = %d\n",			      \
-	      map->l_public.l_name,					      \
-	      (int) map->l_direct_opencount);				      \
+      printf ("name = \"%s\", direct_opencount = %u\n",			      \
+	      map->l_public.l_name, map->l_rw->l_direct_opencount);	      \
   fflush (stdout)
 
 typedef struct
diff --git a/elf/unload2.c b/elf/unload2.c
index 3d6b224610..66fde61343 100644
--- a/elf/unload2.c
+++ b/elf/unload2.c
@@ -11,9 +11,8 @@ 
 #define OUT \
   for (map = MAPS; map != NULL; map = l_next (map))			      \
     if (map->l_type == lt_loaded)					      \
-      printf ("name = \"%s\", direct_opencount = %d\n",			      \
-	      map->l_public.l_name,					      \
-	      (int) map->l_direct_opencount);				      \
+      printf ("name = \"%s\", direct_opencount = %u\n",			      \
+	      map->l_public.l_name, map->l_rw->l_direct_opencount);	      \
   fflush (stdout)
 
 int
diff --git a/include/link.h b/include/link.h
index ae76a99c30..03194c0db2 100644
--- a/include/link.h
+++ b/include/link.h
@@ -75,6 +75,43 @@  struct r_search_path_struct
 extern struct r_search_path_struct __rtld_search_dirs attribute_hidden;
 extern struct r_search_path_struct __rtld_env_path_list attribute_hidden;
 
+
+/* Link map attributes that are always readable and writable.  */
+struct link_map_rw
+{
+  /* List of the dependencies introduced through symbol binding.  */
+  struct link_map_reldeps
+  {
+    unsigned int act;
+    struct link_map_private *list[];
+  } *l_reldeps;
+  unsigned int l_reldepsmax;
+
+  /* Reference count for dlopen/dlclose.  Incremented by each dlopen
+     of this map and decremented by dlclose.  */
+  unsigned int l_direct_opencount;
+
+  /* Number of thread_local objects constructed by this DSO.  This is
+     atomically accessed and modified and is not always protected by the load
+     lock.  See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c.  */
+  size_t l_tls_dtor_count;
+
+  /* True if ELF constructors have been called.  */
+  bool l_init_called;
+
+  /* NODELETE status of the map.  Only valid for maps of type
+     lt_loaded.  Lazy binding sets l_nodelete_active directly,
+     potentially from signal handlers.  Initial loading of a
+     DF_1_NODELETE object sets l_nodelete_pending.  Relocation may
+     set l_nodelete_pending as well.  l_nodelete_pending maps are
+     promoted to l_nodelete_active status in the final stages of
+     dlopen, prior to calling ELF constructors.  dlclose only
+     refuses to unload l_nodelete_active maps, the pending status is
+     ignored.  */
+  bool l_nodelete_active;
+  bool l_nodelete_pending;
+};
+
 /* Structure describing a loaded shared object.  The `l_next' and `l_prev'
    members form a chain of all the shared objects loaded at startup.
 
@@ -98,6 +135,9 @@  struct link_map_private
        than one namespace.  */
     struct link_map_private *l_real;
 
+    /* Run-time writable fields.  */
+    struct link_map_rw *l_rw;
+
     /* Number of the namespace this link map belongs to.  */
     Lmid_t l_ns;
 
@@ -157,7 +197,6 @@  struct link_map_private
       const Elf_Symndx *l_buckets;
     };
 
-    unsigned int l_direct_opencount; /* Reference count for dlopen/dlclose.  */
     enum			/* Where this object came from.  */
       {
 	lt_executable,		/* The main executable program.  */
@@ -167,7 +206,6 @@  struct link_map_private
     unsigned int l_dt_relr_ref:1; /* Nonzero if GLIBC_ABI_DT_RELR is
 				     referenced.  */
     unsigned int l_relocated:1;	/* Nonzero if object's relocations done.  */
-    unsigned int l_init_called:1; /* Nonzero if DT_INIT function called.  */
     unsigned int l_global:1;	/* Nonzero if object in _dl_global_scope.  */
     unsigned int l_reserved:2;	/* Reserved for internal use.  */
     unsigned int l_main_map:1;  /* Nonzero for the map of the main program.  */
@@ -200,18 +238,6 @@  struct link_map_private
 					       needs to process this
 					       lt_library map.  */
 
-    /* NODELETE status of the map.  Only valid for maps of type
-       lt_loaded.  Lazy binding sets l_nodelete_active directly,
-       potentially from signal handlers.  Initial loading of an
-       DF_1_NODELETE object set l_nodelete_pending.  Relocation may
-       set l_nodelete_pending as well.  l_nodelete_pending maps are
-       promoted to l_nodelete_active status in the final stages of
-       dlopen, prior to calling ELF constructors.  dlclose only
-       refuses to unload l_nodelete_active maps, the pending status is
-       ignored.  */
-    bool l_nodelete_active;
-    bool l_nodelete_pending;
-
 #include <link_map.h>
 
     /* Collected information about own RPATH directories.  */
@@ -263,14 +289,6 @@  struct link_map_private
     /* List of object in order of the init and fini calls.  */
     struct link_map_private **l_initfini;
 
-    /* List of the dependencies introduced through symbol binding.  */
-    struct link_map_reldeps
-      {
-	unsigned int act;
-	struct link_map_private *list[];
-      } *l_reldeps;
-    unsigned int l_reldepsmax;
-
     /* Nonzero if the DSO is used.  */
     unsigned int l_used;
 
@@ -321,11 +339,6 @@  struct link_map_private
     /* Index of the module in the dtv array.  */
     size_t l_tls_modid;
 
-    /* Number of thread_local objects constructed by this DSO.  This is
-       atomically accessed and modified and is not always protected by the load
-       lock.  See also: CONCURRENCY NOTES in cxa_thread_atexit_impl.c.  */
-    size_t l_tls_dtor_count;
-
     /* Information used to change permission after the relocations are
        done.  */
     ElfW(Addr) l_relro_addr;
diff --git a/stdlib/cxa_thread_atexit_impl.c b/stdlib/cxa_thread_atexit_impl.c
index d35002af30..360cde46a2 100644
--- a/stdlib/cxa_thread_atexit_impl.c
+++ b/stdlib/cxa_thread_atexit_impl.c
@@ -133,7 +133,7 @@  __cxa_thread_atexit_impl (dtor_func func, void *obj, void *dso_symbol)
      _dl_close_worker is protected by the dl_load_lock.  The execution in
      __call_tls_dtors does not really depend on this value beyond the fact that
      it should be atomic, so Relaxed MO should be sufficient.  */
-  atomic_fetch_add_relaxed (&lm_cache->l_tls_dtor_count, 1);
+  atomic_fetch_add_relaxed (&lm_cache->l_rw->l_tls_dtor_count, 1);
   __rtld_lock_unlock_recursive (GL(dl_load_lock));
 
   new->map = lm_cache;
@@ -159,7 +159,7 @@  __call_tls_dtors (void)
 	 l_tls_dtor_count decrement.  That way, we protect this access from a
 	 potential DSO unload in _dl_close_worker, which happens when
 	 l_tls_dtor_count is 0.  See CONCURRENCY NOTES for more detail.  */
-      atomic_fetch_add_release (&cur->map->l_tls_dtor_count, -1);
+      atomic_fetch_add_release (&cur->map->l_rw->l_tls_dtor_count, -1);
       free (cur);
     }
 }
diff --git a/sysdeps/x86/dl-prop.h b/sysdeps/x86/dl-prop.h
index ba70b06c3a..f24fc1b028 100644
--- a/sysdeps/x86/dl-prop.h
+++ b/sysdeps/x86/dl-prop.h
@@ -40,7 +40,7 @@  dl_isa_level_check (struct link_map_private *m, const char *program)
       l = m->l_initfini[i];
 
       /* Skip ISA level check if functions have been executed.  */
-      if (l->l_init_called)
+      if (l->l_rw->l_init_called)
 	continue;
 
 #ifdef SHARED