@@ -706,6 +706,17 @@ static struct malloc_state *_int_new_arena(size_t size, int numa_node)
/* Add the new arena to the global lists. */
a->numa_node = numa_node;
+ /*
+ * a->next must be a valid pointer before changing
+ * main_arena.next, otherwise a reader following the list
+ * pointers could end up dereferencing NULL or worse. The
+ * same reasoning applies to a->local_next.
+ *
+ * On most architectures no atomic_read_barrier() is needed by
+ * readers, because these are dependent reads that are naturally
+ * ordered: a->next must be loaded before a->next->next can be.
+ * (DEC Alpha is the historical exception — it does not order
+ * dependent loads, so readers there would still need a
+ * read-dependency barrier.)
+ */
a->next = main_arena.next;
atomic_write_barrier();
main_arena.next = a;
@@ -797,10 +808,9 @@ static struct malloc_state *arena_get(size_t size)
* to use a numa-local arena, but are limited to best-effort.
*/
tsd_getspecific(arena_key, arena);
- if (!arena || arena->numa_node != node) {
+ if (!arena || arena->numa_node != node)
arena = numa_arena[node];
- numa_arena[node] = arena->local_next;
- }
+
if (arena && !mutex_trylock(&arena->mutex)) {
THREAD_STAT(++(arena->stat_lock_direct));
} else
@@ -110,15 +110,15 @@ typedef pthread_key_t tsd_key_t;
#ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
+# define atomic_full_barrier() __sync_synchronize()
#endif
#ifndef atomic_read_barrier
-# define atomic_read_barrier() atomic_full_barrier ()
+# define atomic_read_barrier() atomic_full_barrier()
#endif
#ifndef atomic_write_barrier
-# define atomic_write_barrier() atomic_full_barrier ()
+# define atomic_write_barrier() atomic_full_barrier()
#endif
#ifndef DEFAULT_TOP_PAD