@@ -688,11 +688,23 @@ static struct malloc_state *_int_new_arena(size_t size, int numa_node)
(void)mutex_lock(&a->mutex);
- /* Add the new arena to the global list. */
+ /* Add the new arena to the global lists. */
+ a->numa_node = numa_node;
+
a->next = main_arena.next;
atomic_write_barrier();
main_arena.next = a;
- a->numa_node = numa_node;
+
+ if (numa_arena[numa_node]) {
+ a->local_next = numa_arena[numa_node]->local_next;
+ atomic_write_barrier();
+ numa_arena[numa_node]->local_next = a;
+ } else {
+ /* First arena on this node: make a singleton ring so the
+    local_next walk in arena_get2 always terminates. */
+ a->local_next = a;
+ atomic_write_barrier();
+ numa_arena[numa_node] = a;
+ }
THREAD_STAT(++(a->stat_lock_loop));
@@ -703,7 +710,7 @@ static struct malloc_state *arena_get2(struct malloc_state *a_tsd, size_t size)
{
struct malloc_state *a;
- a = a_tsd->next;
+ a = a_tsd->local_next;
if (!a)
abort();
@@ -718,7 +725,7 @@ static struct malloc_state *arena_get2(struct malloc_state *a_tsd, size_t size)
tsd_setspecific(arena_key, (Void_t *) a);
return a;
}
- a = a->next;
+ a = a->local_next;
} while (a != a_tsd);
/* If not even the list_lock can be obtained, try again. This can
@@ -768,8 +775,11 @@ static struct malloc_state *arena_get(size_t size)
int node = getnode();
tsd_getspecific(arena_key, arena);
- if (!arena || arena->numa_node != node)
+ if (!arena || arena->numa_node != node) {
arena = numa_arena[node];
+ if (arena)
+ numa_arena[node] = arena->local_next;
+ }
if (arena && !mutex_trylock(&arena->mutex)) {
THREAD_STAT(++(arena->stat_lock_direct));
} else