From: Joern Engel <joern@purestorage.org>

Move the #ifdef THREAD_STATS into a helper function instead of
open-coding it in some places and forgetting it in others.

JIRA: PURE-27597
---
 tpc/malloc2.13/arena.h  |  20 +++++++++
 tpc/malloc2.13/malloc.c | 110 ++++++++++++++++--------------------------------
 2 files changed, 56 insertions(+), 74 deletions(-)

diff --git a/tpc/malloc2.13/arena.h b/tpc/malloc2.13/arena.h
--- a/tpc/malloc2.13/arena.h
+++ b/tpc/malloc2.13/arena.h
@@ -782,3 +782,23 @@ static struct malloc_state *arena_get(size_t size)
arena = arena_get2(arena, size);
return arena;
}
+
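+/* Lock an arena, counting direct vs. contended acquisitions when THREAD_STATS is enabled. */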
+static inline void arena_lock(struct malloc_state *arena)
+{
+#if THREAD_STATS
+ if(!mutex_trylock(&arena->mutex))
+ ++(arena->stat_lock_direct);
+ else {
+ (void)mutex_lock(&arena->mutex);
+ ++(arena->stat_lock_wait);
+ }
+#else
+ (void)mutex_lock(&arena->mutex);
+#endif
+}
+
+static inline void arena_unlock(struct malloc_state *arena)
+{
+ (void)mutex_unlock(&arena->mutex);
+}
diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -3249,19 +3249,19 @@ mremap_chunk(mchunkptr p, size_t new_size)
#endif /* HAVE_MREMAP */
-static struct malloc_state *get_backup_arena(struct malloc_state *ar_ptr, size_t bytes)
+static struct malloc_state *get_backup_arena(struct malloc_state *arena, size_t bytes)
{
- if (ar_ptr != &main_arena) {
+ if (arena != &main_arena) {
/* Maybe the failure is due to running out of mmapped areas. */
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = &main_arena;
- (void)mutex_lock(&ar_ptr->mutex);
+ arena_unlock(arena);
+ arena = &main_arena;
+ arena_lock(arena);
} else {
/* ... or sbrk() has failed and there is still a chance to mmap() */
- ar_ptr = arena_get2(ar_ptr, bytes);
- (void)mutex_unlock(&main_arena.mutex);
+ arena = arena_get2(arena, bytes);
+ arena_unlock(&main_arena);
}
- return ar_ptr;
+ return arena;
}
/*------------------------ Public wrappers. --------------------------------*/
@@ -3284,7 +3284,7 @@ Void_t *public_mALLOc(size_t bytes)
ar_ptr = get_backup_arena(ar_ptr, bytes);
victim = _int_malloc(ar_ptr, bytes);
}
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
assert(!victim || chunk_is_mmapped(mem2chunk(victim)) || ar_ptr == arena_for_chunk(mem2chunk(victim)));
return victim;
}
@@ -3317,18 +3317,9 @@ public_fREe(Void_t* mem)
#ifdef ATOMIC_FASTBINS
_int_free(ar_ptr, p, 0);
#else
-# if THREAD_STATS
- if(!mutex_trylock(&ar_ptr->mutex))
- ++(ar_ptr->stat_lock_direct);
- else {
- (void)mutex_lock(&ar_ptr->mutex);
- ++(ar_ptr->stat_lock_wait);
- }
-# else
- (void)mutex_lock(&ar_ptr->mutex);
-# endif
+ arena_lock(ar_ptr);
_int_free(ar_ptr, p);
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
#endif
}
@@ -3389,16 +3380,7 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
}
ar_ptr = arena_for_chunk(oldp);
-#if THREAD_STATS
- if(!mutex_trylock(&ar_ptr->mutex))
- ++(ar_ptr->stat_lock_direct);
- else {
- (void)mutex_lock(&ar_ptr->mutex);
- ++(ar_ptr->stat_lock_wait);
- }
-#else
- (void)mutex_lock(&ar_ptr->mutex);
-#endif
+ arena_lock(ar_ptr);
#if !defined NO_THREADS
/* As in malloc(), remember this arena for the next allocation. */
@@ -3407,7 +3389,7 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
ar_ptr == arena_for_chunk(mem2chunk(newp)));
@@ -3421,18 +3403,9 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
#ifdef ATOMIC_FASTBINS
_int_free(ar_ptr, oldp, 0);
#else
-# if THREAD_STATS
- if(!mutex_trylock(&ar_ptr->mutex))
- ++(ar_ptr->stat_lock_direct);
- else {
- (void)mutex_lock(&ar_ptr->mutex);
- ++(ar_ptr->stat_lock_wait);
- }
-# else
- (void)mutex_lock(&ar_ptr->mutex);
-# endif
+ arena_lock(ar_ptr);
_int_free(ar_ptr, oldp);
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
#endif
}
}
@@ -3465,7 +3438,7 @@ Void_t *public_mEMALIGn(size_t alignment, size_t bytes)
ar_ptr = get_backup_arena(ar_ptr, bytes);
p = _int_memalign(ar_ptr, alignment, bytes);
}
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
return p;
}
@@ -3488,12 +3461,11 @@ Void_t *public_vALLOc(size_t bytes)
if (!ar_ptr)
return 0;
p = _int_valloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
if (!p) {
ar_ptr = get_backup_arena(ar_ptr, bytes);
p = _int_memalign(ar_ptr, pagesz, bytes);
}
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
return p;
@@ -3517,12 +3490,11 @@ Void_t *public_pVALLOc(size_t bytes)
ar_ptr = arena_get(bytes + 2 * pagesz + MINSIZE);
p = _int_pvalloc(ar_ptr, bytes);
- (void)mutex_unlock(&ar_ptr->mutex);
if (!p) {
ar_ptr = get_backup_arena(ar_ptr, bytes);
p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
}
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
return p;
@@ -3591,7 +3563,7 @@ Void_t *public_cALLOc(size_t n, size_t elem_size)
av = get_backup_arena(av, bytes);
mem = _int_malloc(&main_arena, sz);
}
- (void)mutex_unlock(&av->mutex);
+ arena_unlock(av);
assert(!mem || chunk_is_mmapped(mem2chunk(mem)) || av == arena_for_chunk(mem2chunk(mem)));
if (mem == 0)
@@ -3658,7 +3630,7 @@ public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
return 0;
m = _int_icalloc(ar_ptr, n, elem_size, chunks);
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
return m;
}
@@ -3673,7 +3645,7 @@ public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
return 0;
m = _int_icomalloc(ar_ptr, n, sizes, chunks);
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
return m;
}
@@ -3695,9 +3667,9 @@ public_mTRIm(size_t s)
struct malloc_state * ar_ptr = &main_arena;
do
{
- (void) mutex_lock (&ar_ptr->mutex);
+ arena_lock(ar_ptr);
result |= mTRIm (ar_ptr, s);
- (void) mutex_unlock (&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
ar_ptr = ar_ptr->next;
}
@@ -3728,9 +3700,9 @@ struct mallinfo public_mALLINFo()
if(__malloc_initialized < 0)
ptmalloc_init ();
- (void)mutex_lock(&main_arena.mutex);
+ arena_lock(&main_arena);
m = mALLINFo(&main_arena);
- (void)mutex_unlock(&main_arena.mutex);
+ arena_unlock(&main_arena);
ret.arena = (int)m.arena;
ret.ordblks = (int)m.ordblks;
ret.smblks = (int)m.smblks;
@@ -4302,7 +4274,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
errout:
#ifdef ATOMIC_FASTBINS
if (! have_lock && locked)
- (void)mutex_unlock(&av->mutex);
+ arena_unlock(av);
#endif
malloc_printerr (check_action, errstr, chunk2mem(p));
return;
@@ -4342,7 +4314,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
after getting the lock. */
if (have_lock
|| ({ assert (locked == 0);
- mutex_lock(&av->mutex);
+ arena_lock(av);
locked = 1;
chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
|| chunksize (chunk_at_offset (p, size)) >= av->system_mem;
@@ -4355,7 +4327,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
#ifdef ATOMIC_FASTBINS
if (! have_lock)
{
- (void)mutex_unlock(&av->mutex);
+ arena_unlock(av);
locked = 0;
}
#endif
@@ -4419,16 +4391,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
else if (!chunk_is_mmapped(p)) {
#ifdef ATOMIC_FASTBINS
if (! have_lock) {
-# if THREAD_STATS
- if(!mutex_trylock(&av->mutex))
- ++(av->stat_lock_direct);
- else {
- (void)mutex_lock(&av->mutex);
- ++(av->stat_lock_wait);
- }
-# else
- (void)mutex_lock(&av->mutex);
-# endif
+ arena_lock(av);
locked = 1;
}
#endif
@@ -4564,7 +4527,7 @@ _int_free(struct malloc_state * av, mchunkptr p)
#ifdef ATOMIC_FASTBINS
if (! have_lock) {
assert (locked);
- (void)mutex_unlock(&av->mutex);
+ arena_unlock(av);
}
#endif
}
@@ -5388,7 +5351,7 @@ void mSTATs()
if(__malloc_initialized < 0)
ptmalloc_init ();
for (i=0, ar_ptr = &main_arena;; i++) {
- (void)mutex_lock(&ar_ptr->mutex);
+ arena_lock(ar_ptr);
mi = mALLINFo(ar_ptr);
fprintf(stderr, "Arena %d:\n", i);
fprintf(stderr, "system bytes = %14lu\n", mi.arena);
@@ -5404,7 +5367,7 @@ void mSTATs()
stat_lock_loop += ar_ptr->stat_lock_loop;
stat_lock_wait += ar_ptr->stat_lock_wait;
#endif
- (void)mutex_unlock(&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
}
@@ -5439,7 +5402,7 @@ int mALLOPt(int param_number, int value)
if(__malloc_initialized < 0)
ptmalloc_init ();
- (void)mutex_lock(&av->mutex);
+ arena_lock(av);
/* Ensure initialization/consolidation */
malloc_consolidate(av);
@@ -5481,7 +5444,7 @@ int mALLOPt(int param_number, int value)
break;
}
- (void)mutex_unlock(&av->mutex);
+ arena_unlock(av);
return res;
}
@@ -5736,7 +5699,7 @@ dlmalloc_info (int options, FILE *fp)
} sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
- mutex_lock (&ar_ptr->mutex);
+ arena_lock(ar_ptr);
for (size_t i = 0; i < NFASTBINS; ++i)
{
@@ -5807,7 +5770,7 @@ dlmalloc_info (int options, FILE *fp)
avail += sizes[NFASTBINS - 1 + i].total;
}
- mutex_unlock (&ar_ptr->mutex);
+ arena_unlock(ar_ptr);
total_nfastblocks += nfastblocks;
total_fastavail += fastavail;
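For reviewers who have not built with THREAD_STATS, here is a stand-alone
sketch of the trylock-first accounting that arena_lock() centralizes. It is
illustrative only: toy_arena, toy_arena_lock() and the raw pthread calls are
stand-ins for the malloc-internal struct malloc_state and the mutex_*
wrappers, not part of the patch.

	/* cc -pthread sketch.c -o sketch */
	#include <pthread.h>
	#include <stdio.h>

	struct toy_arena {
		pthread_mutex_t mutex;
		unsigned long stat_lock_direct;	/* taken without blocking */
		unsigned long stat_lock_wait;	/* had to wait for the owner */
	};

	static void toy_arena_lock(struct toy_arena *arena)
	{
		/* Fast path first; fall back to a blocking lock and
		 * record the contention, as arena_lock() does above. */
		if (pthread_mutex_trylock(&arena->mutex) == 0)
			++arena->stat_lock_direct;
		else {
			pthread_mutex_lock(&arena->mutex);
			++arena->stat_lock_wait;
		}
	}

	static void toy_arena_unlock(struct toy_arena *arena)
	{
		pthread_mutex_unlock(&arena->mutex);
	}

	int main(void)
	{
		struct toy_arena a = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

		toy_arena_lock(&a);	/* uncontended: counts as direct */
		toy_arena_unlock(&a);
		printf("direct=%lu wait=%lu\n",
		       a.stat_lock_direct, a.stat_lock_wait);
		return 0;
	}

Taking the trylock before blocking keeps the common uncontended path cheap;
only contended acquisitions pay for the blocking call, and the two counters
make that split visible when mSTATs() prints the per-arena statistics.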