@@ -2118,6 +2118,12 @@ struct malloc_state {
 	 * not protected by mutex - use atomic operations on this.
 	 */
 	mchunkptr atomic_free_list;
+
+	/*
+	 * Secondary free list in case there are too many objects on
+	 * the primary list to free them all at once.
+	 */
+	mchunkptr amortized_free_list;
 };
 
 struct malloc_par {
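
The hunks here only show the drain side of atomic_free_list; the producer side is not part of this excerpt. As a minimal sketch, assuming remote frees link chunks through fd (as the drain loop below expects), a cross-thread push could look like the following. atomic_list_push is a hypothetical name, not something in the patch:

static void atomic_list_push(struct malloc_state *arena, struct malloc_chunk *p)
{
	struct malloc_chunk *old;

	/* Publish p as the new list head; retry if another thread races us. */
	do {
		old = arena->atomic_free_list;
		p->fd = old;
	} while (!__sync_bool_compare_and_swap(&arena->atomic_free_list, old, p));
}

The swap in free_atomic_list (__sync_lock_test_and_set is an atomic exchange with acquire semantics) then detaches the whole chain in one step, so pushers never have to take the arena lock.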
@@ -118,23 +118,29 @@ static inline int is_accessed(struct thread_cache *cache, int bin)
 	return get_bit(cache->accessed_map, bin);
 }
 
+/*
+ * Free objects from the atomic_free_list while holding arena_lock.
+ * If the list has become obscenely big, we free at most 64 objects
+ * per call; the rest waits on the amortized_free_list.
+ */
 static void free_atomic_list(struct malloc_state *arena)
 {
 	struct malloc_chunk *victim, *next;
+	int i;
 
-	/*
-	 * Check without using atomic first - if we lose the race we will
-	 * free things next time around.
-	 */
-	if (!arena->atomic_free_list)
-		return;
+	if (!arena->amortized_free_list) {
+		if (!arena->atomic_free_list)
+			return;
+		arena->amortized_free_list = __sync_lock_test_and_set(&arena->atomic_free_list, NULL);
+	}
 
-	victim = __sync_lock_test_and_set(&arena->atomic_free_list, NULL);
-	while (victim) {
+	victim = arena->amortized_free_list;
+	for (i = 64; i && victim; i--) {
 		next = victim->fd;
 		_int_free(arena, victim);
 		victim = next;
 	}
+	arena->amortized_free_list = victim;
 }
 
 static void tcache_gc(struct thread_cache *cache)
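
The amortization bound is easy to state: with the cap of 64, a backlog of N chunks is drained in ceil(N/64) calls, so no single call spends unbounded time under arena_lock. Below is a self-contained toy model of the same drain logic that you can compile and run; struct chunk, int_free and main are illustrative stand-ins, not code from the patch:

#include <stdio.h>
#include <stdlib.h>

struct chunk {
	struct chunk *fd;
};

static struct chunk *atomic_free_list;
static struct chunk *amortized_free_list;
static int freed;

/* Stand-in for _int_free(). */
static void int_free(struct chunk *p)
{
	free(p);
	freed++;
}

static void drain(void)
{
	struct chunk *victim, *next;
	int i;

	if (!amortized_free_list) {
		if (!atomic_free_list)
			return;
		amortized_free_list = __sync_lock_test_and_set(&atomic_free_list, NULL);
	}
	victim = amortized_free_list;
	for (i = 64; i && victim; i--) {
		next = victim->fd;
		int_free(victim);
		victim = next;
	}
	amortized_free_list = victim;
}

int main(void)
{
	int i, calls = 0;

	/* Push 1000 chunks (single-threaded here, so no CAS needed). */
	for (i = 0; i < 1000; i++) {
		struct chunk *p = malloc(sizeof(*p));
		p->fd = atomic_free_list;
		atomic_free_list = p;
	}
	while (freed < 1000) {
		drain();
		calls++;
	}
	/* Prints: 1000 chunks freed in 16 calls, i.e. ceil(1000/64). */
	printf("%d chunks freed in %d calls\n", freed, calls);
	return 0;
}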