diff mbox

malloc: limit free_atomic_list() latency

Message ID 1453767942-19369-44-git-send-email-joern@purestorage.com
State New
Headers show

Commit Message

Jörn Engel Jan. 26, 2016, 12:25 a.m. UTC
Costa and Scott expressed concerns about long free lists causing
free_atomic_list() to spend a long time clearing them while holding the
arena lock.  This puts a hard cap (64 objects per call) on the amount of
work done at once, deferring the remainder to a secondary list.

JIRA: PURE-27597
---
 tpc/malloc2.13/malloc.c |  6 ++++++
 tpc/malloc2.13/tcache.h | 22 ++++++++++++++--------
 2 files changed, 20 insertions(+), 8 deletions(-)
diff mbox

Patch

diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 18c7b407bbea..9f2d2df47ea1 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -2118,6 +2118,12 @@  struct malloc_state {
 	 * not protected by mutex - use atomic operations on this.
 	 */
 	mchunkptr atomic_free_list;
+
+	/*
+	 * Secondary free list in case there are too many objects on
+	 * the primary list to free all at once.
+	 */
+	mchunkptr amortized_free_list;
 };
 
 struct malloc_par {
diff --git a/tpc/malloc2.13/tcache.h b/tpc/malloc2.13/tcache.h
index b02203398f2f..edfe7acbc75e 100644
--- a/tpc/malloc2.13/tcache.h
+++ b/tpc/malloc2.13/tcache.h
@@ -118,23 +118,29 @@  static inline int is_accessed(struct thread_cache *cache, int bin)
 	return get_bit(cache->accessed_map, bin);
 }
 
+/*
+ * Free objects from the atomic_free_list while holding the
+ * arena_lock.  In case the atomic_free_list has become obscenely big
+ * we limit ourselves to freeing 64 objects at once.
+ */
 static void free_atomic_list(struct malloc_state *arena)
 {
 	struct malloc_chunk *victim, *next;
+	int i;
 
-	/*
-	 * Check without using atomic first - if we lose the race we will
-	 * free things next time around.
-	 */
-	if (!arena->atomic_free_list)
-		return;
+	if (!arena->amortized_free_list) {
+		if (!arena->atomic_free_list)
+			return;
+		arena->amortized_free_list = __sync_lock_test_and_set(&arena->atomic_free_list, NULL);
+	}
 
-	victim = __sync_lock_test_and_set(&arena->atomic_free_list, NULL);
-	while (victim) {
+	victim = arena->amortized_free_list;
+	for (i = 64; i && victim; i--) {
 		next = victim->fd;
 		_int_free(arena, victim);
 		victim = next;
 	}
+	arena->amortized_free_list = victim;
 }
 
 static void tcache_gc(struct thread_cache *cache)