@@ -91,6 +91,13 @@ struct thread_cache {
unsigned long accessed_map[CACHE_BITMAP_SIZE];
+ /*
+ * Bins are singly linked lists, chained through the fd pointer
+ * of struct malloc_chunk. The bk pointer stores the number of
+ * objects in the bin. Since bins are LIFO, the bk pointer is
+ * written once when an object is put into a bin and remains
+ * valid forever.
+ */
struct malloc_chunk *tc_bin[CACHE_NO_BINS];
};
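To make that encoding concrete, here is a minimal standalone sketch of the same idea; struct chunk, push() and main() are illustrative stand-ins, not the allocator's real malloc_chunk API. The head's bk always equals the current bin length, and counts stored deeper in the list are never rewritten:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-in for struct malloc_chunk: fd chains the
   list, bk is reused as an integer holding the number of chunks
   from this node to the tail. */
struct chunk {
        struct chunk *fd;
        void *bk;
};

static void push(struct chunk **bin, struct chunk *p)
{
        p->fd = *bin;
        /* new head count = old head count + 1; an empty bin counts as 0 */
        p->bk = (void *)(*bin ? (unsigned long)(*bin)->bk + 1 : 1);
        *bin = p;
}

int main(void)
{
        struct chunk a = { 0 }, b = { 0 }, c = { 0 };
        struct chunk *bin = NULL;

        push(&bin, &a);
        push(&bin, &b);
        push(&bin, &c);
        assert((unsigned long)bin->bk == 3);     /* LIFO head holds the length */
        assert((unsigned long)bin->fd->bk == 2); /* older counts stay valid */
        printf("bin holds %lu chunks\n", (unsigned long)bin->bk);
        return 0;
}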
@@ -127,16 +134,21 @@ static void tcache_gc(struct thread_cache *cache)
{
struct malloc_chunk *victim, *next;
struct malloc_state *arena = NULL;
- int i, did_repeat = 0;
+ unsigned long limit;
+ int i;
-repeat:
+ repeat:
for (i = 0; i < CACHE_NO_BINS; i++) {
victim = cache->tc_bin[i];
/* accessed bins get skipped - they are useful */
if (is_accessed(cache, i) || !victim)
continue;
- cache->tc_bin[i] = NULL;
- while (victim) {
+ /*
+ * Only free the newer half of each bin. The cache
+ * remains fuller on average, and hit rates are higher.
+ */
+ limit = (unsigned long)victim->bk >> 1;
+ while (victim && (unsigned long)victim->bk > limit) {
next = victim->fd;
cache->tc_size -= chunksize(victim);
cache->tc_count--;
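Because every node's bk records its distance from the tail, trimming the newer half of a bin reduces to walking from the head until the count drops to half of the head's count, which is exactly what the limit test above does. A sketch of just that walk, reusing the hypothetical struct chunk from the earlier example (the real loop frees each victim back to its arena instead of collecting it):

/* Keep the older half of a bin; return the trimmed-off newer half. */
static struct chunk *trim_half(struct chunk **bin)
{
        struct chunk *victim = *bin;
        unsigned long limit = victim ? (unsigned long)victim->bk >> 1 : 0;
        struct chunk *trimmed = NULL, *next;

        while (victim && (unsigned long)victim->bk > limit) {
                next = victim->fd;
                victim->fd = trimmed;   /* collect rather than free */
                trimmed = victim;
                victim = next;
        }
        *bin = victim;  /* survivors' bk counts are still valid */
        return trimmed;
}

For a bin of five chunks, limit is 2, so the three newest chunks (counts 5, 4 and 3) are trimmed and the surviving head still carries the correct count of 2.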
@@ -156,6 +168,7 @@ repeat:
} while (!__sync_bool_compare_and_swap(&arena->atomic_free_list, victim->fd, victim));
victim = next;
}
+ cache->tc_bin[i] = victim;
}
memset(cache->accessed_map, 0, sizeof(cache->accessed_map));
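The trimmed chunks are handed back by pushing them onto the owning arena's atomic_free_list with the compare-and-swap loop in the context above, i.e. a standard lock-free LIFO push. A standalone sketch of that pattern; struct node and push_atomic() are illustrative names, while __sync_bool_compare_and_swap is the GCC builtin the patch itself uses:

#include <stdio.h>

struct node {
        struct node *next;
        int val;
};

/* Lock-free LIFO push: re-read the current head into n->next and
   retry the CAS until no other thread slips in between. */
static void push_atomic(struct node **head, struct node *n)
{
        do {
                n->next = *head;
        } while (!__sync_bool_compare_and_swap(head, n->next, n));
}

int main(void)
{
        struct node a = { .val = 1 }, b = { .val = 2 };
        struct node *list = NULL;

        push_atomic(&list, &a);
        push_atomic(&list, &b);
        for (struct node *p = list; p; p = p->next)
                printf("%d\n", p->val);
        return 0;
}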
@@ -164,18 +177,10 @@ repeat:
* Since we skip accessed bins we can run into
* pathological cases where all bins are empty or
* accessed and we made no progress. In those cases
- * we retry after clearing the accessed bits, freeing
- * the entire cache. Should be rare.
+ * we retry after clearing the accessed bits.
+ * Should be rare.
*/
- did_repeat = 1;
goto repeat;
- } else if (did_repeat) {
- /*
- * Since we freed the entire cache, we can verify the
- * counters are consistent.
- */
- assert(cache->tc_size == 0);
- assert(cache->tc_count == 0);
}
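Dropping did_repeat and the two assertions is a direct consequence of the halving policy: even when the GC retries, it no longer drains the cache, so tc_size and tc_count are legitimately nonzero afterwards and the old "entire cache was freed" invariant can no longer be checked.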
if (arena && !mutex_trylock(&arena->mutex)) {
/*
@@ -197,6 +202,16 @@ repeat:
}
}
+static void add_to_bin(struct malloc_chunk **bin, struct malloc_chunk *p)
+{
+ struct malloc_chunk *old;
+
+ old = *bin;
+ p->fd = old;
+ p->bk = (void *) (old ? (unsigned long)old->bk + 1 : 1);
+ *bin = p;
+}
+
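Factoring the push into add_to_bin() keeps the bk count maintenance in one place; the two call sites converted below (the prefetch loop in tcache_malloc and the fast path in tcache_free) previously open-coded the two-line push and would otherwise each have to duplicate the counting logic.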
static void *tcache_malloc(size_t size)
{
struct thread_cache *cache;
@@ -285,8 +300,7 @@ static void *tcache_malloc(size_t size)
assert(cache_bin(chunksize(prefetch)) == bin_no);
cache->tc_size += chunksize(prefetch);
cache->tc_count++;
- prefetch->fd = *bin;
- *bin = prefetch;
+ add_to_bin(bin, prefetch);
}
}
arena_unlock(arena);
@@ -325,8 +339,7 @@ static void tcache_free(mchunkptr p)
malloc_printerr(check_action, "invalid tcache entry", chunk2mem(p));
return;
}
- p->fd = *bin;
- *bin = p;
+ add_to_bin(bin, p);
if (cache->tc_size > CACHE_SIZE)
tcache_gc(cache);
return;
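A likely trade-off at this trigger point: since tcache_gc now leaves unaccessed bins half full, tc_size stays closer to CACHE_SIZE and the threshold will tend to fire more often, but each pass frees fewer chunks, and the fuller cache is what buys the higher hit rates claimed in the comment above.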