@@ -3018,6 +3018,31 @@ tcache_thread_shutdown (void)
#endif /* !USE_TCACHE */
+/* Round a mid-size request up to a jemalloc-inspired size class to
+   reduce fragmentation.  Within each power-of-two range the classes
+   are spaced range/8 apart, so we pay a bounded padding overhead
+   (roughly 0-20%) on the initial allocation to improve the
+   likelihood of later block reuse.  Fastbin-sized requests and
+   requests at or above DEFAULT_MMAP_THRESHOLD_MAX pass through
+   unchanged.  */
+static inline size_t
+size_class_pad (size_t bytes)
+{
+  if (bytes <= MAX_FAST_SIZE || bytes >= DEFAULT_MMAP_THRESHOLD_MAX)
+    return bytes;
+
+  /* Walk the power-of-two ranges, starting at 16 pointers wide.
+     DEFAULT_MMAP_THRESHOLD_MAX is far below SIZE_MAX, so the shift
+     cannot wrap.  */
+  for (size_t max = sizeof (void *) << 4;
+       max < DEFAULT_MMAP_THRESHOLD_MAX;
+       max <<= 1)
+    if (bytes <= max)
+      {
+        /* Classes within this range are spaced max/8 apart.  */
+        size_t sc_bytes = ALIGN_UP (bytes, max >> 3);
+
+        /* Stay consistent with the entry guard: never pad a request
+           up to or beyond the mmap threshold cap.  */
+        return sc_bytes < DEFAULT_MMAP_THRESHOLD_MAX ? sc_bytes : bytes;
+      }
+
+  return bytes;
+}
+
void *
__libc_malloc (size_t bytes)
{
@@ -3031,6 +3056,7 @@ __libc_malloc (size_t bytes)
= atomic_forced_read (__malloc_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(bytes, RETURN_ADDRESS (0));
+ bytes = size_class_pad (bytes);
#if USE_TCACHE
/* int_free also calls request2size, be careful to not pad twice. */
size_t tbytes;
@@ -3150,6 +3176,8 @@ __libc_realloc (void *oldmem, size_t bytes)
if (oldmem == 0)
return __libc_malloc (bytes);
+ bytes = size_class_pad (bytes);
+
/* chunk corresponding to oldmem */
const mchunkptr oldp = mem2chunk (oldmem);
/* its size */
@@ -3391,6 +3419,7 @@ __libc_calloc (size_t n, size_t elem_size)
return memset (mem, 0, sz);
}
+ sz = size_class_pad (sz);
MAYBE_INIT_TCACHE ();
if (SINGLE_THREAD_P)