From patchwork Tue Jan 26 00:24:51 2016
X-Patchwork-Submitter: Jörn Engel
X-Patchwork-Id: 572989
From: Joern Engel <joern@purestorage.com>
To: "GNU C. Library" <libc-alpha@sourceware.org>
Cc: Siddhesh Poyarekar, Joern Engel
Subject: [PATCH] malloc: introduce get_backup_arena()
Date: Mon, 25 Jan 2016 16:24:51 -0800
Message-Id: <1453767942-19369-13-git-send-email-joern@purestorage.com>
In-Reply-To: <1453767942-19369-1-git-send-email-joern@purestorage.com>
References: <1453767942-19369-1-git-send-email-joern@purestorage.com>

Removes a lot of duplicate code. Not all copies were identical, and I
believe some were somewhat buggy. Then again, this code is very unlikely
to run at all, so those bugs were equally unlikely to matter in practice.

JIRA: PURE-27597
---
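The subtle part of the helper is its locking contract: get_backup_arena()
is entered with ar_ptr->mutex held and returns a *different* arena with
*that* arena's mutex held, so every wrapper collapses to the same
lock / try / swap-and-retry / unlock sequence. Below is a self-contained
toy model of that handoff built on plain pthreads; struct arena, the
arena_get2() stub, and main() are illustrative stand-ins for the
allocator's types, not the real code.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct malloc_state: just a mutex and a label. */
struct arena {
	pthread_mutex_t mutex;
	const char *name;
};

static struct arena main_arena = { PTHREAD_MUTEX_INITIALIZER, "main" };
static struct arena mmap_arena = { PTHREAD_MUTEX_INITIALIZER, "mmap" };

/* Stand-in for arena_get2(): hand back a freshly locked non-main arena. */
static struct arena *arena_get2(struct arena *ar_ptr, size_t bytes)
{
	(void)ar_ptr;
	(void)bytes;
	pthread_mutex_lock(&mmap_arena.mutex);
	return &mmap_arena;
}

/* Same shape as the helper in the patch: called with ar_ptr->mutex held,
 * returns a different arena with that arena's mutex held. */
static struct arena *get_backup_arena(struct arena *ar_ptr, size_t bytes)
{
	if (ar_ptr != &main_arena) {
		/* Retry from main_arena; maybe mmap() ran out of areas. */
		pthread_mutex_unlock(&ar_ptr->mutex);
		ar_ptr = &main_arena;
		pthread_mutex_lock(&ar_ptr->mutex);
	} else {
		/* ... or sbrk() failed; grab an mmap-backed arena instead. */
		ar_ptr = arena_get2(ar_ptr, bytes);
		pthread_mutex_unlock(&main_arena.mutex);
	}
	return ar_ptr;
}

int main(void)
{
	/* The caller pattern shared by all wrappers after this patch:
	 * lock, try, on failure swap arenas and retry, then unlock
	 * whichever arena we ended up holding. */
	struct arena *ar_ptr = &main_arena;
	pthread_mutex_lock(&ar_ptr->mutex);
	void *victim = NULL;			/* pretend _int_malloc() failed */
	if (!victim) {
		ar_ptr = get_backup_arena(ar_ptr, 4096);
		victim = "retried";		/* retry in the backup arena */
	}
	pthread_mutex_unlock(&ar_ptr->mutex);
	printf("allocated from %s arena\n", ar_ptr->name);
	return 0;
}

With the handoff in one place, a wrapper can no longer release the wrong
mutex or skip the retry, which is exactly the kind of drift the old
copies had.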
 tpc/malloc2.13/malloc.c | 126 +++++++++++++-----------------------------------
 1 file changed, 35 insertions(+), 91 deletions(-)

diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 28d9d902b7ec..7c94a8cefcac 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -3396,6 +3396,20 @@ mremap_chunk(mchunkptr p, size_t new_size)
 
 #endif /* HAVE_MREMAP */
 
+static struct malloc_state *get_backup_arena(struct malloc_state *ar_ptr, size_t bytes)
+{
+	if (ar_ptr != &main_arena) {
+		/* Maybe the failure is due to running out of mmapped areas. */
+		(void)mutex_unlock(&ar_ptr->mutex);
+		ar_ptr = &main_arena;
+		(void)mutex_lock(&ar_ptr->mutex);
+	} else {
+		/* ... or sbrk() has failed and there is still a chance to mmap() */
+		ar_ptr = arena_get2(ar_ptr, bytes);
+		(void)mutex_unlock(&main_arena.mutex);
+	}
+	return ar_ptr;
+}
 
 /*------------------------ Public wrappers. --------------------------------*/
 
@@ -3409,30 +3423,15 @@ Void_t *public_mALLOc(size_t bytes)
 	if (__builtin_expect(hook != NULL, 0))
 		return (*hook) (bytes, RETURN_ADDRESS(0));
 
-	arena_lookup(ar_ptr);
-	arena_lock(ar_ptr, bytes);
+	arena_get(ar_ptr, bytes);
 	if (!ar_ptr)
 		return 0;
 	victim = _int_malloc(ar_ptr, bytes);
 	if (!victim) {
-		/* Maybe the failure is due to running out of mmapped areas. */
-		if (ar_ptr != &main_arena) {
-			(void)mutex_unlock(&ar_ptr->mutex);
-			ar_ptr = &main_arena;
-			(void)mutex_lock(&ar_ptr->mutex);
-			victim = _int_malloc(ar_ptr, bytes);
-			(void)mutex_unlock(&ar_ptr->mutex);
-		} else {
-			/* ... or sbrk() has failed and there is still a chance to mmap() */
-			ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
-			(void)mutex_unlock(&main_arena.mutex);
-			if (ar_ptr) {
-				victim = _int_malloc(ar_ptr, bytes);
-				(void)mutex_unlock(&ar_ptr->mutex);
-			}
-		}
-	} else
-		(void)mutex_unlock(&ar_ptr->mutex);
+		ar_ptr = get_backup_arena(ar_ptr, bytes);
+		victim = _int_malloc(ar_ptr, bytes);
+	}
+	(void)mutex_unlock(&ar_ptr->mutex);
 	assert(!victim || chunk_is_mmapped(mem2chunk(victim)) || ar_ptr == arena_for_chunk(mem2chunk(victim)));
 	return victim;
 }
@@ -3618,25 +3617,10 @@ Void_t *public_mEMALIGn(size_t alignment, size_t bytes)
 		return 0;
 	p = _int_memalign(ar_ptr, alignment, bytes);
 	if (!p) {
-		/* Maybe the failure is due to running out of mmapped areas. */
-		if (ar_ptr != &main_arena) {
-			(void)mutex_unlock(&ar_ptr->mutex);
-			ar_ptr = &main_arena;
-			(void)mutex_lock(&ar_ptr->mutex);
-			p = _int_memalign(ar_ptr, alignment, bytes);
-			(void)mutex_unlock(&ar_ptr->mutex);
-		} else {
-			/* ... or sbrk() has failed and there is still a chance to mmap() */
-			struct malloc_state *prev = ar_ptr->next ? ar_ptr : 0;
-			(void)mutex_unlock(&ar_ptr->mutex);
-			ar_ptr = arena_get2(prev, bytes);
-			if (ar_ptr) {
-				p = _int_memalign(ar_ptr, alignment, bytes);
-				(void)mutex_unlock(&ar_ptr->mutex);
-			}
-		}
-	} else
-		(void)mutex_unlock(&ar_ptr->mutex);
+		ar_ptr = get_backup_arena(ar_ptr, bytes);
+		p = _int_memalign(ar_ptr, alignment, bytes);
+	}
+	(void)mutex_unlock(&ar_ptr->mutex);
 	assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
 	return p;
 }
@@ -3661,21 +3645,9 @@ Void_t *public_vALLOc(size_t bytes)
 	p = _int_valloc(ar_ptr, bytes);
-	(void)mutex_unlock(&ar_ptr->mutex);
 	if (!p) {
-		/* Maybe the failure is due to running out of mmapped areas. */
-		if (ar_ptr != &main_arena) {
-			ar_ptr = &main_arena;
-			(void)mutex_lock(&ar_ptr->mutex);
-			p = _int_memalign(ar_ptr, pagesz, bytes);
-			(void)mutex_unlock(&ar_ptr->mutex);
-		} else {
-			/* ... or sbrk() has failed and there is still a chance to mmap() */
-			ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
-			if (ar_ptr) {
-				p = _int_memalign(ar_ptr, pagesz, bytes);
-				(void)mutex_unlock(&ar_ptr->mutex);
-			}
-		}
+		ar_ptr = get_backup_arena(ar_ptr, bytes);
+		p = _int_memalign(ar_ptr, pagesz, bytes);
 	}
+	(void)mutex_unlock(&ar_ptr->mutex);
 	assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
 	       ar_ptr == arena_for_chunk(mem2chunk(p)));
 	return p;
@@ -3701,21 +3673,9 @@ Void_t *public_pVALLOc(size_t bytes)
 	p = _int_pvalloc(ar_ptr, bytes);
-	(void)mutex_unlock(&ar_ptr->mutex);
 	if (!p) {
-		/* Maybe the failure is due to running out of mmapped areas. */
-		if (ar_ptr != &main_arena) {
-			ar_ptr = &main_arena;
-			(void)mutex_lock(&ar_ptr->mutex);
-			p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-			(void)mutex_unlock(&ar_ptr->mutex);
-		} else {
-			/* ... or sbrk() has failed and there is still a chance to mmap() */
-			ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes + 2 * pagesz + MINSIZE);
-			if (ar_ptr) {
-				p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-				(void)mutex_unlock(&ar_ptr->mutex);
-			}
-		}
+		ar_ptr = get_backup_arena(ar_ptr, bytes);
+		p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
 	}
+	(void)mutex_unlock(&ar_ptr->mutex);
 	assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
 	       ar_ptr == arena_for_chunk(mem2chunk(p)));
 	return p;
@@ -3780,31 +3740,15 @@ Void_t *public_cALLOc(size_t n, size_t elem_size)
 	}
 #endif
 	mem = _int_malloc(av, sz);
-
-	/* Only clearing follows, so we can unlock early. */
+	if (mem == 0) {
+		av = get_backup_arena(av, bytes);
+		mem = _int_malloc(av, sz);
+	}
 	(void)mutex_unlock(&av->mutex);
 	assert(!mem || chunk_is_mmapped(mem2chunk(mem)) || av == arena_for_chunk(mem2chunk(mem)));
-
-	if (mem == 0) {
-		/* Maybe the failure is due to running out of mmapped areas. */
-		if (av != &main_arena) {
-			(void)mutex_lock(&main_arena.mutex);
-			mem = _int_malloc(&main_arena, sz);
-			(void)mutex_unlock(&main_arena.mutex);
-		} else {
-			/* ... or sbrk() has failed and there is still a chance to mmap() */
-			(void)mutex_lock(&main_arena.mutex);
-			av = arena_get2(av->next ? av : 0, sz);
-			(void)mutex_unlock(&main_arena.mutex);
-			if (av) {
-				mem = _int_malloc(av, sz);
-				(void)mutex_unlock(&av->mutex);
-			}
-		}
-		if (mem == 0)
-			return 0;
-	}
+	if (mem == 0)
+		return 0;
 	p = mem2chunk(mem);
 
 	/* Two optional cases in which clearing not necessary */