@@ -719,7 +719,7 @@ new_heap(size, top_pad) size_t size, top_pad;
anyway). */
p2 = MAP_FAILED;
if(aligned_heap_area) {
- p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
+ p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_NORESERVE);
aligned_heap_area = NULL;
if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
@@ -728,7 +728,7 @@ new_heap(size, top_pad) size_t size, top_pad;
}
}
if(p2 == MAP_FAILED) {
- p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE,
+ p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_NORESERVE);
if(p1 != MAP_FAILED) {
p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
@@ -742,7 +742,7 @@ new_heap(size, top_pad) size_t size, top_pad;
} else {
/* Try to take the chance that an allocation of only HEAP_MAX_SIZE
is already aligned. */
- p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
+ p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE);
if(p2 == MAP_FAILED)
return 0;
if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
@@ -751,10 +751,6 @@ new_heap(size, top_pad) size_t size, top_pad;
}
}
}
- if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
- munmap(p2, HEAP_MAX_SIZE);
- return 0;
- }
h = (heap_info *)p2;
h->size = size;
h->mprotect_size = size;
@@ -780,10 +776,6 @@ grow_heap(h, diff) heap_info *h; long diff;
if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
return -1;
if((unsigned long) new_size > h->mprotect_size) {
- if (mprotect((char *)h + h->mprotect_size,
- (unsigned long) new_size - h->mprotect_size,
- PROT_READ|PROT_WRITE) != 0)
- return -2;
h->mprotect_size = new_size;
}
@@ -807,21 +799,7 @@ shrink_heap(h, diff) heap_info *h; long diff;
return -1;
/* Try to re-map the extra heap space freshly to save memory, and
make it inaccessible. */
-#ifdef _LIBC
- if (__builtin_expect (__libc_enable_secure, 0))
-#else
- if (1)
-#endif
- {
- if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
- MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
- return -2;
- h->mprotect_size = new_size;
- }
-#ifdef _LIBC
- else
- madvise ((char *)h + new_size, diff, MADV_DONTNEED);
-#endif
+ madvise ((char *)h + new_size, diff, MADV_DONTNEED);
/*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
h->size = new_size;
From: Joern Engel <joern@purestorage.org> The only reason to use mprotect is to reduce the "commit charge", i.e. the amount of memory we are charged for when using overcommit_memory=2 (don't overcommit). We want to overcommit, so there is zero benefit to us from using mprotect. But the cost is significant. In a microbenchmark I got a 9x speed improvement from this change. How much of that remains for foed is unclear, but a typical pattern was to call mprotect for 4k chunks, which took mmap_sem for writing each time, so the contention on mmap_sem should be noticeably reduced. shrink_heap() used to call mmap(PROT_NONE) for our build parameters. Change that to calling madvise(MADV_DONTNEED) instead, as mmap(PROT_NONE) would now cause a segfault and mmap(PROT_READ|PROT_WRITE) would be an expensive no-op. JIRA: PURE-27597 --- tpc/malloc2.13/arena.ch | 30 ++++-------------------------- 1 file changed, 4 insertions(+), 26 deletions(-)