From 2bc983b78c215765979a29a2e98b0cc01791c2d1 Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Sat, 23 Apr 2016 06:05:01 -0700 Subject: Reduce number of mmap calls from __libc_memalign in ld.so __libc_memalign in ld.so allocates one page at a time and tries to optimize consecutive __libc_memalign calls by hoping that the next mmap is after the current memory allocation. However, the kernel hands out mmap addresses in top-down order, so this optimization in practice never happens, with the result that we have more mmap calls and waste a bunch of space for each __libc_memalign. This change makes __libc_memalign mmap one page extra. Worst case, the kernel never puts a backing page behind it, but best case it allows __libc_memalign to operate much much better. For elf/tst-align --direct, it reduces number of mmap calls from 12 to 9. * elf/dl-minimal.c (__libc_memalign): Mmap one extra page. --- elf/dl-minimal.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'elf/dl-minimal.c') diff --git a/elf/dl-minimal.c b/elf/dl-minimal.c index 762e65b4d0..c8a8f8dc93 100644 --- a/elf/dl-minimal.c +++ b/elf/dl-minimal.c @@ -66,15 +66,13 @@ __libc_memalign (size_t align, size_t n) if (alloc_ptr + n >= alloc_end || n >= -(uintptr_t) alloc_ptr) { - /* Insufficient space left; allocate another page. */ + /* Insufficient space left; allocate another page plus one extra + page to reduce number of mmap calls. */ caddr_t page; size_t nup = (n + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1); - if (__glibc_unlikely (nup == 0)) - { - if (n) - return NULL; - nup = GLRO(dl_pagesize); - } + if (__glibc_unlikely (nup == 0 && n != 0)) + return NULL; + nup += GLRO(dl_pagesize); page = __mmap (0, nup, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0); if (page == MAP_FAILED) -- cgit v1.2.3