author		Jakub Jelinek <jakub@redhat.com>	2007-12-12 18:13:35 +0000
committer	Jakub Jelinek <jakub@redhat.com>	2007-12-12 18:13:35 +0000
commit		574e283890a6ca92325a06dafa76ff307a8019a2 (patch)
tree		055e44e24a55fb4863e5d9cdc04e320cde52ffe9 /malloc
parent		a162e5955f7e324be82d9318bbcbe869c66ffb86 (diff)
Updated to fedora-glibc-20071212T1051
Diffstat (limited to 'malloc')
-rw-r--r--	malloc/arena.c	84
-rw-r--r--	malloc/malloc.c	4
2 files changed, 53 insertions(+), 35 deletions(-)
diff --git a/malloc/arena.c b/malloc/arena.c
index ce64335567..9932ee049b 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -160,6 +160,10 @@ static void (*save_free_hook) (__malloc_ptr_t __ptr,
__const __malloc_ptr_t);
static Void_t* save_arena;
+#ifdef ATFORK_MEM
+ATFORK_MEM;
+#endif
+
/* Magic value for the thread-specific arena pointer when
malloc_atfork() is in use. */
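For context, ATFORK_MEM ties into malloc's fork handling: glibc registers
prepare/parent/child handlers so that no arena lock is held across fork().
Below is a minimal sketch of the underlying pthread_atfork() mechanism; the
handler names are hypothetical, not glibc's own:

#include <pthread.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take the lock before fork() so no thread is mid-update ...  */
static void atfork_prepare (void) { pthread_mutex_lock (&arena_lock); }
/* ... and release it on both sides of the fork.  */
static void atfork_parent (void) { pthread_mutex_unlock (&arena_lock); }
static void atfork_child (void) { pthread_mutex_unlock (&arena_lock); }

int
register_handlers (void)
{
  /* Runs the three handlers around every subsequent fork().  */
  return pthread_atfork (atfork_prepare, atfork_parent, atfork_child);
}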
@@ -700,8 +704,8 @@ new_heap(size, top_pad) size_t size, top_pad;
return h;
}
-/* Grow or shrink a heap. size is automatically rounded up to a
- multiple of the page size if it is positive. */
+/* Grow a heap. size is automatically rounded up to a
+ multiple of the page size. */
static int
#if __STD_C
@@ -713,41 +717,55 @@ grow_heap(h, diff) heap_info *h; long diff;
size_t page_mask = malloc_getpagesize - 1;
long new_size;
- if(diff >= 0) {
- diff = (diff + page_mask) & ~page_mask;
- new_size = (long)h->size + diff;
- if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
- return -1;
- if((unsigned long) new_size > h->mprotect_size) {
- if (mprotect((char *)h + h->mprotect_size,
- (unsigned long) new_size - h->mprotect_size,
- PROT_READ|PROT_WRITE) != 0)
- return -2;
- h->mprotect_size = new_size;
- }
- } else {
- new_size = (long)h->size + diff;
- if(new_size < (long)sizeof(*h))
- return -1;
- /* Try to re-map the extra heap space freshly to save memory, and
- make it inaccessible. */
+ diff = (diff + page_mask) & ~page_mask;
+ new_size = (long)h->size + diff;
+ if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
+ return -1;
+ if((unsigned long) new_size > h->mprotect_size) {
+ if (mprotect((char *)h + h->mprotect_size,
+ (unsigned long) new_size - h->mprotect_size,
+ PROT_READ|PROT_WRITE) != 0)
+ return -2;
+ h->mprotect_size = new_size;
+ }
+
+ h->size = new_size;
+ return 0;
+}
+
+/* Shrink a heap. */
+
+static int
+#if __STD_C
+shrink_heap(heap_info *h, long diff)
+#else
+shrink_heap(h, diff) heap_info *h; long diff;
+#endif
+{
+ long new_size;
+
+ new_size = (long)h->size - diff;
+ if(new_size < (long)sizeof(*h))
+ return -1;
+ /* Try to re-map the extra heap space freshly to save memory, and
+ make it inaccessible. */
#ifdef _LIBC
- if (__builtin_expect (__libc_enable_secure, 0))
+ if (__builtin_expect (__libc_enable_secure, 0))
#else
- if (1)
+ if (1)
#endif
- {
- if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
- MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
- return -2;
- h->mprotect_size = new_size;
- }
+ {
+ if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
+ MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
+ return -2;
+ h->mprotect_size = new_size;
+ }
#ifdef _LIBC
- else
- madvise ((char *)h + new_size, -diff, MADV_DONTNEED);
+ else
+ madvise ((char *)h + new_size, diff, MADV_DONTNEED);
#endif
- /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
- }
+ /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
+
h->size = new_size;
return 0;
}
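Stripped of diff markers and the K&R/#ifdef scaffolding, the post-patch shape
of the two functions looks roughly like this; heap_info's fields and
HEAP_MAX_SIZE are simplified stand-ins for the glibc internals, and the
__libc_enable_secure branch is condensed:

#include <stddef.h>
#include <sys/mman.h>

#define HEAP_MAX_SIZE (1024 * 1024)	/* stand-in for the real constant */

typedef struct { size_t size; size_t mprotect_size; } heap_info;

/* Grow a heap: DIFF is rounded up to a page multiple and the new
   tail is made readable/writable with mprotect() on demand.  */
static int
grow_heap (heap_info *h, long diff, long page_mask)
{
  diff = (diff + page_mask) & ~page_mask;
  long new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;
  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (mprotect ((char *) h + h->mprotect_size,
		    (unsigned long) new_size - h->mprotect_size,
		    PROT_READ | PROT_WRITE) != 0)
	return -2;
      h->mprotect_size = new_size;
    }
  h->size = new_size;
  return 0;
}

/* Shrink a heap: DIFF is now always a positive byte count, so the
   sign-flipping that callers used to do goes away.  */
static int
shrink_heap (heap_info *h, long diff)
{
  long new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;
  /* Give the tail back to the kernel; the secure variant remaps it
     PROT_NONE instead (see the patch above).  */
  madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  h->size = new_size;
  return 0;
}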
@@ -807,7 +825,7 @@ heap_trim(heap, pad) heap_info *heap; size_t pad;
if(extra < (long)pagesz)
return 0;
/* Try to shrink. */
- if(grow_heap(heap, -extra) != 0)
+ if(shrink_heap(heap, extra) != 0)
return 0;
ar_ptr->system_mem -= extra;
arena_mem -= extra;
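The heap_trim() hunk shows the payoff of the split: callers no longer encode
shrinking as a negative diff passed to grow_heap(). A side effect worth a
worked example is that the page rounding now happens only on the grow path.
A small check, assuming a 4096-byte page:

#include <assert.h>

int
main (void)
{
  long page_mask = 4096 - 1;	/* assumed page size */
  long diff = 1;		/* ask to grow by a single byte */
  /* Round up to the next page multiple, as grow_heap() does.  */
  diff = (diff + page_mask) & ~page_mask;
  assert (diff == 4096);
  return 0;
}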
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 39d5b3fa52..1e716089a2 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4475,7 +4475,7 @@ _int_malloc(mstate av, size_t bytes)
We require that av->top always exists (i.e., has size >=
MINSIZE) after initialization, so if it would otherwise be
- exhuasted by current request, it is replenished. (The main
+ exhausted by current request, it is replenished. (The main
reason for ensuring it exists is that we may need MINSIZE space
to put in fenceposts in sysmalloc.)
*/
@@ -4515,7 +4515,7 @@ _int_malloc(mstate av, size_t bytes)
*/
else {
void *p = sYSMALLOc(nb, av);
- if (__builtin_expect (perturb_byte, 0))
+ if (p != NULL && __builtin_expect (perturb_byte, 0))
alloc_perturb (p, bytes);
return p;
}
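The second malloc.c hunk is a crash fix: alloc_perturb() used to run even when
sYSMALLOc() returned NULL, writing the perturb pattern through a null pointer.
A minimal sketch of the corrected pattern follows; perturbed_alloc() is a
hypothetical wrapper, and the memset pattern is only assumed to match glibc's:

#include <stdlib.h>
#include <string.h>

static int perturb_byte;	/* stand-in for the MALLOC_PERTURB_ tunable */

/* Fill a fresh allocation with a recognizable byte pattern.  */
static void
alloc_perturb (void *p, size_t n)
{
  memset (p, perturb_byte ^ 0xff, n);
}

void *
perturbed_alloc (size_t bytes)
{
  void *p = malloc (bytes);
  /* The fix: perturb only if the allocation actually succeeded.  */
  if (p != NULL && perturb_byte)
    alloc_perturb (p, bytes);
  return p;
}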