Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c  173
1 file changed, 106 insertions(+), 67 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index f361bad636..452f036387 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1059,7 +1059,7 @@ static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
static void* _int_memalign(mstate, size_t, size_t);
static void* _mid_memalign(size_t, size_t, void *);
-static void malloc_printerr(int action, const char *str, void *ptr);
+static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
static void* internal_function mem2mem_check(void *p, size_t sz);
static int internal_function top_check(void);
@@ -1411,11 +1411,11 @@ typedef struct malloc_chunk *mbinptr;
#define last(b) ((b)->bk)
/* Take a chunk off a bin list */
-#define unlink(P, BK, FD) { \
+#define unlink(AV, P, BK, FD) { \
FD = P->fd; \
BK = P->bk; \
if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
- malloc_printerr (check_action, "corrupted double-linked list", P); \
+ malloc_printerr (check_action, "corrupted double-linked list", P, AV); \
else { \
FD->bk = BK; \
BK->fd = FD; \
@@ -1424,7 +1424,8 @@ typedef struct malloc_chunk *mbinptr;
if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0) \
|| __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0)) \
malloc_printerr (check_action, \
- "corrupted double-linked list (not small)", P);\
+ "corrupted double-linked list (not small)", \
+ P, AV); \
if (FD->fd_nextsize == NULL) { \
if (P->fd_nextsize == P) \
FD->fd_nextsize = FD->bk_nextsize = FD; \
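The test inside unlink is the long-standing safe-unlinking check: a chunk may leave its bin only if both list neighbours still point back at it. What these hunks change is the new AV parameter, which lets a failed check flag the owning arena through malloc_printerr instead of merely printing. A minimal sketch of the check itself, using a toy node type rather than glibc's malloc_chunk:

/* Minimal sketch of the safe-unlink check, with a toy node type
   standing in for glibc's malloc_chunk. */
#include <assert.h>

struct node { struct node *fd, *bk; };

/* A node may be spliced out only if both neighbours still point
   back at it; anything else indicates corrupted links. */
static int
safe_to_unlink (struct node *p)
{
  return p->fd->bk == p && p->bk->fd == p;
}

int
main (void)
{
  struct node a, b, c;
  /* Circular doubly linked list: a <-> b <-> c <-> a. */
  a.fd = &b; b.fd = &c; c.fd = &a;
  a.bk = &c; b.bk = &a; c.bk = &b;
  assert (safe_to_unlink (&b));

  b.fd->bk = &c;                 /* simulate an overwritten bk pointer */
  assert (!safe_to_unlink (&b));
  return 0;
}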
@@ -1656,6 +1657,15 @@ typedef struct malloc_chunk *mfastbinptr;
#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
+/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
+ arena. Such an arena is no longer used to allocate chunks. Chunks
+ allocated in that arena before detecting corruption are not freed. */
+
+#define ARENA_CORRUPTION_BIT (4U)
+
+#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
+#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)
+
/*
Set value of max_fast.
Use impossibly small value if 0.
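The new bit lives in the arena's existing flags word next to NONCONTIGUOUS_BIT (2U), so it costs no extra storage; the logic that consults it when handing out arenas sits outside this file (the diffstat is limited to malloc/malloc.c). A self-contained toy model of the flag word, with toy_arena standing in for glibc's malloc_state:

/* Self-contained toy model of the arena flags word; toy_arena is a
   stand-in for glibc's malloc_state. */
#include <stdio.h>

#define NONCONTIGUOUS_BIT     (2U)  /* existing flag in this file */
#define ARENA_CORRUPTION_BIT  (4U)  /* the new flag */

struct toy_arena { unsigned int flags; };

#define arena_is_corrupt(A)  (((A)->flags & ARENA_CORRUPTION_BIT))
#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)

int
main (void)
{
  struct toy_arena a = { .flags = NONCONTIGUOUS_BIT };
  printf ("corrupt before: %u\n", !!arena_is_corrupt (&a));          /* 0 */
  set_arena_corrupt (&a);
  printf ("corrupt after:  %u\n", !!arena_is_corrupt (&a));          /* 1 */
  /* Setting the corruption bit leaves unrelated flags intact. */
  printf ("noncontiguous:  %u\n", !!(a.flags & NONCONTIGUOUS_BIT));  /* 1 */
  return 0;
}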
@@ -2280,8 +2290,9 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
rather than expanding top.
*/
- if ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) &&
- (mp_.n_mmaps < mp_.n_mmaps_max))
+ if (av == NULL
+ || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
+ && (mp_.n_mmaps < mp_.n_mmaps_max)))
{
char *mm; /* return value from mmap call*/
@@ -2354,6 +2365,10 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
}
}
+ /* There are no usable arenas and mmap also failed. */
+ if (av == NULL)
+ return 0;
+
/* Record incoming configuration of top */
old_top = av->top;
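Taken together, the two hunks above make sysmalloc usable with av == NULL: the request bypasses the mmap_threshold test and goes straight to mmap, and if mmap also fails there is nothing left to try, hence the early return. A simplified model of that fallback technique, not glibc's sysmalloc:

/* Simplified model of the fallback: with no usable arena, serve the
   request straight from mmap and fail only if mmap fails too. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *
mmap_fallback_alloc (size_t nb)
{
  void *p = mmap (NULL, nb, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? NULL : p;   /* mirror the "return 0" above */
}

int
main (void)
{
  char *p = mmap_fallback_alloc (4096);
  if (p != NULL)
    {
      strcpy (p, "allocated without any arena");
      puts (p);
      munmap (p, 4096);
    }
  return 0;
}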
@@ -2528,7 +2543,8 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
else if (contiguous (av) && old_size && brk < old_end)
{
/* Oops! Someone else killed our space.. Can't touch anything. */
- malloc_printerr (3, "break adjusted to free malloc space", brk);
+ malloc_printerr (3, "break adjusted to free malloc space", brk,
+ av);
}
/*
@@ -2818,7 +2834,7 @@ munmap_chunk (mchunkptr p)
if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
{
malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
- chunk2mem (p));
+ chunk2mem (p), NULL);
return;
}
@@ -2888,22 +2904,19 @@ __libc_malloc (size_t bytes)
arena_get (ar_ptr, bytes);
- if (!ar_ptr)
- return 0;
-
victim = _int_malloc (ar_ptr, bytes);
- if (!victim)
+ /* Retry with another arena only if we were able to find a usable arena
+ before. */
+ if (!victim && ar_ptr != NULL)
{
LIBC_PROBE (memory_malloc_retry, 1, bytes);
ar_ptr = arena_get_retry (ar_ptr, bytes);
- if (__builtin_expect (ar_ptr != NULL, 1))
- {
- victim = _int_malloc (ar_ptr, bytes);
- (void) mutex_unlock (&ar_ptr->mutex);
- }
+ victim = _int_malloc (ar_ptr, bytes);
}
- else
+
+ if (ar_ptr != NULL)
(void) mutex_unlock (&ar_ptr->mutex);
+
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
return victim;
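The early "return 0" on a NULL arena is gone because _int_malloc can now cope with av == NULL (see the fallback added further down); __libc_malloc therefore always makes the first attempt, retries with another arena only if it held a usable one to begin with, and unlocks whichever arena it ends up holding. A runnable toy model of that flow, with toy_arena_get and toy_int_malloc as stand-ins for the glibc internals:

/* Toy model of the reworked flow; toy_arena_get and toy_int_malloc
   are stand-ins, not glibc functions. */
#include <stdio.h>
#include <stdlib.h>

struct arena { int usable; };

static struct arena *
toy_arena_get (struct arena *candidate)
{
  /* Returns NULL when no arena is usable (e.g. all corrupt). */
  return candidate->usable ? candidate : NULL;
}

static void *
toy_int_malloc (struct arena *a, size_t n)
{
  /* A NULL arena falls back to the system, mirroring the sysmalloc
     fallback added later in this patch. */
  (void) a;
  return malloc (n);
}

int
main (void)
{
  struct arena corrupt = { .usable = 0 };
  struct arena *ar_ptr = toy_arena_get (&corrupt);   /* NULL */

  void *victim = toy_int_malloc (ar_ptr, 64);
  if (victim == NULL && ar_ptr != NULL)   /* retry only if we ever held one */
    victim = toy_int_malloc (ar_ptr, 64);

  printf ("allocation without an arena: %s\n", victim ? "ok" : "failed");
  free (victim);
  return 0;
}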
@@ -2979,6 +2992,11 @@ __libc_realloc (void *oldmem, size_t bytes)
/* its size */
const INTERNAL_SIZE_T oldsize = chunksize (oldp);
+ if (chunk_is_mmapped (oldp))
+ ar_ptr = NULL;
+ else
+ ar_ptr = arena_for_chunk (oldp);
+
/* Little security check which won't hurt performance: the
allocator never wrapps around at the end of the address space.
Therefore we can exclude some size values which might appear
@@ -2986,7 +3004,8 @@ __libc_realloc (void *oldmem, size_t bytes)
if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
|| __builtin_expect (misaligned_chunk (oldp), 0))
{
- malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
+ malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
+ ar_ptr);
return NULL;
}
@@ -3015,10 +3034,8 @@ __libc_realloc (void *oldmem, size_t bytes)
return newmem;
}
- ar_ptr = arena_for_chunk (oldp);
(void) mutex_lock (&ar_ptr->mutex);
-
newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
(void) mutex_unlock (&ar_ptr->mutex);
@@ -3093,22 +3110,18 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
}
arena_get (ar_ptr, bytes + alignment + MINSIZE);
- if (!ar_ptr)
- return 0;
p = _int_memalign (ar_ptr, alignment, bytes);
- if (!p)
+ if (!p && ar_ptr != NULL)
{
LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
ar_ptr = arena_get_retry (ar_ptr, bytes);
- if (__builtin_expect (ar_ptr != NULL, 1))
- {
- p = _int_memalign (ar_ptr, alignment, bytes);
- (void) mutex_unlock (&ar_ptr->mutex);
- }
+ p = _int_memalign (ar_ptr, alignment, bytes);
}
- else
+
+ if (ar_ptr != NULL)
(void) mutex_unlock (&ar_ptr->mutex);
+
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
ar_ptr == arena_for_chunk (mem2chunk (p)));
return p;
@@ -3187,47 +3200,53 @@ __libc_calloc (size_t n, size_t elem_size)
sz = bytes;
arena_get (av, sz);
- if (!av)
- return 0;
-
- /* Check if we hand out the top chunk, in which case there may be no
- need to clear. */
+ if (av)
+ {
+ /* Check if we hand out the top chunk, in which case there may be no
+ need to clear. */
#if MORECORE_CLEARS
- oldtop = top (av);
- oldtopsize = chunksize (top (av));
+ oldtop = top (av);
+ oldtopsize = chunksize (top (av));
# if MORECORE_CLEARS < 2
- /* Only newly allocated memory is guaranteed to be cleared. */
- if (av == &main_arena &&
- oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
- oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
+ /* Only newly allocated memory is guaranteed to be cleared. */
+ if (av == &main_arena &&
+ oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
+ oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
# endif
- if (av != &main_arena)
+ if (av != &main_arena)
+ {
+ heap_info *heap = heap_for_ptr (oldtop);
+ if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
+ oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+ }
+#endif
+ }
+ else
{
- heap_info *heap = heap_for_ptr (oldtop);
- if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
- oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+ /* No usable arenas. */
+ oldtop = 0;
+ oldtopsize = 0;
}
-#endif
mem = _int_malloc (av, sz);
assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
av == arena_for_chunk (mem2chunk (mem)));
- if (mem == 0)
+ if (mem == 0 && av != NULL)
{
LIBC_PROBE (memory_calloc_retry, 1, sz);
av = arena_get_retry (av, sz);
- if (__builtin_expect (av != NULL, 1))
- {
- mem = _int_malloc (av, sz);
- (void) mutex_unlock (&av->mutex);
- }
- if (mem == 0)
- return 0;
+ mem = _int_malloc (av, sz);
}
- else
+
+ if (av != NULL)
(void) mutex_unlock (&av->mutex);
+
+ /* Allocation failed even after a retry. */
+ if (mem == 0)
+ return 0;
+
p = mem2chunk (mem);
/* Two optional cases in which clearing not necessary */
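The restructuring wraps the top-chunk bookkeeping in an if (av) block and zeroes oldtop/oldtopsize when no arena exists, so the later skip-the-memset logic degrades safely. The optimisation being preserved rests on the fact that pages freshly obtained from the kernel are already zero; a self-contained demonstration of that property (using mmap directly, not glibc's internal path):

/* Fresh anonymous pages arrive zero-filled, so a calloc built on
   them needs no explicit clearing. */
#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

int
main (void)
{
  void *mem = mmap (NULL, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert (mem != MAP_FAILED);

  unsigned char *p = mem;
  for (size_t i = 0; i < 4096; i++)
    assert (p[i] == 0);   /* kernel guarantees anonymous pages are zero */

  munmap (mem, 4096);
  return 0;
}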
@@ -3323,6 +3342,16 @@ _int_malloc (mstate av, size_t bytes)
checked_request2size (bytes, nb);
+ /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
+ mmap. */
+ if (__glibc_unlikely (av == NULL))
+ {
+ void *p = sysmalloc (nb, av);
+ if (p != NULL)
+ alloc_perturb (p, bytes);
+ return p;
+ }
+
/*
If the size qualifies as a fastbin, first check corresponding bin.
This code is safe to execute even if av is not yet initialized, so we
can try it without checking, which saves some time on this fast path.
*/
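This is the heart of the fallback: with no usable arena, _int_malloc hands the request to sysmalloc (which, given av == NULL, goes straight to mmap as shown earlier) and still applies alloc_perturb, so MALLOC_PERTURB_ debugging keeps working for arena-less allocations. The perturb behaviour is observable through the public mallopt interface; the expected fill value below is my reading of glibc's alloc_perturb (the perturb byte XORed with 0xff):

/* Demo of the perturb behaviour the fallback preserves. */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  mallopt (M_PERTURB, 0xAA);
  unsigned char *p = malloc (16);
  printf ("first byte: 0x%02x\n", p[0]);   /* expect 0x55 on glibc */
  free (p);
  return 0;
}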
@@ -3348,7 +3377,7 @@ _int_malloc (mstate av, size_t bytes)
{
errstr = "malloc(): memory corruption (fast)";
errout:
- malloc_printerr (check_action, errstr, chunk2mem (victim));
+ malloc_printerr (check_action, errstr, chunk2mem (victim), av);
return NULL;
}
check_remalloced_chunk (av, victim, nb);
@@ -3437,7 +3466,7 @@ _int_malloc (mstate av, size_t bytes)
if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
|| __builtin_expect (victim->size > av->system_mem, 0))
malloc_printerr (check_action, "malloc(): memory corruption",
- chunk2mem (victim));
+ chunk2mem (victim), av);
size = chunksize (victim);
/*
@@ -3584,7 +3613,7 @@ _int_malloc (mstate av, size_t bytes)
victim = victim->fd;
remainder_size = size - nb;
- unlink (victim, bck, fwd);
+ unlink (av, victim, bck, fwd);
/* Exhaust */
if (remainder_size < MINSIZE)
@@ -3689,7 +3718,7 @@ _int_malloc (mstate av, size_t bytes)
remainder_size = size - nb;
/* unlink */
- unlink (victim, bck, fwd);
+ unlink (av, victim, bck, fwd);
/* Exhaust */
if (remainder_size < MINSIZE)
@@ -3829,7 +3858,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
errout:
if (!have_lock && locked)
(void) mutex_unlock (&av->mutex);
- malloc_printerr (check_action, errstr, chunk2mem (p));
+ malloc_printerr (check_action, errstr, chunk2mem (p), av);
return;
}
/* We know that each chunk is at least MINSIZE bytes in size or a
multiple of MALLOC_ALIGNMENT. */
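Note the ordering in errout: the arena lock is released before malloc_printerr runs, because the reporting path may re-enter the allocator (for instance to build a backtrace) and would otherwise self-deadlock; marking the arena corrupt through the new av argument then steers subsequent allocations away from it. A toy illustration of the unlock-before-report discipline, with report_corruption as a hypothetical stand-in:

/* Toy illustration: drop the lock you hold before calling anything
   that may re-enter the allocator.  report_corruption is
   hypothetical, not a glibc function. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

static void
report_corruption (const char *msg)
{
  /* In glibc this is malloc_printerr -> __libc_message, which may
     itself allocate; calling it with the arena lock held risks
     deadlock. */
  fprintf (stderr, "corruption: %s\n", msg);
}

static void
toy_free_error (int have_lock, int locked)
{
  if (!have_lock && locked)
    pthread_mutex_unlock (&arena_lock);   /* drop the lock first */
  report_corruption ("free(): invalid pointer");
}

int
main (void)
{
  pthread_mutex_lock (&arena_lock);
  toy_free_error (/* have_lock */ 0, /* locked */ 1);
  return 0;
}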
@@ -3967,7 +3996,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
prevsize = p->prev_size;
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
+ unlink(av, p, bck, fwd);
}
if (nextchunk != av->top) {
@@ -3976,7 +4005,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
/* consolidate forward */
if (!nextinuse) {
- unlink(nextchunk, bck, fwd);
+ unlink(av, nextchunk, bck, fwd);
size += nextsize;
} else
clear_inuse_bit_at_offset(nextchunk, 0);
@@ -4137,7 +4166,7 @@ static void malloc_consolidate(mstate av)
prevsize = p->prev_size;
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
- unlink(p, bck, fwd);
+ unlink(av, p, bck, fwd);
}
if (nextchunk != av->top) {
@@ -4145,7 +4174,7 @@ static void malloc_consolidate(mstate av)
if (!nextinuse) {
size += nextsize;
- unlink(nextchunk, bck, fwd);
+ unlink(av, nextchunk, bck, fwd);
} else
clear_inuse_bit_at_offset(nextchunk, 0);
@@ -4214,7 +4243,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
{
errstr = "realloc(): invalid old size";
errout:
- malloc_printerr (check_action, errstr, chunk2mem (oldp));
+ malloc_printerr (check_action, errstr, chunk2mem (oldp), av);
return NULL;
}
@@ -4260,7 +4289,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
(unsigned long) (nb))
{
newp = oldp;
- unlink (next, bck, fwd);
+ unlink (av, next, bck, fwd);
}
/* allocate, copy, free */
@@ -4455,6 +4484,10 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
static int
mtrim (mstate av, size_t pad)
{
+ /* Don't touch corrupt arenas. */
+ if (arena_is_corrupt (av))
+ return 0;
+
/* Ensure initialization/consolidation */
malloc_consolidate (av);
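Since a trim walks the arena's bins, skipping a corrupt arena avoids stumbling over the very damage that was detected. mtrim backs the public malloc_trim entry point; basic usage for reference:

/* Basic use of the public entry point that mtrim implements. */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  /* Create some heap churn below the mmap threshold so there may be
     something to give back. */
  void *blocks[256];
  for (int i = 0; i < 256; i++)
    blocks[i] = malloc (4096);
  for (int i = 0; i < 256; i++)
    free (blocks[i]);

  /* Returns 1 if memory was released to the system, 0 otherwise. */
  printf ("malloc_trim: %d\n", malloc_trim (0));
  return 0;
}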
@@ -4945,8 +4978,14 @@ libc_hidden_def (__libc_mallopt)
extern char **__libc_argv attribute_hidden;
static void
-malloc_printerr (int action, const char *str, void *ptr)
+malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
{
+ /* Avoid using this arena in future. We do not attempt to synchronize this
+ with anything else because we minimally want to ensure that __libc_message
+ gets its resources safely without stumbling on the current corruption. */
+ if (ar_ptr)
+ set_arena_corrupt (ar_ptr);
+
if ((action & 5) == 5)
__libc_message (action & 2, "%s\n", str);
else if (action & 1)
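The only functional change in malloc_printerr is the set_arena_corrupt call; the action dispatch below it is untouched. As I read the bits: bit 0 requests a message, bit 1 makes the report fatal (it is forwarded to __libc_message as its do-abort flag), and bit 2 combined with bit 0 selects the terse message without the pointer and program name. A simplified toy decoder of that dispatch, not glibc's implementation:

/* Toy decoder for the check_action bits tested above. */
#include <stdio.h>
#include <stdlib.h>

static void
toy_printerr (int action, const char *str, void *ptr)
{
  if ((action & 5) == 5)        /* bits 0+2: terse message only */
    fprintf (stderr, "%s\n", str);
  else if (action & 1)          /* bit 0: verbose, with the pointer */
    fprintf (stderr, "*** Error: %s: %p ***\n", str, ptr);
  else if (action & 2)          /* bit 1 alone: die silently */
    abort ();
  /* In glibc, bit 1 also makes the two message paths fatal. */
}

int
main (void)
{
  int dummy;
  toy_printerr (1, "corrupted double-linked list", &dummy);
  toy_printerr (5, "corrupted double-linked list", &dummy);
  return 0;
}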