path: root/malloc/malloc.c
Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c  1580
1 file changed, 962 insertions, 618 deletions
diff --git a/malloc/malloc.c b/malloc/malloc.c
index d20d5955db..e247c77b7d 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1,5 +1,5 @@
/* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 1996-2016 Free Software Foundation, Inc.
+ Copyright (C) 1996-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>
and Doug Lea <dl@cs.oswego.edu>, 2001.
@@ -84,7 +84,6 @@
independent_calloc(size_t n_elements, size_t size, void* chunks[]);
independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
pvalloc(size_t n);
- cfree(void* p);
malloc_trim(size_t pad);
malloc_usable_size(void* p);
malloc_stats();
@@ -173,8 +172,6 @@
Changing default word sizes:
INTERNAL_SIZE_T size_t
- MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
- __alignof__ (long double))
Configuration and functionality options:
@@ -216,9 +213,6 @@
#include <stdlib.h> /* for getenv(), abort() */
#include <unistd.h> /* for __libc_enable_secure */
-#include <malloc-machine.h>
-#include <malloc-sysdep.h>
-
#include <atomic.h>
#include <_itoa.h>
#include <bits/wordsize.h>
@@ -229,6 +223,7 @@
#include <unistd.h>
#include <stdio.h> /* needed for malloc_stats */
#include <errno.h>
+#include <assert.h>
#include <shlib-compat.h>
@@ -242,8 +237,15 @@
#include <sys/param.h>
/* For ALIGN_UP et al.  */
-#include <libc-internal.h>
+#include <libc-pointer-arith.h>
+
+/* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
+#include <libc-diag.h>
+#include <malloc/malloc-internal.h>
+
+/* For SINGLE_THREAD_P. */
+#include <sysdep-cancel.h>
/*
Debugging:
@@ -277,13 +279,9 @@
#define MALLOC_DEBUG 0
#endif
-#ifdef NDEBUG
-# define assert(expr) ((void) 0)
-#else
-# define assert(expr) \
- ((expr) \
- ? ((void) 0) \
- : __malloc_assert (#expr, __FILE__, __LINE__, __func__))
+#ifndef NDEBUG
+# define __assert_fail(assertion, file, line, function) \
+ __malloc_assert(assertion, file, line, function)
extern const char *__progname;
@@ -301,76 +299,30 @@ __malloc_assert (const char *assertion, const char *file, unsigned int line,
}
#endif
+#if USE_TCACHE
+/* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
+# define TCACHE_MAX_BINS 64
+# define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
-/*
- INTERNAL_SIZE_T is the word-size used for internal bookkeeping
- of chunk sizes.
-
- The default version is the same as size_t.
-
- While not strictly necessary, it is best to define this as an
- unsigned type, even if size_t is a signed type. This may avoid some
- artificial size limitations on some systems.
-
- On a 64-bit machine, you may be able to reduce malloc overhead by
- defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
- expense of not being able to handle more than 2^32 of malloced
- space. If this limitation is acceptable, you are encouraged to set
- this unless you are on a platform requiring 16byte alignments. In
- this case the alignment requirements turn out to negate any
- potential advantages of decreasing size_t word size.
-
- Implementors: Beware of the possible combinations of:
- - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
- and might be the same width as int or as long
- - size_t might have different width and signedness as INTERNAL_SIZE_T
- - int and long might be 32 or 64 bits, and might be the same width
- To deal with this, most comparisons and difference computations
- among INTERNAL_SIZE_Ts should cast them to unsigned long, being
- aware of the fact that casting an unsigned int to a wider long does
- not sign-extend. (This also makes checking for negative numbers
- awkward.) Some of these casts result in harmless compiler warnings
- on some systems.
-*/
-
-#ifndef INTERNAL_SIZE_T
-#define INTERNAL_SIZE_T size_t
-#endif
-
-/* The corresponding word size */
-#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
-
-
-/*
- MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
- It must be a power of two at least 2 * SIZE_SZ, even on machines
- for which smaller alignments would suffice. It may be defined as
- larger than this though. Note however that code and data structures
- are optimized for the case of 8-byte alignment.
-*/
+/* Only used to pre-fill the tunables. */
+# define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
+/* When "x" is from chunksize(). */
+# define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
+/* When "x" is a user-provided size. */
+# define usize2tidx(x) csize2tidx (request2size (x))
-#ifndef MALLOC_ALIGNMENT
-# if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
-/* This is the correct definition when there is no past ABI to constrain it.
+/* With rounding and alignment, the bins are...
+ idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
+ idx 1 bytes 25..40 or 13..20
+ idx 2 bytes 41..56 or 21..28
+ etc. */
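
As a cross-check of the bin table just above, here is a small standalone sketch of the same index arithmetic. The concrete values (SIZE_SZ == 8, MALLOC_ALIGNMENT == 16, MINSIZE == 32) are assumptions for a typical 64-bit configuration, not something this patch defines:

#include <assert.h>
#include <stddef.h>

/* Assumed 64-bit constants, mirroring the macros in the patch.  */
#define SIZE_SZ          8
#define MALLOC_ALIGNMENT 16
#define MINSIZE          32

#define tidx2usize(idx)  (((size_t) (idx)) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
#define csize2tidx(x)    (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)

int
main (void)
{
  /* Largest usable request in the first three bins: 24, 40, 56 bytes,
     matching the "idx 0 bytes 0..24, idx 1 bytes 25..40, ..." table.  */
  assert (tidx2usize (0) == 24);
  assert (tidx2usize (1) == 40);
  assert (tidx2usize (2) == 56);
  /* The two smallest chunk sizes (32 and 48 bytes) land in bins 0 and 1.  */
  assert (csize2tidx ((size_t) 32) == 0);
  assert (csize2tidx ((size_t) 48) == 1);
  return 0;
}
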
- Among configurations with a past ABI constraint, it differs from
- 2*SIZE_SZ only on powerpc32. For the time being, changing this is
- causing more compatibility problems due to malloc_get_state and
- malloc_set_state than will returning blocks not adequately aligned for
- long double objects under -mlong-double-128. */
-
-# define MALLOC_ALIGNMENT (2 *SIZE_SZ < __alignof__ (long double) \
- ? __alignof__ (long double) : 2 *SIZE_SZ)
-# else
-# define MALLOC_ALIGNMENT (2 *SIZE_SZ)
-# endif
+/* This is another arbitrary limit, which tunables can change. Each
+ tcache bin will hold at most this number of chunks. */
+# define TCACHE_FILL_COUNT 7
#endif
-/* The corresponding bit mask value */
-#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
-
-
/*
REALLOC_ZERO_BYTES_FREES should be set if a call to
@@ -502,6 +454,15 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
#define HAVE_MREMAP 0
#endif
+/* We may need to support __malloc_initialize_hook for backwards
+ compatibility. */
+
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
+# define HAVE_MALLOC_INIT_HOOK 1
+#else
+# define HAVE_MALLOC_INIT_HOOK 0
+#endif
+
/*
This version of malloc supports the standard SVID/XPG mallinfo
@@ -580,9 +541,9 @@ void* __libc_calloc(size_t, size_t);
REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
zero (re)allocates a minimum-sized chunk.
- Large chunks that were internally obtained via mmap will always
- be reallocated using malloc-copy-free sequences unless
- the system supports MREMAP (currently only linux).
+ Large chunks that were internally obtained via mmap will always be
+ grown using malloc-copy-free sequences unless the system supports
+ MREMAP (currently only linux).
The old unix realloc convention of allowing the last-free'd chunk
to be used as an argument to realloc is not supported.
@@ -649,8 +610,7 @@ libc_hidden_proto (__libc_mallopt)
have been freed but not reused or consolidated)
hblks: current number of mmapped regions
hblkhd: total bytes held in mmapped regions
- usmblks: the maximum total allocated space. This will be greater
- than current total if trimming has occurred.
+ usmblks: always 0
fsmblks: total bytes held in fastbin blocks
uordblks: current total allocated space (normal or mmapped)
fordblks: total free space
@@ -738,22 +698,6 @@ size_t __malloc_usable_size(void*);
void __malloc_stats(void);
/*
- malloc_get_state(void);
-
- Returns the state of all malloc variables in an opaque data
- structure.
-*/
-void* __malloc_get_state(void);
-
-/*
- malloc_set_state(void* state);
-
- Restore the state of all malloc variables from data obtained with
- malloc_get_state().
-*/
-int __malloc_set_state(void*);
-
-/*
posix_memalign(void **memptr, size_t alignment, size_t size);
POSIX wrapper like memalign(), checking for validity of size.
@@ -1039,13 +983,6 @@ int __posix_memalign(void **, size_t, size_t);
#define RETURN_ADDRESS(X_) (NULL)
#endif
-/* On some platforms we can compile internal, not exported functions better.
- Let the environment provide a macro and define it to be empty if it
- is not available. */
-#ifndef internal_function
-# define internal_function
-#endif
-
/* Forward declarations. */
struct malloc_chunk;
typedef struct malloc_chunk* mchunkptr;
@@ -1059,13 +996,13 @@ static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
static void* _int_memalign(mstate, size_t, size_t);
static void* _mid_memalign(size_t, size_t, void *);
-static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
+static void malloc_printerr(const char *str) __attribute__ ((noreturn));
-static void* internal_function mem2mem_check(void *p, size_t sz);
-static int internal_function top_check(void);
-static void internal_function munmap_chunk(mchunkptr p);
+static void* mem2mem_check(void *p, size_t sz);
+static void top_check(void);
+static void munmap_chunk(mchunkptr p);
#if HAVE_MREMAP
-static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
+static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
#endif
static void* malloc_check(size_t sz, const void *caller);
@@ -1074,10 +1011,6 @@ static void* realloc_check(void* oldmem, size_t bytes,
const void *caller);
static void* memalign_check(size_t alignment, size_t bytes,
const void *caller);
-#ifndef NO_THREADS
-static void* malloc_atfork(size_t sz, const void *caller);
-static void free_atfork(void* mem, const void *caller);
-#endif
/* ------------------ MMAP support ------------------ */
@@ -1110,8 +1043,8 @@ static void free_atfork(void* mem, const void *caller);
struct malloc_chunk {
- INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
- INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
+ INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
+ INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
struct malloc_chunk* fd; /* double links -- used only if free. */
struct malloc_chunk* bk;
@@ -1140,18 +1073,19 @@ struct malloc_chunk {
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if allocated | |
+ | Size of previous chunk, if unallocated (P clear) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |M|P|
+ | Size of chunk, in bytes |A|M|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| User data starts here... .
. .
. (malloc_usable_size() bytes) .
. |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk |
+ | (size of chunk, but used for application data) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of next chunk, in bytes |A|0|1|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
Where "chunk" is the front of the chunk for the purpose of most of
the malloc code, but "mem" is the pointer that is returned to the
@@ -1164,9 +1098,9 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Free chunks are stored in circular doubly-linked lists, and look like this:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk |
+ | Size of previous chunk, if unallocated (P clear) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `head:' | Size of chunk, in bytes |P|
+ `head:' | Size of chunk, in bytes |A|0|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Forward pointer to next chunk in list |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -1178,6 +1112,8 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
`foot:' | Size of chunk, in bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Size of next chunk, in bytes |A|0|0|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The P (PREV_INUSE) bit, stored in the unused low-order bit of the
chunk size (which is always a multiple of two words), is an in-use
@@ -1190,12 +1126,21 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
the size of the previous chunk, and might even get a memory
addressing fault when trying to do so.
+ The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
+ main arena, described by the main_arena variable. When additional
+ threads are spawned, each thread receives its own arena (up to a
+ configurable limit, after which arenas are reused for multiple
+ threads), and the chunks in these arenas have the A bit set. To
+ find the arena for a chunk on such a non-main arena, heap_for_ptr
+ performs a bit mask operation and indirection through the ar_ptr
+ member of the per-heap header heap_info (see arena.c).
+
Note that the `foot' of the current chunk is actually represented
as the prev_size of the NEXT chunk. This makes it easier to
deal with alignments etc but can be very confusing when trying
to extend or adapt this code.
- The two exceptions to all this are
+ The three exceptions to all this are:
1. The special chunk `top' doesn't bother using the
trailing size field since there is no next contiguous chunk
@@ -1205,8 +1150,16 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2. Chunks allocated via mmap, which have the second-lowest-order
bit M (IS_MMAPPED) set in their size fields. Because they are
- allocated one-by-one, each must contain its own trailing size field.
-
+ allocated one-by-one, each must contain its own trailing size
+ field. If the M bit is set, the other bits are ignored
+ (because mmapped chunks are neither in an arena, nor adjacent
+ to a freed chunk). The M bit is also used for chunks which
+ originally came from a dumped heap via malloc_set_state in
+ hooks.c.
+
+ 3. Chunks in fastbins are treated as allocated chunks from the
+ point of view of the chunk allocator. They are consolidated
+ with their neighbors only in bulk, in malloc_consolidate.
*/
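
For the A (NON_MAIN_ARENA) bit described above, the actual lookup lives in arena.c; what follows is only a simplified sketch of that mechanism. heap_for_ptr and the heap_info layout are paraphrased, and HEAP_MAX_SIZE is an assumed example value; none of this is introduced by the patch itself:

/* Sketch: a non-main heap is a HEAP_MAX_SIZE-aligned mmapped region that
   starts with a heap_info header pointing back at the owning arena.  */
#define HEAP_MAX_SIZE_SKETCH (64 * 1024 * 1024)   /* assumed example value */

typedef struct heap_info_sketch
{
  struct malloc_state *ar_ptr;     /* Arena that owns chunks in this heap.  */
  /* ... remaining members elided ...  */
} heap_info_sketch;

/* Round a chunk pointer down to the start of its heap.  */
#define heap_for_ptr_sketch(ptr) \
  ((heap_info_sketch *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE_SKETCH - 1)))

/* A bit clear: the chunk is in main_arena.  A bit set: follow ar_ptr.  */
#define arena_for_chunk_sketch(p) \
  (chunk_main_arena (p) ? &main_arena : heap_for_ptr_sketch (p)->ar_ptr)
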
/*
@@ -1252,14 +1205,21 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
MINSIZE : \
((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
-/* Same, except also perform argument check */
-
-#define checked_request2size(req, sz) \
- if (REQUEST_OUT_OF_RANGE (req)) { \
- __set_errno (ENOMEM); \
- return 0; \
- } \
- (sz) = request2size (req);
+/* Same, except also perform an argument and result check. First, we check
+ that the padding done by request2size didn't result in an integer
+ overflow. Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
+ size isn't so large that a later alignment would lead to another integer
+ overflow. */
+#define checked_request2size(req, sz) \
+({ \
+ (sz) = request2size (req); \
+ if (((sz) < (req)) \
+ || REQUEST_OUT_OF_RANGE (sz)) \
+ { \
+ __set_errno (ENOMEM); \
+ return 0; \
+ } \
+})
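
To illustrate what the added "(sz) < (req)" test catches, here is a minimal sketch of the same rounding applied to a near-SIZE_MAX request. The constants are assumed 64-bit values (SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15, MINSIZE == 32), and request2size_sketch copies the shape of request2size above:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SIZE_SZ           8
#define MALLOC_ALIGN_MASK ((size_t) 15)
#define MINSIZE           ((size_t) 32)

/* Same rounding as request2size: add overhead, round up to the alignment.  */
static size_t
request2size_sketch (size_t req)
{
  return (req + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)
         ? MINSIZE
         : (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
}

int
main (void)
{
  size_t req = SIZE_MAX - 4;                /* absurdly large request */
  size_t sz = request2size_sketch (req);    /* addition wraps; sz == MINSIZE */
  /* Because the padded size wrapped around, it is now smaller than the
     request, which is exactly what the new (sz) < (req) check rejects
     with ENOMEM before a tiny chunk could be handed back.  */
  assert (sz < req);
  return 0;
}
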
/*
--------------- Physical chunk operations ---------------
@@ -1270,14 +1230,14 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define PREV_INUSE 0x1
/* extract inuse bit of previous chunk */
-#define prev_inuse(p) ((p)->size & PREV_INUSE)
+#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2
/* check for mmap()'ed chunk */
-#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
+#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
@@ -1285,8 +1245,11 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
the chunk to the user, if necessary. */
#define NON_MAIN_ARENA 0x4
-/* check for chunk from non-main arena */
-#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
+/* Check for chunk from main arena. */
+#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
+
+/* Mark a chunk as not being on the main arena. */
+#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
/*
@@ -1300,50 +1263,61 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
/* Get size, ignoring use bits */
-#define chunksize(p) ((p)->size & ~(SIZE_BITS))
+#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
+/* Like chunksize, but do not mask SIZE_BITS. */
+#define chunksize_nomask(p) ((p)->mchunk_size)
/* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
+#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
-/* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
+/* Size of the chunk below P. Only valid if !prev_inuse (P). */
+#define prev_size(p) ((p)->mchunk_prev_size)
+
+/* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
+#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
+
+/* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
+#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
/* extract p's inuse bit */
#define inuse(p) \
- ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
+ ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p) \
- ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
+ ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
#define clear_inuse(p) \
- ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
+ ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s) \
- (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
+ (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
#define set_inuse_bit_at_offset(p, s) \
- (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
+ (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
#define clear_inuse_bit_at_offset(p, s) \
- (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
+ (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
/* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
+#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
/* Set size/use field */
-#define set_head(p, s) ((p)->size = (s))
+#define set_head(p, s) ((p)->mchunk_size = (s))
/* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
+#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
+
+#pragma GCC poison mchunk_size
+#pragma GCC poison mchunk_prev_size
/*
-------------------- Internal data structures --------------------
@@ -1412,20 +1386,20 @@ typedef struct malloc_chunk *mbinptr;
/* Take a chunk off a bin list */
#define unlink(AV, P, BK, FD) { \
+ if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0)) \
+ malloc_printerr ("corrupted size vs. prev_size"); \
FD = P->fd; \
BK = P->bk; \
if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
- malloc_printerr (check_action, "corrupted double-linked list", P, AV); \
+ malloc_printerr ("corrupted double-linked list"); \
else { \
FD->bk = BK; \
BK->fd = FD; \
- if (!in_smallbin_range (P->size) \
+ if (!in_smallbin_range (chunksize_nomask (P)) \
&& __builtin_expect (P->fd_nextsize != NULL, 0)) { \
if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0) \
|| __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0)) \
- malloc_printerr (check_action, \
- "corrupted double-linked list (not small)", \
- P, AV); \
+ malloc_printerr ("corrupted double-linked list (not small)"); \
if (FD->fd_nextsize == NULL) { \
if (P->fd_nextsize == P) \
FD->fd_nextsize = FD->bk_nextsize = FD; \
@@ -1621,27 +1595,6 @@ typedef struct malloc_chunk *mfastbinptr;
#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
/*
- Since the lowest 2 bits in max_fast don't matter in size comparisons,
- they are used as flags.
- */
-
-/*
- FASTCHUNKS_BIT held in max_fast indicates that there are probably
- some fastbin chunks. It is set true on entering a chunk into any
- fastbin, and cleared only in malloc_consolidate.
-
- The truth value is inverted so that have_fastchunks will be true
- upon startup (since statics are zero-filled), simplifying
- initialization checks.
- */
-
-#define FASTCHUNKS_BIT (1U)
-
-#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
-#define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
-#define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
-
-/*
NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
regions. Otherwise, contiguity is exploited in merging together,
when possible, results from consecutive MORECORE calls.
@@ -1657,40 +1610,63 @@ typedef struct malloc_chunk *mfastbinptr;
#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
-/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
- arena. Such an arena is no longer used to allocate chunks. Chunks
- allocated in that arena before detecting corruption are not freed. */
-
-#define ARENA_CORRUPTION_BIT (4U)
-
-#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
-#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)
+/* Maximum size of memory handled in fastbins. */
+static INTERNAL_SIZE_T global_max_fast;
/*
Set value of max_fast.
Use impossibly small value if 0.
- Precondition: there are no existing fastbin chunks.
- Setting the value clears fastchunk bit but preserves noncontiguous bit.
+ Precondition: there are no existing fastbin chunks in the main arena.
+ Since do_check_malloc_state () checks this, we call malloc_consolidate ()
+ before changing max_fast. Note other arenas will leak their fast bin
+ entries if max_fast is reduced.
*/
#define set_max_fast(s) \
global_max_fast = (((s) == 0) \
? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
-#define get_max_fast() global_max_fast
+static inline INTERNAL_SIZE_T
+get_max_fast (void)
+{
+ /* Tell the GCC optimizers that global_max_fast is never larger
+ than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
+ _int_malloc after constant propagation of the size parameter.
+ (The code never executes because malloc preserves the
+ global_max_fast invariant, but the optimizers may not recognize
+ this.) */
+ if (global_max_fast > MAX_FAST_SIZE)
+ __builtin_unreachable ();
+ return global_max_fast;
+}
/*
----------- Internal state representation and initialization -----------
*/
+/*
+ have_fastchunks indicates that there are probably some fastbin chunks.
+ It is set true on entering a chunk into any fastbin, and cleared early in
+ malloc_consolidate. The value is approximate since it may be set when there
+ are no fastbin chunks, or it may be clear even if there are fastbin chunks
+ available. Given that its sole purpose is to reduce the number of redundant calls
+ to malloc_consolidate, it does not affect correctness. As a result we can safely
+ use relaxed atomic accesses.
+ */
+
+
struct malloc_state
{
/* Serialize access. */
- mutex_t mutex;
+ __libc_lock_define (, mutex);
/* Flags (formerly in max_fast). */
int flags;
+ /* Set if the fastbin chunks contain recently inserted free blocks. */
+ /* Note this is a bool but not all targets support atomics on booleans. */
+ int have_fastchunks;
+
/* Fastbins */
mfastbinptr fastbinsY[NFASTBINS];
@@ -1743,13 +1719,21 @@ struct malloc_par
/* Statistics */
INTERNAL_SIZE_T mmapped_mem;
- /*INTERNAL_SIZE_T sbrked_mem;*/
- /*INTERNAL_SIZE_T max_sbrked_mem;*/
INTERNAL_SIZE_T max_mmapped_mem;
- INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
/* First address handed out by MORECORE/sbrk. */
char *sbrk_base;
+
+#if USE_TCACHE
+ /* Maximum number of buckets to use. */
+ size_t tcache_bins;
+ size_t tcache_max_bytes;
+ /* Maximum number of chunks in each bucket. */
+ size_t tcache_count;
+ /* Maximum number of chunks to remove from the unsorted list, which
+ aren't used to prefill the cache. */
+ size_t tcache_unsorted_limit;
+#endif
};
/* There are several instances of this struct ("arenas") in this
@@ -1765,6 +1749,19 @@ static struct malloc_state main_arena =
.attached_threads = 1
};
+/* These variables are used for undumping support. Chunks are marked
+ as using mmap, but we leave them alone if they fall into this
+ range. NB: The chunk size for these chunks only includes the
+ initial size field (of SIZE_SZ bytes); there is no trailing size
+ field (unlike with regular mmapped chunks). */
+static mchunkptr dumped_main_arena_start; /* Inclusive. */
+static mchunkptr dumped_main_arena_end; /* Exclusive. */
+
+/* True if the pointer falls into the dumped arena. Use this after
+ chunk_is_mmapped indicates a chunk is mmapped. */
+#define DUMPED_MAIN_ARENA_CHUNK(p) \
+ ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
+
/* There is only one instance of the malloc parameters. */
static struct malloc_par mp_ =
@@ -1775,25 +1772,20 @@ static struct malloc_par mp_ =
.trim_threshold = DEFAULT_TRIM_THRESHOLD,
#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
.arena_test = NARENAS_FROM_NCORES (1)
+#if USE_TCACHE
+ ,
+ .tcache_count = TCACHE_FILL_COUNT,
+ .tcache_bins = TCACHE_MAX_BINS,
+ .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
+ .tcache_unsorted_limit = 0 /* No limit. */
+#endif
};
-
-/* Non public mallopt parameters. */
-#define M_ARENA_TEST -7
-#define M_ARENA_MAX -8
-
-
-/* Maximum size of memory handled in fastbins. */
-static INTERNAL_SIZE_T global_max_fast;
-
/*
Initialize a malloc_state struct.
- This is called only from within malloc_consolidate, which needs
- be called in the same contexts anyway. It is never called directly
- outside of malloc_consolidate because some optimizing compilers try
- to inline it at all call points, which turns out not to be an
- optimization at all. (Inlining it in malloc_consolidate is fine though.)
+ This is called from ptmalloc_init () or from _int_new_arena ()
+ when creating a new arena.
*/
static void
@@ -1815,7 +1807,7 @@ malloc_init_state (mstate av)
set_noncontiguous (av);
if (av == &main_arena)
set_max_fast (DEFAULT_MXFAST);
- av->flags |= FASTCHUNKS_BIT;
+ atomic_store_relaxed (&av->have_fastchunks, false);
av->top = initial_top (av);
}
@@ -1847,7 +1839,12 @@ static void *realloc_hook_ini (void *ptr, size_t sz,
static void *memalign_hook_ini (size_t alignment, size_t sz,
const void *caller) __THROW;
+#if HAVE_MALLOC_INIT_HOOK
void weak_variable (*__malloc_initialize_hook) (void) = NULL;
+compat_symbol (libc, __malloc_initialize_hook,
+ __malloc_initialize_hook, GLIBC_2_0);
+#endif
+
void weak_variable (*__free_hook) (void *__ptr,
const void *) = NULL;
void *weak_variable (*__malloc_hook)
@@ -1860,15 +1857,9 @@ void *weak_variable (*__memalign_hook)
= memalign_hook_ini;
void weak_variable (*__after_morecore_hook) (void) = NULL;
-
-/* ---------------- Error behavior ------------------------------------ */
-
-#ifndef DEFAULT_CHECK_ACTION
-# define DEFAULT_CHECK_ACTION 3
-#endif
-
-static int check_action = DEFAULT_CHECK_ACTION;
-
+/* This function is called from the arena shutdown hook, to free the
+ thread cache (if it exists). */
+static void tcache_thread_shutdown (void);
/* ------------------ Testing support ----------------------------------*/
@@ -1954,7 +1945,7 @@ do_check_chunk (mstate av, mchunkptr p)
assert (prev_inuse (p));
}
}
- else
+ else if (!DUMPED_MAIN_ARENA_CHUNK (p))
{
/* address is outside main heap */
if (contiguous (av) && av->top != initial_top (av))
@@ -1962,7 +1953,7 @@ do_check_chunk (mstate av, mchunkptr p)
assert (((char *) p) < min_address || ((char *) p) >= max_address);
}
/* chunk is page-aligned */
- assert (((p->prev_size + sz) & (GLRO (dl_pagesize) - 1)) == 0);
+ assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
/* mem is aligned */
assert (aligned_OK (chunk2mem (p)));
}
@@ -1975,7 +1966,7 @@ do_check_chunk (mstate av, mchunkptr p)
static void
do_check_free_chunk (mstate av, mchunkptr p)
{
- INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+ INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
mchunkptr next = chunk_at_offset (p, sz);
do_check_chunk (av, p);
@@ -1990,7 +1981,7 @@ do_check_free_chunk (mstate av, mchunkptr p)
assert ((sz & MALLOC_ALIGN_MASK) == 0);
assert (aligned_OK (chunk2mem (p)));
/* ... matching footer field */
- assert (next->prev_size == sz);
+ assert (prev_size (next_chunk (p)) == sz);
/* ... and is fully consolidated */
assert (prev_inuse (p));
assert (next == av->top || inuse (next));
@@ -2050,15 +2041,15 @@ do_check_inuse_chunk (mstate av, mchunkptr p)
static void
do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
{
- INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+ INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
if (!chunk_is_mmapped (p))
{
assert (av == arena_for_chunk (p));
- if (chunk_non_main_arena (p))
- assert (av != &main_arena);
- else
+ if (chunk_main_arena (p))
assert (av == &main_arena);
+ else
+ assert (av != &main_arena);
}
do_check_inuse_chunk (av, p);
@@ -2126,8 +2117,11 @@ do_check_malloc_state (mstate av)
/* alignment is a power of 2 */
assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
- /* cannot run remaining checks until fully initialized */
- if (av->top == 0 || av->top == initial_top (av))
+ /* Check the arena is initialized. */
+ assert (av->top != 0);
+
+ /* No memory has been allocated yet, so doing more tests is not possible. */
+ if (av->top == initial_top (av))
return;
/* pagesize is a power of 2 */
@@ -2175,11 +2169,6 @@ do_check_malloc_state (mstate av)
}
}
- if (total != 0)
- assert (have_fastchunks (av));
- else if (!have_fastchunks (av))
- assert (total == 0);
-
/* check normal bins */
for (i = 1; i < NBINS; ++i)
{
@@ -2347,12 +2336,13 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
correction = MALLOC_ALIGNMENT - front_misalign;
p = (mchunkptr) (mm + correction);
- p->prev_size = correction;
+ set_prev_size (p, correction);
set_head (p, (size - correction) | IS_MMAPPED);
}
else
{
p = (mchunkptr) mm;
+ set_prev_size (p, 0);
set_head (p, size | IS_MMAPPED);
}
@@ -2410,7 +2400,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
&& grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
{
av->system_mem += old_heap->size - old_heap_size;
- arena_mem += old_heap->size - old_heap_size;
set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
| PREV_INUSE);
}
@@ -2420,7 +2409,6 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
heap->ar_ptr = av;
heap->prev = old_heap;
av->system_mem += heap->size;
- arena_mem += heap->size;
/* Set up the new top. */
top (av) = chunk_at_offset (heap, sizeof (*heap));
set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
@@ -2548,11 +2536,8 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
set_head (old_top, (size + old_size) | PREV_INUSE);
else if (contiguous (av) && old_size && brk < old_end)
- {
- /* Oops! Someone else killed our space.. Can't touch anything. */
- malloc_printerr (3, "break adjusted to free malloc space", brk,
- av);
- }
+ /* Oops! Someone else killed our space.. Can't touch anything. */
+ malloc_printerr ("break adjusted to free malloc space");
/*
Otherwise, make adjustments:
@@ -2704,11 +2689,10 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
intentional. We need the fencepost, even if old_top otherwise gets
lost.
*/
- chunk_at_offset (old_top, old_size)->size =
- (2 * SIZE_SZ) | PREV_INUSE;
-
- chunk_at_offset (old_top, old_size + 2 * SIZE_SZ)->size =
- (2 * SIZE_SZ) | PREV_INUSE;
+ set_head (chunk_at_offset (old_top, old_size),
+ (2 * SIZE_SZ) | PREV_INUSE);
+ set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
+ (2 * SIZE_SZ) | PREV_INUSE);
/* If possible, release the rest. */
if (old_size >= MINSIZE)
@@ -2824,26 +2808,26 @@ systrim (size_t pad, mstate av)
}
static void
-internal_function
munmap_chunk (mchunkptr p)
{
INTERNAL_SIZE_T size = chunksize (p);
assert (chunk_is_mmapped (p));
- uintptr_t block = (uintptr_t) p - p->prev_size;
- size_t total_size = p->prev_size + size;
+ /* Do nothing if the chunk is a faked mmapped chunk in the dumped
+ main arena. We never free this memory. */
+ if (DUMPED_MAIN_ARENA_CHUNK (p))
+ return;
+
+ uintptr_t block = (uintptr_t) p - prev_size (p);
+ size_t total_size = prev_size (p) + size;
/* Unfortunately we have to do the compiler's job by hand here. Normally
we would test BLOCK and TOTAL-SIZE separately for compliance with the
page size. But gcc does not recognize the optimization possibility
(at the moment, at least), so we combine the two values into one before
the bit test. */
if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
- {
- malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
- chunk2mem (p), NULL);
- return;
- }
+ malloc_printerr ("munmap_chunk(): invalid pointer");
atomic_decrement (&mp_.n_mmaps);
atomic_add (&mp_.mmapped_mem, -total_size);
@@ -2857,11 +2841,10 @@ munmap_chunk (mchunkptr p)
#if HAVE_MREMAP
static mchunkptr
-internal_function
mremap_chunk (mchunkptr p, size_t new_size)
{
size_t pagesize = GLRO (dl_pagesize);
- INTERNAL_SIZE_T offset = p->prev_size;
+ INTERNAL_SIZE_T offset = prev_size (p);
INTERNAL_SIZE_T size = chunksize (p);
char *cp;
@@ -2885,7 +2868,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
assert (aligned_OK (chunk2mem (p)));
- assert ((p->prev_size == offset));
+ assert (prev_size (p) == offset);
set_head (p, (new_size - offset) | IS_MMAPPED);
INTERNAL_SIZE_T new;
@@ -2898,6 +2881,132 @@ mremap_chunk (mchunkptr p, size_t new_size)
/*------------------------ Public wrappers. --------------------------------*/
+#if USE_TCACHE
+
+/* We overlay this structure on the user-data portion of a chunk when
+ the chunk is stored in the per-thread cache. */
+typedef struct tcache_entry
+{
+ struct tcache_entry *next;
+} tcache_entry;
+
+/* There is one of these for each thread, which contains the
+ per-thread cache (hence "tcache_perthread_struct"). Keeping
+ overall size low is mildly important. Note that COUNTS and ENTRIES
+ are redundant (we could have just counted the linked list each
+ time); this is for performance reasons. */
+typedef struct tcache_perthread_struct
+{
+ char counts[TCACHE_MAX_BINS];
+ tcache_entry *entries[TCACHE_MAX_BINS];
+} tcache_perthread_struct;
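+
One reason "keeping overall size low is mildly important": with 64 bins this header costs a few hundred bytes per thread. A rough standalone check, assuming 8-byte pointers on a typical 64-bit target; the *_sketch types below are stand-ins for illustration only:

#include <assert.h>
#include <stdio.h>

#define TCACHE_MAX_BINS_SKETCH 64

typedef struct tcache_entry_sketch
{
  struct tcache_entry_sketch *next;
} tcache_entry_sketch;

typedef struct
{
  char counts[TCACHE_MAX_BINS_SKETCH];
  tcache_entry_sketch *entries[TCACHE_MAX_BINS_SKETCH];
} tcache_perthread_sketch;

int
main (void)
{
  /* 64 one-byte counters plus 64 pointers: 64 + 512 == 576 bytes per
     thread with 8-byte pointers (no padding, since 64 is already a
     multiple of the pointer alignment).  The real structure is
     allocated lazily on first use in each thread.  */
  printf ("per-thread tcache header: %zu bytes\n",
          sizeof (tcache_perthread_sketch));
  assert (sizeof (tcache_perthread_sketch)
          == TCACHE_MAX_BINS_SKETCH * (1 + sizeof (void *)));
  return 0;
}
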
+
+static __thread bool tcache_shutting_down = false;
+static __thread tcache_perthread_struct *tcache = NULL;
+
+/* Caller must ensure that we know tc_idx is valid and there's room
+ for more chunks. */
+static __always_inline void
+tcache_put (mchunkptr chunk, size_t tc_idx)
+{
+ tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
+ assert (tc_idx < TCACHE_MAX_BINS);
+ e->next = tcache->entries[tc_idx];
+ tcache->entries[tc_idx] = e;
+ ++(tcache->counts[tc_idx]);
+}
+
+/* Caller must ensure that we know tc_idx is valid and there are
+ chunks available to remove. */
+static __always_inline void *
+tcache_get (size_t tc_idx)
+{
+ tcache_entry *e = tcache->entries[tc_idx];
+ assert (tc_idx < TCACHE_MAX_BINS);
+ assert (tcache->counts[tc_idx] > 0);
+ tcache->entries[tc_idx] = e->next;
+ --(tcache->counts[tc_idx]);
+ return (void *) e;
+}
+
+static void
+tcache_thread_shutdown (void)
+{
+ int i;
+ tcache_perthread_struct *tcache_tmp = tcache;
+
+ if (!tcache)
+ return;
+
+ /* Disable the tcache and prevent it from being reinitialized. */
+ tcache = NULL;
+ tcache_shutting_down = true;
+
+ /* Free all of the entries and the tcache itself back to the arena
+ heap for coalescing. */
+ for (i = 0; i < TCACHE_MAX_BINS; ++i)
+ {
+ while (tcache_tmp->entries[i])
+ {
+ tcache_entry *e = tcache_tmp->entries[i];
+ tcache_tmp->entries[i] = e->next;
+ __libc_free (e);
+ }
+ }
+
+ __libc_free (tcache_tmp);
+}
+
+static void
+tcache_init(void)
+{
+ mstate ar_ptr;
+ void *victim = 0;
+ const size_t bytes = sizeof (tcache_perthread_struct);
+
+ if (tcache_shutting_down)
+ return;
+
+ arena_get (ar_ptr, bytes);
+ victim = _int_malloc (ar_ptr, bytes);
+ if (!victim && ar_ptr != NULL)
+ {
+ ar_ptr = arena_get_retry (ar_ptr, bytes);
+ victim = _int_malloc (ar_ptr, bytes);
+ }
+
+
+ if (ar_ptr != NULL)
+ __libc_lock_unlock (ar_ptr->mutex);
+
+ /* In a low memory situation, we may not be able to allocate memory
+ - in which case, we just keep trying later. However, we
+ typically do this very early, so either there is sufficient
+ memory, or there isn't enough memory to do non-trivial
+ allocations anyway. */
+ if (victim)
+ {
+ tcache = (tcache_perthread_struct *) victim;
+ memset (tcache, 0, sizeof (tcache_perthread_struct));
+ }
+
+}
+
+# define MAYBE_INIT_TCACHE() \
+ if (__glibc_unlikely (tcache == NULL)) \
+ tcache_init();
+
+#else /* !USE_TCACHE */
+# define MAYBE_INIT_TCACHE()
+
+static void
+tcache_thread_shutdown (void)
+{
+ /* Nothing to do if there is no thread cache. */
+}
+
+#endif /* !USE_TCACHE */
+
void *
__libc_malloc (size_t bytes)
{
@@ -2908,6 +3017,32 @@ __libc_malloc (size_t bytes)
= atomic_forced_read (__malloc_hook);
if (__builtin_expect (hook != NULL, 0))
return (*hook)(bytes, RETURN_ADDRESS (0));
+#if USE_TCACHE
+ /* int_free also calls request2size, be careful to not pad twice. */
+ size_t tbytes;
+ checked_request2size (bytes, tbytes);
+ size_t tc_idx = csize2tidx (tbytes);
+
+ MAYBE_INIT_TCACHE ();
+
+ DIAG_PUSH_NEEDS_COMMENT;
+ if (tc_idx < mp_.tcache_bins
+ /*&& tc_idx < TCACHE_MAX_BINS*/ /* to appease gcc */
+ && tcache
+ && tcache->entries[tc_idx] != NULL)
+ {
+ return tcache_get (tc_idx);
+ }
+ DIAG_POP_NEEDS_COMMENT;
+#endif
+
+ if (SINGLE_THREAD_P)
+ {
+ victim = _int_malloc (&main_arena, bytes);
+ assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
+ &main_arena == arena_for_chunk (mem2chunk (victim)));
+ return victim;
+ }
arena_get (ar_ptr, bytes);
@@ -2922,7 +3057,7 @@ __libc_malloc (size_t bytes)
}
if (ar_ptr != NULL)
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
@@ -2951,10 +3086,12 @@ __libc_free (void *mem)
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
- /* see if the dynamic brk/mmap threshold needs adjusting */
+ /* See if the dynamic brk/mmap threshold needs adjusting.
+ Dumped fake mmapped chunks do not affect the threshold. */
if (!mp_.no_dyn_threshold
- && p->size > mp_.mmap_threshold
- && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
+ && chunksize_nomask (p) > mp_.mmap_threshold
+ && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
+ && !DUMPED_MAIN_ARENA_CHUNK (p))
{
mp_.mmap_threshold = chunksize (p);
mp_.trim_threshold = 2 * mp_.mmap_threshold;
@@ -2965,6 +3102,8 @@ __libc_free (void *mem)
return;
}
+ MAYBE_INIT_TCACHE ();
+
ar_ptr = arena_for_chunk (p);
_int_free (ar_ptr, p, 0);
}
@@ -3002,24 +3141,44 @@ __libc_realloc (void *oldmem, size_t bytes)
if (chunk_is_mmapped (oldp))
ar_ptr = NULL;
else
- ar_ptr = arena_for_chunk (oldp);
-
- /* Little security check which won't hurt performance: the
- allocator never wrapps around at the end of the address space.
- Therefore we can exclude some size values which might appear
- here by accident or by "design" from some intruder. */
- if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
- || __builtin_expect (misaligned_chunk (oldp), 0))
{
- malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
- ar_ptr);
- return NULL;
+ MAYBE_INIT_TCACHE ();
+ ar_ptr = arena_for_chunk (oldp);
}
+ /* Little security check which won't hurt performance: the allocator
+ never wraps around at the end of the address space. Therefore
+ we can exclude some size values which might appear here by
+ accident or by "design" from some intruder. We need to bypass
+ this check for dumped fake mmap chunks from the old main arena
+ because the new malloc may provide additional alignment. */
+ if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
+ || __builtin_expect (misaligned_chunk (oldp), 0))
+ && !DUMPED_MAIN_ARENA_CHUNK (oldp))
+ malloc_printerr ("realloc(): invalid pointer");
+
checked_request2size (bytes, nb);
if (chunk_is_mmapped (oldp))
{
+ /* If this is a faked mmapped chunk from the dumped main arena,
+ always make a copy (and do not free the old chunk). */
+ if (DUMPED_MAIN_ARENA_CHUNK (oldp))
+ {
+ /* Must alloc, copy, free. */
+ void *newmem = __libc_malloc (bytes);
+ if (newmem == 0)
+ return NULL;
+ /* Copy as many bytes as are available from the old chunk
+ and fit into the new size. NB: The overhead for faked
+ mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
+ regular mmapped chunks. */
+ if (bytes > oldsize - SIZE_SZ)
+ bytes = oldsize - SIZE_SZ;
+ memcpy (newmem, oldmem, bytes);
+ return newmem;
+ }
+
void *newmem;
#if HAVE_MREMAP
@@ -3041,11 +3200,20 @@ __libc_realloc (void *oldmem, size_t bytes)
return newmem;
}
- (void) mutex_lock (&ar_ptr->mutex);
+ if (SINGLE_THREAD_P)
+ {
+ newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
+ assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
+ ar_ptr == arena_for_chunk (mem2chunk (newp)));
+
+ return newp;
+ }
+
+ __libc_lock_lock (ar_ptr->mutex);
newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
ar_ptr == arena_for_chunk (mem2chunk (newp)));
@@ -3116,6 +3284,15 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
alignment = a;
}
+ if (SINGLE_THREAD_P)
+ {
+ p = _int_memalign (&main_arena, alignment, bytes);
+ assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
+ &main_arena == arena_for_chunk (mem2chunk (p)));
+
+ return p;
+ }
+
arena_get (ar_ptr, bytes + alignment + MINSIZE);
p = _int_memalign (ar_ptr, alignment, bytes);
@@ -3127,7 +3304,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
}
if (ar_ptr != NULL)
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
ar_ptr == arena_for_chunk (mem2chunk (p)));
@@ -3206,7 +3383,13 @@ __libc_calloc (size_t n, size_t elem_size)
sz = bytes;
- arena_get (av, sz);
+ MAYBE_INIT_TCACHE ();
+
+ if (SINGLE_THREAD_P)
+ av = &main_arena;
+ else
+ arena_get (av, sz);
+
if (av)
{
/* Check if we hand out the top chunk, in which case there may be no
@@ -3236,19 +3419,21 @@ __libc_calloc (size_t n, size_t elem_size)
}
mem = _int_malloc (av, sz);
-
assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
av == arena_for_chunk (mem2chunk (mem)));
- if (mem == 0 && av != NULL)
+ if (!SINGLE_THREAD_P)
{
- LIBC_PROBE (memory_calloc_retry, 1, sz);
- av = arena_get_retry (av, sz);
- mem = _int_malloc (av, sz);
- }
+ if (mem == 0 && av != NULL)
+ {
+ LIBC_PROBE (memory_calloc_retry, 1, sz);
+ av = arena_get_retry (av, sz);
+ mem = _int_malloc (av, sz);
+ }
- if (av != NULL)
- (void) mutex_unlock (&av->mutex);
+ if (av != NULL)
+ __libc_lock_unlock (av->mutex);
+ }
/* Allocation failed even after a retry. */
if (mem == 0)
@@ -3336,7 +3521,9 @@ _int_malloc (mstate av, size_t bytes)
mchunkptr fwd; /* misc temp for linking */
mchunkptr bck; /* misc temp for linking */
- const char *errstr = NULL;
+#if USE_TCACHE
+ size_t tcache_unsorted_count; /* count of unsorted chunks processed */
+#endif
/*
Convert request size to internal form by adding SIZE_SZ bytes
@@ -3365,33 +3552,64 @@ _int_malloc (mstate av, size_t bytes)
can try it without checking, which saves some time on this fast path.
*/
+#define REMOVE_FB(fb, victim, pp) \
+ do \
+ { \
+ victim = pp; \
+ if (victim == NULL) \
+ break; \
+ } \
+ while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim)) \
+ != victim); \
+
if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
{
idx = fastbin_index (nb);
mfastbinptr *fb = &fastbin (av, idx);
- mchunkptr pp = *fb;
- do
- {
- victim = pp;
- if (victim == NULL)
- break;
- }
- while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
- != victim);
- if (victim != 0)
- {
- if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
- {
- errstr = "malloc(): memory corruption (fast)";
- errout:
- malloc_printerr (check_action, errstr, chunk2mem (victim), av);
- return NULL;
- }
- check_remalloced_chunk (av, victim, nb);
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
+ mchunkptr pp;
+ victim = *fb;
+
+ if (victim != NULL)
+ {
+ if (SINGLE_THREAD_P)
+ *fb = victim->fd;
+ else
+ REMOVE_FB (fb, pp, victim);
+ if (__glibc_likely (victim != NULL))
+ {
+ size_t victim_idx = fastbin_index (chunksize (victim));
+ if (__builtin_expect (victim_idx != idx, 0))
+ malloc_printerr ("malloc(): memory corruption (fast)");
+ check_remalloced_chunk (av, victim, nb);
+#if USE_TCACHE
+ /* While we're here, if we see other chunks of the same size,
+ stash them in the tcache. */
+ size_t tc_idx = csize2tidx (nb);
+ if (tcache && tc_idx < mp_.tcache_bins)
+ {
+ mchunkptr tc_victim;
+
+ /* While bin not empty and tcache not full, copy chunks. */
+ while (tcache->counts[tc_idx] < mp_.tcache_count
+ && (tc_victim = *fb) != NULL)
+ {
+ if (SINGLE_THREAD_P)
+ *fb = tc_victim->fd;
+ else
+ {
+ REMOVE_FB (fb, pp, tc_victim);
+ if (__glibc_unlikely (tc_victim == NULL))
+ break;
+ }
+ tcache_put (tc_victim, tc_idx);
+ }
+ }
+#endif
+ void *p = chunk2mem (victim);
+ alloc_perturb (p, bytes);
+ return p;
+ }
+ }
}
/*
@@ -3409,27 +3627,45 @@ _int_malloc (mstate av, size_t bytes)
if ((victim = last (bin)) != bin)
{
- if (victim == 0) /* initialization check */
- malloc_consolidate (av);
- else
- {
- bck = victim->bk;
- if (__glibc_unlikely (bck->fd != victim))
- {
- errstr = "malloc(): smallbin double linked list corrupted";
- goto errout;
- }
- set_inuse_bit_at_offset (victim, nb);
- bin->bk = bck;
- bck->fd = bin;
+ bck = victim->bk;
+ if (__glibc_unlikely (bck->fd != victim))
+ malloc_printerr ("malloc(): smallbin double linked list corrupted");
+ set_inuse_bit_at_offset (victim, nb);
+ bin->bk = bck;
+ bck->fd = bin;
+
+ if (av != &main_arena)
+ set_non_main_arena (victim);
+ check_malloced_chunk (av, victim, nb);
+#if USE_TCACHE
+ /* While we're here, if we see other chunks of the same size,
+ stash them in the tcache. */
+ size_t tc_idx = csize2tidx (nb);
+ if (tcache && tc_idx < mp_.tcache_bins)
+ {
+ mchunkptr tc_victim;
- if (av != &main_arena)
- victim->size |= NON_MAIN_ARENA;
- check_malloced_chunk (av, victim, nb);
- void *p = chunk2mem (victim);
- alloc_perturb (p, bytes);
- return p;
- }
+ /* While bin not empty and tcache not full, copy chunks over. */
+ while (tcache->counts[tc_idx] < mp_.tcache_count
+ && (tc_victim = last (bin)) != bin)
+ {
+ if (tc_victim != 0)
+ {
+ bck = tc_victim->bk;
+ set_inuse_bit_at_offset (tc_victim, nb);
+ if (av != &main_arena)
+ set_non_main_arena (tc_victim);
+ bin->bk = bck;
+ bck->fd = bin;
+
+ tcache_put (tc_victim, tc_idx);
+ }
+ }
+ }
+#endif
+ void *p = chunk2mem (victim);
+ alloc_perturb (p, bytes);
+ return p;
}
}
@@ -3447,7 +3683,7 @@ _int_malloc (mstate av, size_t bytes)
else
{
idx = largebin_index (nb);
- if (have_fastchunks (av))
+ if (atomic_load_relaxed (&av->have_fastchunks))
malloc_consolidate (av);
}
@@ -3464,16 +3700,26 @@ _int_malloc (mstate av, size_t bytes)
otherwise need to expand memory to service a "small" request.
*/
+#if USE_TCACHE
+ INTERNAL_SIZE_T tcache_nb = 0;
+ size_t tc_idx = csize2tidx (nb);
+ if (tcache && tc_idx < mp_.tcache_bins)
+ tcache_nb = nb;
+ int return_cached = 0;
+
+ tcache_unsorted_count = 0;
+#endif
+
for (;; )
{
int iters = 0;
while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
{
bck = victim->bk;
- if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
- || __builtin_expect (victim->size > av->system_mem, 0))
- malloc_printerr (check_action, "malloc(): memory corruption",
- chunk2mem (victim), av);
+ if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0)
+ || __builtin_expect (chunksize_nomask (victim)
+ > av->system_mem, 0))
+ malloc_printerr ("malloc(): memory corruption");
size = chunksize (victim);
/*
@@ -3513,6 +3759,8 @@ _int_malloc (mstate av, size_t bytes)
}
/* remove from unsorted list */
+ if (__glibc_unlikely (bck->fd != victim))
+ malloc_printerr ("malloc(): corrupted unsorted chunks 3");
unsorted_chunks (av)->bk = bck;
bck->fd = unsorted_chunks (av);
@@ -3522,11 +3770,27 @@ _int_malloc (mstate av, size_t bytes)
{
set_inuse_bit_at_offset (victim, size);
if (av != &main_arena)
- victim->size |= NON_MAIN_ARENA;
+ set_non_main_arena (victim);
+#if USE_TCACHE
+ /* Fill cache first, return to user only if cache fills.
+ We may return one of these chunks later. */
+ if (tcache_nb
+ && tcache->counts[tc_idx] < mp_.tcache_count)
+ {
+ tcache_put (victim, tc_idx);
+ return_cached = 1;
+ continue;
+ }
+ else
+ {
+#endif
check_malloced_chunk (av, victim, nb);
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
+#if USE_TCACHE
+ }
+#endif
}
/* place chunk in bin */
@@ -3549,8 +3813,9 @@ _int_malloc (mstate av, size_t bytes)
/* Or with inuse bit to speed comparisons */
size |= PREV_INUSE;
/* if smaller than smallest, bypass loop below */
- assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
- if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
+ assert (chunk_main_arena (bck->bk));
+ if ((unsigned long) (size)
+ < (unsigned long) chunksize_nomask (bck->bk))
{
fwd = bck;
bck = bck->bk;
@@ -3561,14 +3826,15 @@ _int_malloc (mstate av, size_t bytes)
}
else
{
- assert ((fwd->size & NON_MAIN_ARENA) == 0);
- while ((unsigned long) size < fwd->size)
+ assert (chunk_main_arena (fwd));
+ while ((unsigned long) size < chunksize_nomask (fwd))
{
fwd = fwd->fd_nextsize;
- assert ((fwd->size & NON_MAIN_ARENA) == 0);
+ assert (chunk_main_arena (fwd));
}
- if ((unsigned long) size == (unsigned long) fwd->size)
+ if ((unsigned long) size
+ == (unsigned long) chunksize_nomask (fwd))
/* Always insert in the second position. */
fwd = fwd->fd;
else
@@ -3591,11 +3857,31 @@ _int_malloc (mstate av, size_t bytes)
fwd->bk = victim;
bck->fd = victim;
+#if USE_TCACHE
+ /* If we've processed as many chunks as we're allowed while
+ filling the cache, return one of the cached ones. */
+ ++tcache_unsorted_count;
+ if (return_cached
+ && mp_.tcache_unsorted_limit > 0
+ && tcache_unsorted_count > mp_.tcache_unsorted_limit)
+ {
+ return tcache_get (tc_idx);
+ }
+#endif
+
#define MAX_ITERS 10000
if (++iters >= MAX_ITERS)
break;
}
+#if USE_TCACHE
+ /* If all the small chunks we found ended up cached, return one now. */
+ if (return_cached)
+ {
+ return tcache_get (tc_idx);
+ }
+#endif
+
/*
If a large request, scan through the chunks of current bin in
sorted order to find smallest that fits. Use the skip list for this.
@@ -3606,8 +3892,9 @@ _int_malloc (mstate av, size_t bytes)
bin = bin_at (av, idx);
/* skip scan if empty or largest chunk is too small */
- if ((victim = first (bin)) != bin &&
- (unsigned long) (victim->size) >= (unsigned long) (nb))
+ if ((victim = first (bin)) != bin
+ && (unsigned long) chunksize_nomask (victim)
+ >= (unsigned long) (nb))
{
victim = victim->bk_nextsize;
while (((unsigned long) (size = chunksize (victim)) <
@@ -3616,7 +3903,9 @@ _int_malloc (mstate av, size_t bytes)
/* Avoid removing the first entry for a size so that the skip
list does not have to be rerouted. */
- if (victim != last (bin) && victim->size == victim->fd->size)
+ if (victim != last (bin)
+ && chunksize_nomask (victim)
+ == chunksize_nomask (victim->fd))
victim = victim->fd;
remainder_size = size - nb;
@@ -3627,7 +3916,7 @@ _int_malloc (mstate av, size_t bytes)
{
set_inuse_bit_at_offset (victim, size);
if (av != &main_arena)
- victim->size |= NON_MAIN_ARENA;
+ set_non_main_arena (victim);
}
/* Split */
else
@@ -3637,11 +3926,8 @@ _int_malloc (mstate av, size_t bytes)
have to perform a complete insert here. */
bck = unsorted_chunks (av);
fwd = bck->fd;
- if (__glibc_unlikely (fwd->bk != bck))
- {
- errstr = "malloc(): corrupted unsorted chunks";
- goto errout;
- }
+ if (__glibc_unlikely (fwd->bk != bck))
+ malloc_printerr ("malloc(): corrupted unsorted chunks");
remainder->bk = bck;
remainder->fd = fwd;
bck->fd = remainder;
@@ -3732,7 +4018,7 @@ _int_malloc (mstate av, size_t bytes)
{
set_inuse_bit_at_offset (victim, size);
if (av != &main_arena)
- victim->size |= NON_MAIN_ARENA;
+ set_non_main_arena (victim);
}
/* Split */
@@ -3744,11 +4030,8 @@ _int_malloc (mstate av, size_t bytes)
have to perform a complete insert here. */
bck = unsorted_chunks (av);
fwd = bck->fd;
- if (__glibc_unlikely (fwd->bk != bck))
- {
- errstr = "malloc(): corrupted unsorted chunks 2";
- goto errout;
- }
+ if (__glibc_unlikely (fwd->bk != bck))
+ malloc_printerr ("malloc(): corrupted unsorted chunks 2");
remainder->bk = bck;
remainder->fd = fwd;
bck->fd = remainder;
@@ -3810,7 +4093,7 @@ _int_malloc (mstate av, size_t bytes)
/* When we are using atomic ops to free fast chunks we can get
here for all block sizes. */
- else if (have_fastchunks (av))
+ else if (atomic_load_relaxed (&av->have_fastchunks))
{
malloc_consolidate (av);
/* restore original bin index */
@@ -3849,9 +4132,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
mchunkptr bck; /* misc temp for linking */
mchunkptr fwd; /* misc temp for linking */
- const char *errstr = NULL;
- int locked = 0;
-
size = chunksize (p);
/* Little security check which won't hurt performance: the
@@ -3860,24 +4140,28 @@ _int_free (mstate av, mchunkptr p, int have_lock)
here by accident or by "design" from some intruder. */
if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
|| __builtin_expect (misaligned_chunk (p), 0))
- {
- errstr = "free(): invalid pointer";
- errout:
- if (!have_lock && locked)
- (void) mutex_unlock (&av->mutex);
- malloc_printerr (check_action, errstr, chunk2mem (p), av);
- return;
- }
+ malloc_printerr ("free(): invalid pointer");
/* We know that each chunk is at least MINSIZE bytes in size or a
multiple of MALLOC_ALIGNMENT. */
if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
- {
- errstr = "free(): invalid size";
- goto errout;
- }
+ malloc_printerr ("free(): invalid size");
check_inuse_chunk(av, p);
+#if USE_TCACHE
+ {
+ size_t tc_idx = csize2tidx (size);
+
+ if (tcache
+ && tc_idx < mp_.tcache_bins
+ && tcache->counts[tc_idx] < mp_.tcache_count)
+ {
+ tcache_put (p, tc_idx);
+ return;
+ }
+ }
+#endif
+
/*
If eligible, place chunk on a fastbin so it can be found
and used quickly in malloc.
@@ -3894,64 +4178,64 @@ _int_free (mstate av, mchunkptr p, int have_lock)
#endif
) {
- if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
+ if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
+ <= 2 * SIZE_SZ, 0)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
+ bool fail = true;
/* We might not have a lock at this point and concurrent modifications
- of system_mem might have let to a false positive. Redo the test
- after getting the lock. */
- if (have_lock
- || ({ assert (locked == 0);
- mutex_lock(&av->mutex);
- locked = 1;
- chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
- || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
- }))
+ of system_mem might result in a false positive. Redo the test after
+ getting the lock. */
+ if (!have_lock)
{
- errstr = "free(): invalid next size (fast)";
- goto errout;
- }
- if (! have_lock)
- {
- (void)mutex_unlock(&av->mutex);
- locked = 0;
+ __libc_lock_lock (av->mutex);
+ fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
+ || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
+ __libc_lock_unlock (av->mutex);
}
+
+ if (fail)
+ malloc_printerr ("free(): invalid next size (fast)");
}
free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
- set_fastchunks(av);
+ atomic_store_relaxed (&av->have_fastchunks, true);
unsigned int idx = fastbin_index(size);
fb = &fastbin (av, idx);
/* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
mchunkptr old = *fb, old2;
- unsigned int old_idx = ~0u;
- do
- {
- /* Check that the top of the bin is not the record we are going to add
- (i.e., double free). */
- if (__builtin_expect (old == p, 0))
- {
- errstr = "double free or corruption (fasttop)";
- goto errout;
- }
- /* Check that size of fastbin chunk at the top is the same as
- size of the chunk that we are adding. We can dereference OLD
- only if we have the lock, otherwise it might have already been
- deallocated. See use of OLD_IDX below for the actual check. */
- if (have_lock && old != NULL)
- old_idx = fastbin_index(chunksize(old));
- p->fd = old2 = old;
- }
- while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
- if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+ if (SINGLE_THREAD_P)
{
- errstr = "invalid fastbin entry (free)";
- goto errout;
+ /* Check that the top of the bin is not the record we are going to
+ add (i.e., double free). */
+ if (__builtin_expect (old == p, 0))
+ malloc_printerr ("double free or corruption (fasttop)");
+ p->fd = old;
+ *fb = p;
}
+ else
+ do
+ {
+ /* Check that the top of the bin is not the record we are going to
+ add (i.e., double free). */
+ if (__builtin_expect (old == p, 0))
+ malloc_printerr ("double free or corruption (fasttop)");
+ p->fd = old2 = old;
+ }
+ while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+ != old2);
+
+ /* Check that size of fastbin chunk at the top is the same as
+ size of the chunk that we are adding. We can dereference OLD
+ only if we have the lock, otherwise it might have already been
+ allocated again. */
+ if (have_lock && old != NULL
+ && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
+ malloc_printerr ("invalid fastbin entry (free)");
}
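
With SINGLE_THREAD_P the push onto the fastbin is now a plain store, while the multi-threaded path keeps the release-ordered compare-and-exchange loop; the size/index cross-check runs once afterwards, and only when the lock is held so OLD cannot be reallocated concurrently. A sketch of the lock-free push in C11 atomics (the real code uses the internal catomic_compare_and_exchange_val_rel macro and reports failures through malloc_printerr):

#include <stdatomic.h>
#include <stdlib.h>

struct chunk_sketch
{
  struct chunk_sketch *fd;             /* next chunk in the fastbin */
  /* ... size and other header fields ... */
};

/* Push P on top of the fastbin headed by *FB.  */
static void
fastbin_push (struct chunk_sketch *_Atomic *fb, struct chunk_sketch *p)
{
  struct chunk_sketch *old = atomic_load_explicit (fb, memory_order_relaxed);
  do
    {
      /* Cheap double-free check, as above: the chunk being freed must not
         already be the top of the bin.  */
      if (old == p)
        abort ();                      /* stands in for malloc_printerr */
      p->fd = old;
    }
  while (!atomic_compare_exchange_weak_explicit (fb, &old, p,
                                                 memory_order_release,
                                                 memory_order_relaxed));
}
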
/*
@@ -3959,48 +4243,39 @@ _int_free (mstate av, mchunkptr p, int have_lock)
*/
else if (!chunk_is_mmapped(p)) {
- if (! have_lock) {
- (void)mutex_lock(&av->mutex);
- locked = 1;
- }
+
+ /* If we're single-threaded, don't lock the arena. */
+ if (SINGLE_THREAD_P)
+ have_lock = true;
+
+ if (!have_lock)
+ __libc_lock_lock (av->mutex);
nextchunk = chunk_at_offset(p, size);
/* Lightweight tests: check whether the block is already the
top block. */
if (__glibc_unlikely (p == av->top))
- {
- errstr = "double free or corruption (top)";
- goto errout;
- }
+ malloc_printerr ("double free or corruption (top)");
/* Or whether the next chunk is beyond the boundaries of the arena. */
if (__builtin_expect (contiguous (av)
&& (char *) nextchunk
>= ((char *) av->top + chunksize(av->top)), 0))
- {
- errstr = "double free or corruption (out)";
- goto errout;
- }
+ malloc_printerr ("double free or corruption (out)");
/* Or whether the block is actually not marked used. */
if (__glibc_unlikely (!prev_inuse(nextchunk)))
- {
- errstr = "double free or corruption (!prev)";
- goto errout;
- }
+ malloc_printerr ("double free or corruption (!prev)");
nextsize = chunksize(nextchunk);
- if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
+ if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
|| __builtin_expect (nextsize >= av->system_mem, 0))
- {
- errstr = "free(): invalid next size (normal)";
- goto errout;
- }
+ malloc_printerr ("free(): invalid next size (normal)");
free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
/* consolidate backward */
if (!prev_inuse(p)) {
- prevsize = p->prev_size;
+ prevsize = prev_size (p);
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
unlink(av, p, bck, fwd);
@@ -4026,10 +4301,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
bck = unsorted_chunks(av);
fwd = bck->fd;
if (__glibc_unlikely (fwd->bk != bck))
- {
- errstr = "free(): corrupted unsorted chunks";
- goto errout;
- }
+ malloc_printerr ("free(): corrupted unsorted chunks");
p->fd = fwd;
p->bk = bck;
if (!in_smallbin_range(size))
@@ -4072,7 +4344,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
*/
if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
- if (have_fastchunks(av))
+ if (atomic_load_relaxed (&av->have_fastchunks))
malloc_consolidate(av);
if (av == &main_arena) {
@@ -4091,10 +4363,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
}
}
- if (! have_lock) {
- assert (locked);
- (void)mutex_unlock(&av->mutex);
- }
+ if (!have_lock)
+ __libc_lock_unlock (av->mutex);
}
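
The consolidation logic above is unchanged in substance; the diff only swaps direct field access for the prev_size accessor and folds the error paths into malloc_printerr. The underlying boundary-tag idea is that a free chunk stores its size at the start of its successor, so merging with a free predecessor is O(1). A stripped-down illustration, not glibc's real chunk layout or unlink handling:

#include <stddef.h>

#define PREV_INUSE_BIT ((size_t) 0x1)
#define CHUNKSIZE(c)   ((c)->size & ~PREV_INUSE_BIT)

struct chunk_sketch
{
  size_t prev_size;  /* size of the previous chunk, valid only if it is free */
  size_t size;       /* this chunk's size; low bit set if predecessor in use */
};

/* Merge C with the free chunk immediately before it and return the merged
   chunk.  The caller has already seen that C's PREV_INUSE bit is clear and
   has unlinked the predecessor from its bin (omitted here).  */
static struct chunk_sketch *
coalesce_backward (struct chunk_sketch *c)
{
  size_t prevsize = c->prev_size;
  struct chunk_sketch *p = (struct chunk_sketch *) ((char *) c - prevsize);
  p->size = (prevsize + CHUNKSIZE (c)) | (p->size & PREV_INUSE_BIT);
  return p;
}
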
/*
If the chunk was allocated via mmap, release via munmap().
@@ -4113,10 +4383,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
purpose since, among other things, it might place chunks back onto
fastbins. So, instead, we need to use a minor variant of the same
code.
-
- Also, because this routine needs to be called the first time through
- malloc anyway, it turns out to be the perfect place to trigger
- initialization code.
*/
static void malloc_consolidate(mstate av)
@@ -4137,84 +4403,79 @@ static void malloc_consolidate(mstate av)
mchunkptr bck;
mchunkptr fwd;
+ atomic_store_relaxed (&av->have_fastchunks, false);
+
+ unsorted_bin = unsorted_chunks(av);
+
/*
- If max_fast is 0, we know that av hasn't
- yet been initialized, in which case do so below
+ Remove each chunk from fast bin and consolidate it, placing it
+ then in unsorted bin. Among other reasons for doing this,
+ placing in unsorted bin avoids needing to calculate actual bins
+ until malloc is sure that chunks aren't immediately going to be
+ reused anyway.
*/
- if (get_max_fast () != 0) {
- clear_fastchunks(av);
-
- unsorted_bin = unsorted_chunks(av);
+ maxfb = &fastbin (av, NFASTBINS - 1);
+ fb = &fastbin (av, 0);
+ do {
+ p = atomic_exchange_acq (fb, NULL);
+ if (p != 0) {
+ do {
+ {
+ unsigned int idx = fastbin_index (chunksize (p));
+ if ((&fastbin (av, idx)) != fb)
+ malloc_printerr ("malloc_consolidate(): invalid chunk size");
+ }
- /*
- Remove each chunk from fast bin and consolidate it, placing it
- then in unsorted bin. Among other reasons for doing this,
- placing in unsorted bin avoids needing to calculate actual bins
- until malloc is sure that chunks aren't immediately going to be
- reused anyway.
- */
+ check_inuse_chunk(av, p);
+ nextp = p->fd;
- maxfb = &fastbin (av, NFASTBINS - 1);
- fb = &fastbin (av, 0);
- do {
- p = atomic_exchange_acq (fb, 0);
- if (p != 0) {
- do {
- check_inuse_chunk(av, p);
- nextp = p->fd;
-
- /* Slightly streamlined version of consolidation code in free() */
- size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
- nextchunk = chunk_at_offset(p, size);
- nextsize = chunksize(nextchunk);
-
- if (!prev_inuse(p)) {
- prevsize = p->prev_size;
- size += prevsize;
- p = chunk_at_offset(p, -((long) prevsize));
- unlink(av, p, bck, fwd);
- }
+ /* Slightly streamlined version of consolidation code in free() */
+ size = chunksize (p);
+ nextchunk = chunk_at_offset(p, size);
+ nextsize = chunksize(nextchunk);
- if (nextchunk != av->top) {
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+ if (!prev_inuse(p)) {
+ prevsize = prev_size (p);
+ size += prevsize;
+ p = chunk_at_offset(p, -((long) prevsize));
+ unlink(av, p, bck, fwd);
+ }
- if (!nextinuse) {
- size += nextsize;
- unlink(av, nextchunk, bck, fwd);
- } else
- clear_inuse_bit_at_offset(nextchunk, 0);
+ if (nextchunk != av->top) {
+ nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
- first_unsorted = unsorted_bin->fd;
- unsorted_bin->fd = p;
- first_unsorted->bk = p;
+ if (!nextinuse) {
+ size += nextsize;
+ unlink(av, nextchunk, bck, fwd);
+ } else
+ clear_inuse_bit_at_offset(nextchunk, 0);
- if (!in_smallbin_range (size)) {
- p->fd_nextsize = NULL;
- p->bk_nextsize = NULL;
- }
+ first_unsorted = unsorted_bin->fd;
+ unsorted_bin->fd = p;
+ first_unsorted->bk = p;
- set_head(p, size | PREV_INUSE);
- p->bk = unsorted_bin;
- p->fd = first_unsorted;
- set_foot(p, size);
+ if (!in_smallbin_range (size)) {
+ p->fd_nextsize = NULL;
+ p->bk_nextsize = NULL;
}
- else {
- size += nextsize;
- set_head(p, size | PREV_INUSE);
- av->top = p;
- }
+ set_head(p, size | PREV_INUSE);
+ p->bk = unsorted_bin;
+ p->fd = first_unsorted;
+ set_foot(p, size);
+ }
- } while ( (p = nextp) != 0);
+ else {
+ size += nextsize;
+ set_head(p, size | PREV_INUSE);
+ av->top = p;
+ }
- }
- } while (fb++ != maxfb);
- }
- else {
- malloc_init_state(av);
- check_malloc_state(av);
- }
+ } while ( (p = nextp) != 0);
+
+ }
+ } while (fb++ != maxfb);
}
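
malloc_consolidate no longer doubles as the lazy initializer (the get_max_fast () == 0 branch is gone); it simply clears the have_fastchunks hint and drains every fastbin into the unsorted bin. Each bin is detached in one atomic exchange, after which the private list can be walked without further synchronization. A sketch of that drain pattern:

#include <stdatomic.h>
#include <stddef.h>

struct chunk_sketch
{
  struct chunk_sketch *fd;             /* next chunk in the fastbin */
  /* ... */
};

/* Detach the whole fastbin headed by *FB and consolidate each chunk.
   CONSOLIDATE_ONE stands in for the coalesce-and-relink work above.  */
static void
drain_fastbin (struct chunk_sketch *_Atomic *fb,
               void (*consolidate_one) (struct chunk_sketch *))
{
  struct chunk_sketch *p
    = atomic_exchange_explicit (fb, NULL, memory_order_acquire);
  while (p != NULL)
    {
      struct chunk_sketch *next = p->fd; /* save before P is relinked */
      consolidate_one (p);
      p = next;
    }
}
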
/*
@@ -4242,17 +4503,10 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
INTERNAL_SIZE_T* s; /* copy source */
INTERNAL_SIZE_T* d; /* copy destination */
- const char *errstr = NULL;
-
/* oldmem size */
- if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
+ if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
|| __builtin_expect (oldsize >= av->system_mem, 0))
- {
- errstr = "realloc(): invalid old size";
- errout:
- malloc_printerr (check_action, errstr, chunk2mem (oldp), av);
- return NULL;
- }
+ malloc_printerr ("realloc(): invalid old size");
check_inuse_chunk (av, oldp);
@@ -4261,12 +4515,9 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
next = chunk_at_offset (oldp, oldsize);
INTERNAL_SIZE_T nextsize = chunksize (next);
- if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
+ if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
|| __builtin_expect (nextsize >= av->system_mem, 0))
- {
- errstr = "realloc(): invalid next size";
- goto errout;
- }
+ malloc_printerr ("realloc(): invalid next size");
if ((unsigned long) (oldsize) >= (unsigned long) (nb))
{
@@ -4417,6 +4668,13 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
*/
+ /* Check for overflow. */
+ if (nb > SIZE_MAX - alignment - MINSIZE)
+ {
+ __set_errno (ENOMEM);
+ return 0;
+ }
+
/* Call malloc with worst case padding to hit alignment. */
m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
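
The new check above guards the worst-case padding used for alignment: without it, nb + alignment + MINSIZE could wrap around SIZE_MAX and silently turn a huge request into a tiny allocation. The comparison is arranged so the addition never actually happens; a standalone sketch (assuming, as in _int_memalign's callers, that alignment has already been validated):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True if nb + alignment + minsize fits in a size_t; rewritten as a
   subtraction on the right-hand side so the check itself cannot overflow.  */
static bool
padded_request_fits (size_t nb, size_t alignment, size_t minsize)
{
  return nb <= SIZE_MAX - alignment - minsize;
}
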
@@ -4447,7 +4705,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
/* For mmapped chunks, just adjust offset */
if (chunk_is_mmapped (p))
{
- newp->prev_size = p->prev_size + leadsize;
+ set_prev_size (newp, prev_size (p) + leadsize);
set_head (newp, newsize | IS_MMAPPED);
return chunk2mem (newp);
}
@@ -4491,11 +4749,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
static int
mtrim (mstate av, size_t pad)
{
- /* Don't touch corrupt arenas. */
- if (arena_is_corrupt (av))
- return 0;
-
- /* Ensure initialization/consolidation */
+ /* Ensure all blocks are consolidated. */
malloc_consolidate (av);
const size_t ps = GLRO (dl_pagesize);
@@ -4560,9 +4814,9 @@ __malloc_trim (size_t s)
mstate ar_ptr = &main_arena;
do
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
result |= mtrim (ar_ptr, s);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
}
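
Trimming now walks the arenas with the __libc_lock_* primitives and no longer skips arenas that used to be flagged as corrupt (that state is gone along with the old malloc_printerr). The public entry point behaves as before; a small usage example:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  /* Allocate and free a few megabytes so there is something to trim.  */
  enum { N = 1024 };
  static void *blocks[N];
  for (int i = 0; i < N; i++)
    blocks[i] = malloc (4096);
  for (int i = 0; i < N; i++)
    free (blocks[i]);

  /* Ask every arena to return free pages to the kernel; the result is
     nonzero if any memory was actually released.  */
  int released = malloc_trim (0);
  printf ("malloc_trim released memory: %s\n", released ? "yes" : "no");
  return 0;
}
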
@@ -4588,7 +4842,12 @@ musable (void *mem)
return malloc_check_get_size (p);
if (chunk_is_mmapped (p))
- return chunksize (p) - 2 * SIZE_SZ;
+ {
+ if (DUMPED_MAIN_ARENA_CHUNK (p))
+ return chunksize (p) - SIZE_SZ;
+ else
+ return chunksize (p) - 2 * SIZE_SZ;
+ }
else if (inuse (p))
return chunksize (p) - SIZE_SZ;
}
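
musable now distinguishes ordinary mmapped chunks (two size words of overhead) from chunks in the dumped main arena kept for malloc_set_state compatibility, which carry only one. Applications observe these values through malloc_usable_size:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p = malloc (100);
  /* The usable size is at least the requested size and is usually a bit
     larger, because requests are rounded up to the chunk granularity.  */
  printf ("requested 100, usable %zu\n", malloc_usable_size (p));
  free (p);
  return 0;
}
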
@@ -4621,10 +4880,6 @@ int_mallinfo (mstate av, struct mallinfo *m)
int nblocks;
int nfastblocks;
- /* Ensure initialization */
- if (av->top == 0)
- malloc_consolidate (av);
-
check_malloc_state (av);
/* Account for top */
@@ -4667,7 +4922,7 @@ int_mallinfo (mstate av, struct mallinfo *m)
{
m->hblks = mp_.n_mmaps;
m->hblkhd = mp_.mmapped_mem;
- m->usmblks = mp_.max_total_mem;
+ m->usmblks = 0;
m->keepcost = chunksize (av->top);
}
}
@@ -4686,9 +4941,9 @@ __libc_mallinfo (void)
ar_ptr = &main_arena;
do
{
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
int_mallinfo (ar_ptr, &m);
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
}
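
int_mallinfo drops its lazy-initialization fallback (the arena is set up by the time these statistics functions run) and reports usmblks as 0, since the maximum-total-memory statistic is no longer maintained. The aggregated numbers are still read through the usual interface:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  void *p = malloc (1000);
  struct mallinfo mi = mallinfo ();
  printf ("in-use space (uordblks): %d\n", mi.uordblks);
  printf ("free space   (fordblks): %d\n", mi.fordblks);
  printf ("usmblks (now always 0):  %d\n", mi.usmblks);
  free (p);
  return 0;
}
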
@@ -4711,14 +4966,14 @@ __malloc_stats (void)
if (__malloc_initialized < 0)
ptmalloc_init ();
_IO_flockfile (stderr);
- int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
- ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
+ int old_flags2 = stderr->_flags2;
+ stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
for (i = 0, ar_ptr = &main_arena;; i++)
{
struct mallinfo mi;
memset (&mi, 0, sizeof (mi));
- (void) mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
int_mallinfo (ar_ptr, &mi);
fprintf (stderr, "Arena %d:\n", i);
fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
@@ -4729,7 +4984,7 @@ __malloc_stats (void)
#endif
system_b += mi.arena;
in_use_b += mi.uordblks;
- (void) mutex_unlock (&ar_ptr->mutex);
+ __libc_lock_unlock (ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if (ar_ptr == &main_arena)
break;
@@ -4740,7 +4995,7 @@ __malloc_stats (void)
fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
fprintf (stderr, "max mmap bytes = %10lu\n",
(unsigned long) mp_.max_mmapped_mem);
- ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
+ stderr->_flags2 = old_flags2;
_IO_funlockfile (stderr);
}
@@ -4748,6 +5003,121 @@ __malloc_stats (void)
/*
------------------------------ mallopt ------------------------------
*/
+static inline int
+__always_inline
+do_set_trim_threshold (size_t value)
+{
+ LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
+ mp_.no_dyn_threshold);
+ mp_.trim_threshold = value;
+ mp_.no_dyn_threshold = 1;
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_top_pad (size_t value)
+{
+ LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
+ mp_.no_dyn_threshold);
+ mp_.top_pad = value;
+ mp_.no_dyn_threshold = 1;
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_mmap_threshold (size_t value)
+{
+ /* Forbid setting the threshold too high. */
+ if (value <= HEAP_MAX_SIZE / 2)
+ {
+ LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
+ mp_.no_dyn_threshold);
+ mp_.mmap_threshold = value;
+ mp_.no_dyn_threshold = 1;
+ return 1;
+ }
+ return 0;
+}
+
+static inline int
+__always_inline
+do_set_mmaps_max (int32_t value)
+{
+ LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
+ mp_.no_dyn_threshold);
+ mp_.n_mmaps_max = value;
+ mp_.no_dyn_threshold = 1;
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_mallopt_check (int32_t value)
+{
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_perturb_byte (int32_t value)
+{
+ LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
+ perturb_byte = value;
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_arena_test (size_t value)
+{
+ LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
+ mp_.arena_test = value;
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_arena_max (size_t value)
+{
+ LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
+ mp_.arena_max = value;
+ return 1;
+}
+
+#if USE_TCACHE
+static inline int
+__always_inline
+do_set_tcache_max (size_t value)
+{
+ if (value >= 0 && value <= MAX_TCACHE_SIZE)
+ {
+ LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
+ mp_.tcache_max_bytes = value;
+ mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
+ }
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_tcache_count (size_t value)
+{
+ LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
+ mp_.tcache_count = value;
+ return 1;
+}
+
+static inline int
+__always_inline
+do_set_tcache_unsorted_limit (size_t value)
+{
+ LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
+ mp_.tcache_unsorted_limit = value;
+ return 1;
+}
+#endif
int
__libc_mallopt (int param_number, int value)
@@ -4757,12 +5127,14 @@ __libc_mallopt (int param_number, int value)
if (__malloc_initialized < 0)
ptmalloc_init ();
- (void) mutex_lock (&av->mutex);
- /* Ensure initialization/consolidation */
- malloc_consolidate (av);
+ __libc_lock_lock (av->mutex);
LIBC_PROBE (memory_mallopt, 2, param_number, value);
+ /* We must consolidate main arena before changing max_fast
+ (see definition of set_max_fast). */
+ malloc_consolidate (av);
+
switch (param_number)
{
case M_MXFAST:
@@ -4776,66 +5148,40 @@ __libc_mallopt (int param_number, int value)
break;
case M_TRIM_THRESHOLD:
- LIBC_PROBE (memory_mallopt_trim_threshold, 3, value,
- mp_.trim_threshold, mp_.no_dyn_threshold);
- mp_.trim_threshold = value;
- mp_.no_dyn_threshold = 1;
+ do_set_trim_threshold (value);
break;
case M_TOP_PAD:
- LIBC_PROBE (memory_mallopt_top_pad, 3, value,
- mp_.top_pad, mp_.no_dyn_threshold);
- mp_.top_pad = value;
- mp_.no_dyn_threshold = 1;
+ do_set_top_pad (value);
break;
case M_MMAP_THRESHOLD:
- /* Forbid setting the threshold too high. */
- if ((unsigned long) value > HEAP_MAX_SIZE / 2)
- res = 0;
- else
- {
- LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value,
- mp_.mmap_threshold, mp_.no_dyn_threshold);
- mp_.mmap_threshold = value;
- mp_.no_dyn_threshold = 1;
- }
+ res = do_set_mmap_threshold (value);
break;
case M_MMAP_MAX:
- LIBC_PROBE (memory_mallopt_mmap_max, 3, value,
- mp_.n_mmaps_max, mp_.no_dyn_threshold);
- mp_.n_mmaps_max = value;
- mp_.no_dyn_threshold = 1;
+ do_set_mmaps_max (value);
break;
case M_CHECK_ACTION:
- LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
- check_action = value;
+ do_set_mallopt_check (value);
break;
case M_PERTURB:
- LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
- perturb_byte = value;
+ do_set_perturb_byte (value);
break;
case M_ARENA_TEST:
if (value > 0)
- {
- LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
- mp_.arena_test = value;
- }
+ do_set_arena_test (value);
break;
case M_ARENA_MAX:
if (value > 0)
- {
- LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
- mp_.arena_max = value;
- }
+ do_set_arena_max (value);
break;
}
- (void) mutex_unlock (&av->mutex);
+ __libc_lock_unlock (av->mutex);
return res;
}
libc_hidden_def (__libc_mallopt)
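
__libc_mallopt is now a thin dispatcher over the do_set_* helpers above, which also back the new tunables front end (the tcache knobs, for instance, have no mallopt parameter and are reachable only that way, e.g. through the GLIBC_TUNABLES environment variable). The public interface itself is unchanged; a short usage example of the classic parameters:

#include <malloc.h>
#include <stdio.h>

int main (void)
{
  /* Stop using mmap for large requests and disable automatic trimming;
     both calls end up in the do_set_* helpers above.  mallopt returns
     nonzero on success.  */
  if (mallopt (M_MMAP_MAX, 0) == 0)
    fprintf (stderr, "M_MMAP_MAX rejected\n");
  if (mallopt (M_TRIM_THRESHOLD, -1) == 0)
    fprintf (stderr, "M_TRIM_THRESHOLD rejected\n");
  return 0;
}
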
@@ -4985,30 +5331,10 @@ libc_hidden_def (__libc_mallopt)
extern char **__libc_argv attribute_hidden;
static void
-malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
+malloc_printerr (const char *str)
{
- /* Avoid using this arena in future. We do not attempt to synchronize this
- with anything else because we minimally want to ensure that __libc_message
- gets its resources safely without stumbling on the current corruption. */
- if (ar_ptr)
- set_arena_corrupt (ar_ptr);
-
- if ((action & 5) == 5)
- __libc_message (action & 2, "%s\n", str);
- else if (action & 1)
- {
- char buf[2 * sizeof (uintptr_t) + 1];
-
- buf[sizeof (buf) - 1] = '\0';
- char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
- while (cp > buf)
- *--cp = '0';
-
- __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
- __libc_argv[0] ? : "<unknown>", str, cp);
- }
- else if (action & 2)
- abort ();
+ __libc_message (do_abort, "%s\n", str);
+ __builtin_unreachable ();
}
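
malloc_printerr has lost its check_action, pointer and arena arguments: every detected inconsistency now prints a single message via __libc_message and terminates, and arenas are no longer marked corrupt and quietly avoided. A minimal way to observe this, assuming a typical 64-bit build (the exact diagnostic text varies between versions):

#include <stdlib.h>

int main (void)
{
  char *p = malloc (32);
  /* A pointer malloc never returned is misaligned with respect to the
     chunk layout, so _int_free hits the "free(): invalid pointer" check
     above and the process aborts instead of continuing.  */
  free (p + 1);
  return 0;
}
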
/* We need a wrapper function for one of the additions of POSIX. */
@@ -5082,7 +5408,7 @@ __malloc_info (int options, FILE *fp)
} sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
- mutex_lock (&ar_ptr->mutex);
+ __libc_lock_lock (ar_ptr->mutex);
for (size_t i = 0; i < NFASTBINS; ++i)
{
@@ -5125,12 +5451,13 @@ __malloc_info (int options, FILE *fp)
if (r != NULL)
while (r != bin)
{
+ size_t r_size = chunksize_nomask (r);
++sizes[NFASTBINS - 1 + i].count;
- sizes[NFASTBINS - 1 + i].total += r->size;
+ sizes[NFASTBINS - 1 + i].total += r_size;
sizes[NFASTBINS - 1 + i].from
- = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
+ = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
- r->size);
+ r_size);
r = r->fd;
}
@@ -5141,7 +5468,24 @@ __malloc_info (int options, FILE *fp)
avail += sizes[NFASTBINS - 1 + i].total;
}
- mutex_unlock (&ar_ptr->mutex);
+ size_t heap_size = 0;
+ size_t heap_mprotect_size = 0;
+ size_t heap_count = 0;
+ if (ar_ptr != &main_arena)
+ {
+ /* Iterate over the arena heaps from back to front. */
+ heap_info *heap = heap_for_ptr (top (ar_ptr));
+ do
+ {
+ heap_size += heap->size;
+ heap_mprotect_size += heap->mprotect_size;
+ heap = heap->prev;
+ ++heap_count;
+ }
+ while (heap != NULL);
+ }
+
+ __libc_lock_unlock (ar_ptr->mutex);
total_nfastblocks += nfastblocks;
total_fastavail += fastavail;
@@ -5174,13 +5518,13 @@ __malloc_info (int options, FILE *fp)
if (ar_ptr != &main_arena)
{
- heap_info *heap = heap_for_ptr (top (ar_ptr));
fprintf (fp,
"<aspace type=\"total\" size=\"%zu\"/>\n"
- "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
- heap->size, heap->mprotect_size);
- total_aspace += heap->size;
- total_aspace_mprotect += heap->mprotect_size;
+ "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
+ "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
+ heap_size, heap_mprotect_size, heap_count);
+ total_aspace += heap_size;
+ total_aspace_mprotect += heap_mprotect_size;
}
else
{
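
With the change above, __malloc_info walks every sub-heap of a non-main arena (back to front through heap_info->prev) rather than reporting only the heap containing the top chunk, and it emits an additional <aspace type="subheaps"/> element carrying the heap count. The XML report is produced through the public wrapper:

#include <malloc.h>
#include <stdio.h>

int main (void)
{
  /* Writes the per-arena XML report to the given stream; sub-heap totals
     for non-main arenas only show up once other threads have allocated.  */
  malloc_info (0, stdout);
  return 0;
}
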
@@ -5217,7 +5561,6 @@ weak_alias (__malloc_info, malloc_info)
strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
-strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
@@ -5232,9 +5575,10 @@ strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
-weak_alias (__malloc_get_state, malloc_get_state)
-weak_alias (__malloc_set_state, malloc_set_state)
+#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
+compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
+#endif
/* ------------------------------------------------------------
History: