commit    042cf8fd5a3058d1b8dfabfb1b5fe9ab7fa6cc61 (patch)
tree      df80ab5249f620c856b8c8c0956cff66e542b4ac
parent    2d49df6b1deaedba718c15cd3091b47ee2498a55 (diff)
author    Ulrich Drepper <drepper@redhat.com>  1998-05-07 12:02:29 +0000
committer Ulrich Drepper <drepper@redhat.com>  1998-05-07 12:02:29 +0000

    Bug fixes with fork.

 malloc/malloc.c (-rw-r--r--) | 327
 1 file changed, 279 insertions(+), 48 deletions(-)
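
The bulk of this patch makes malloc fork-safe: ptmalloc_lock_all(), ptmalloc_unlock_all() and
ptmalloc_unlock_all2() are registered through thread_atfork() so that every arena mutex is taken
before fork(), released again in the parent, and re-initialized in the child. The remaining hunks
add a MAP_NORESERVE fallback for the large heap reservation and tighten the MALLOC_HOOKS debugging
code. As a rough illustration of the prepare/parent/child pattern the patch relies on (a sketch
only, using plain pthread_atfork() rather than glibc's internal thread_atfork() wrapper):

    #include <pthread.h>

    static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

    static void lock_before_fork(void)  { pthread_mutex_lock(&heap_lock); }
    static void unlock_in_parent(void)  { pthread_mutex_unlock(&heap_lock); }
    static void reinit_in_child(void)
    {
      /* Unlocking an inherited mutex in the child is unsafe with
         LinuxThreads, so re-initialize it instead -- the same reason
         ptmalloc_unlock_all2 exists in the patch below. */
      pthread_mutex_init(&heap_lock, NULL);
    }

    static void install_fork_handlers(void)
    {
      pthread_atfork(lock_before_fork, unlock_in_parent, reinit_in_child);
    }
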
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 057b5a05c0..f004cb1b6c 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -19,7 +19,7 @@
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-/* V2.6.4-pt3 Thu Feb 20 1997
+/* $Id$
This work is mainly derived from malloc-2.6.4 by Doug Lea
<dl@cs.oswego.edu>, which is available from:
@@ -523,6 +523,14 @@ do { \
#define MAP_ANONYMOUS MAP_ANON
#endif
+#ifndef MAP_NORESERVE
+# ifdef MAP_AUTORESRV
+# define MAP_NORESERVE MAP_AUTORESRV
+# else
+# define MAP_NORESERVE 0
+# endif
+#endif
+
#endif /* HAVE_MMAP */
/*
@@ -1185,6 +1193,8 @@ static Void_t* realloc_check(Void_t* oldmem, size_t bytes);
static Void_t* memalign_check(size_t alignment, size_t bytes);
static Void_t* malloc_starter(size_t sz);
static void free_starter(Void_t* mem);
+static Void_t* malloc_atfork(size_t sz);
+static void free_atfork(Void_t* mem);
#endif
#else
@@ -1204,6 +1214,8 @@ static Void_t* realloc_check();
static Void_t* memalign_check();
static Void_t* malloc_starter();
static void free_starter();
+static Void_t* malloc_atfork();
+static void free_atfork();
#endif
#endif
@@ -1510,6 +1522,89 @@ static unsigned long max_mmapped_mem = 0;
int __malloc_initialized = -1;
+/* The following two functions are registered via thread_atfork() to
+ make sure that the mutexes remain in a consistent state in the
+ fork()ed version of a thread. Also adapt the malloc and free hooks
+ temporarily, because the `atfork' handler mechanism may use
+ malloc/free internally (e.g. in LinuxThreads). */
+
+#if defined(_LIBC) || defined(MALLOC_HOOKS)
+static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size));
+static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr));
+static Void_t* save_arena;
+#endif
+
+static void
+ptmalloc_lock_all __MALLOC_P((void))
+{
+ arena *ar_ptr;
+
+ (void)mutex_lock(&list_lock);
+ for(ar_ptr = &main_arena;;) {
+ (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if(ar_ptr == &main_arena) break;
+ }
+#if defined(_LIBC) || defined(MALLOC_HOOKS)
+ save_malloc_hook = __malloc_hook;
+ save_free_hook = __free_hook;
+ __malloc_hook = malloc_atfork;
+ __free_hook = free_atfork;
+ /* Only the current thread may perform malloc/free calls now. */
+ tsd_getspecific(arena_key, save_arena);
+ tsd_setspecific(arena_key, (Void_t*)0);
+#endif
+}
+
+static void
+ptmalloc_unlock_all __MALLOC_P((void))
+{
+ arena *ar_ptr;
+
+#if defined(_LIBC) || defined(MALLOC_HOOKS)
+ tsd_setspecific(arena_key, save_arena);
+ __malloc_hook = save_malloc_hook;
+ __free_hook = save_free_hook;
+#endif
+ for(ar_ptr = &main_arena;;) {
+ (void)mutex_unlock(&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if(ar_ptr == &main_arena) break;
+ }
+ (void)mutex_unlock(&list_lock);
+}
+
+#ifdef __linux__
+
+/* In LinuxThreads, unlocking a mutex in the child process after a
+ fork() is currently unsafe, whereas re-initializing it is safe and
+ does not leak resources. Therefore, a special atfork handler is
+ installed for the child. */
+
+static void
+ptmalloc_unlock_all2 __MALLOC_P((void))
+{
+ arena *ar_ptr;
+
+#if defined(_LIBC) || defined(MALLOC_HOOKS)
+ tsd_setspecific(arena_key, save_arena);
+ __malloc_hook = save_malloc_hook;
+ __free_hook = save_free_hook;
+#endif
+ for(ar_ptr = &main_arena;;) {
+ (void)mutex_init(&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if(ar_ptr == &main_arena) break;
+ }
+ (void)mutex_init(&list_lock);
+}
+
+#else
+
+#define ptmalloc_unlock_all2 ptmalloc_unlock_all
+
+#endif
+
/* Initialization routine. */
#if defined(_LIBC)
#if 0
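
Besides taking the locks, ptmalloc_lock_all() parks the calling thread's arena pointer and swaps
__malloc_hook/__free_hook to the atfork variants, because the atfork machinery itself may call
malloc/free while the handlers run (the comment above cites LinuxThreads); such calls must not
block on mutexes that are already held. The hook-swap idiom in isolation, with illustrative names
that are not glibc's, looks roughly like:

    #include <stddef.h>
    #include <stdlib.h>

    typedef void *(*alloc_fn)(size_t);

    /* Regular allocator: would take the heap locks. */
    static void *normal_alloc(size_t sz) { return malloc(sz); }

    /* Emergency allocator for the thread that owns the locks: a tiny
       bump arena that needs no locking at all. */
    static char   emergency[4096];
    static size_t emergency_used;
    static void *fallback_alloc(size_t sz)
    {
      if (sz > sizeof(emergency) - emergency_used)
        return NULL;
      void *p = emergency + emergency_used;
      emergency_used += sz;
      return p;
    }

    static alloc_fn current_alloc = normal_alloc;
    static alloc_fn saved_alloc;

    static void enter_critical(void)
    {
      saved_alloc   = current_alloc;     /* remember the real hook   */
      current_alloc = fallback_alloc;    /* reroute while locks held */
    }

    static void leave_critical(void)
    {
      current_alloc = saved_alloc;       /* restore on the way out   */
    }
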
@@ -1524,8 +1619,6 @@ ptmalloc_init __MALLOC_P((void))
#endif
{
#if defined(_LIBC) || defined(MALLOC_HOOKS)
- __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size));
- void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr));
const char* s;
#endif
@@ -1550,6 +1643,7 @@ ptmalloc_init __MALLOC_P((void))
mutex_init(&list_lock);
tsd_key_create(&arena_key, NULL);
tsd_setspecific(arena_key, (Void_t *)&main_arena);
+ thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#endif
#if defined(_LIBC) || defined(MALLOC_HOOKS)
if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
@@ -1573,6 +1667,12 @@ ptmalloc_init __MALLOC_P((void))
__malloc_initialized = 1;
}
+/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
+#ifdef thread_atfork_static
+thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
+ ptmalloc_unlock_all2)
+#endif
+
#if defined(_LIBC) || defined(MALLOC_HOOKS)
/* Hooks for debugging versions. The initial hooks just call the
@@ -1656,15 +1756,15 @@ __malloc_check_init()
static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
-#define MMAP(size, prot) ((dev_zero_fd < 0) ? \
+#define MMAP(size, prot, flags) ((dev_zero_fd < 0) ? \
(dev_zero_fd = open("/dev/zero", O_RDWR), \
- mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0)) : \
- mmap(0, (size), (prot), MAP_PRIVATE, dev_zero_fd, 0))
+ mmap(0, (size), (prot), (flags), dev_zero_fd, 0)) : \
+ mmap(0, (size), (prot), (flags), dev_zero_fd, 0))
#else
-#define MMAP(size, prot) \
- (mmap(0, (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0))
+#define MMAP(size, prot, flags) \
+ (mmap(0, (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
#endif
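
The MMAP() macro change only threads a flags argument through both variants; the underlying
portability idiom -- use MAP_ANONYMOUS where available, otherwise map a cached /dev/zero
descriptor -- can be written stand-alone roughly as follows (a sketch; anon_map is not a name
used by the patch):

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>

    static void *anon_map(size_t size, int prot, int flags)
    {
    #ifdef MAP_ANONYMOUS
      return mmap(NULL, size, prot, flags | MAP_ANONYMOUS, -1, 0);
    #else
      static int zero_fd = -1;           /* cached, like dev_zero_fd above */
      if (zero_fd < 0)
        zero_fd = open("/dev/zero", O_RDWR);
      return mmap(NULL, size, prot, flags, zero_fd, 0);
    #endif
    }
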
@@ -1684,7 +1784,7 @@ static mchunkptr mmap_chunk(size) size_t size;
*/
size = (size + SIZE_SZ + page_mask) & ~page_mask;
- p = (mchunkptr)MMAP(size, PROT_READ|PROT_WRITE);
+ p = (mchunkptr)MMAP(size, PROT_READ|PROT_WRITE, MAP_PRIVATE);
if(p == (mchunkptr)-1) return 0;
n_mmaps++;
@@ -1812,7 +1912,11 @@ new_heap(size) size_t size;
size = HEAP_MAX_SIZE;
size = (size + page_mask) & ~page_mask;
- p1 = (char *)MMAP(HEAP_MAX_SIZE<<1, PROT_NONE);
+ /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
+ No swap space needs to be reserved for the following large
+ mapping (on Linux, this is the case for all non-writable mappings
+ anyway). */
+ p1 = (char *)MMAP(HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
if(p1 == (char *)-1)
return 0;
p2 = (char *)(((unsigned long)p1 + HEAP_MAX_SIZE) & ~(HEAP_MAX_SIZE-1));
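
new_heap() now reserves twice HEAP_MAX_SIZE with PROT_NONE and MAP_NORESERVE (no swap need be
committed for an unwritable mapping) and then keeps only the HEAP_MAX_SIZE-aligned half, as the
p2 computation above shows. The over-map-and-trim technique on its own, assuming POSIX
mmap/munmap and a power-of-two alignment, might look like:

    #include <stdint.h>
    #include <sys/mman.h>

    #ifndef MAP_NORESERVE
    # define MAP_NORESERVE 0   /* same fallback as the hunk near the top */
    #endif

    /* Reserve `align' bytes aligned to `align' (a power of two). */
    static void *aligned_reserve(size_t align)
    {
      char *p1 = mmap(NULL, align << 1, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (p1 == MAP_FAILED)
        return NULL;

      char  *p2   = (char *)(((uintptr_t)p1 + align) & ~(uintptr_t)(align - 1));
      size_t lead = (size_t)(p2 - p1);

      if (lead > 0)
        munmap(p1, lead);                     /* unaligned head */
      if (align - lead > 0)
        munmap(p2 + align, align - lead);     /* unused tail    */

      /* The caller would mprotect() the parts it actually uses. */
      return p2;
    }
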
@@ -1905,6 +2009,7 @@ arena_get2(a_tsd, size) arena *a_tsd; size_t size;
}
/* Check the global, circularly linked list for available arenas. */
+repeat:
do {
if(!mutex_trylock(&a->mutex)) {
THREAD_STAT(++(a->stat_lock_loop));
@@ -1914,6 +2019,16 @@ arena_get2(a_tsd, size) arena *a_tsd; size_t size;
a = a->next;
} while(a != a_tsd);
+ /* If not even the list_lock can be obtained, try again. This can
+ happen during `atfork', or for example on systems where thread
+ creation makes it temporarily impossible to obtain _any_
+ locks. */
+ if(mutex_trylock(&list_lock)) {
+ a = a_tsd;
+ goto repeat;
+ }
+ (void)mutex_unlock(&list_lock);
+
/* Nothing immediately available, so generate a new arena. */
h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT));
if(!h)
@@ -3638,9 +3753,6 @@ malloc_update_mallinfo(ar_ptr, mi) arena *ar_ptr; struct mallinfo *mi;
#endif
INTERNAL_SIZE_T avail;
- /* Initialize the memory. */
- memset (mi, '\0', sizeof (struct mallinfo));
-
(void)mutex_lock(&ar_ptr->mutex);
avail = chunksize(top(ar_ptr));
navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
@@ -3664,6 +3776,7 @@ malloc_update_mallinfo(ar_ptr, mi) arena *ar_ptr; struct mallinfo *mi;
mi->arena = ar_ptr->size;
mi->ordblks = navail;
+ mi->smblks = mi->usmblks = mi->fsmblks = 0; /* clear unused fields */
mi->uordblks = ar_ptr->size - avail;
mi->fordblks = avail;
mi->hblks = n_mmaps;
@@ -3990,13 +4103,39 @@ mALLOC_SET_STATe(msptr) Void_t* msptr;
/* A simple, standard set of debugging hooks. Overhead is `only' one
byte per chunk; still this will catch most cases of double frees or
- overruns. */
+ overruns. The goal here is to avoid obscure crashes due to invalid
+ usage, unlike in the MALLOC_DEBUG code. */
#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
+/* Instrument a chunk with overrun detector byte(s) and convert it
+ into a user pointer with requested size sz. */
+
+static Void_t*
+#if __STD_C
+chunk2mem_check(mchunkptr p, size_t sz)
+#else
+chunk2mem_check(p, sz) mchunkptr p; size_t sz;
+#endif
+{
+ unsigned char* m_ptr = (unsigned char*)chunk2mem(p);
+ size_t i;
+
+ for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
+ i > sz;
+ i -= 0xFF) {
+ if(i-sz < 0x100) {
+ m_ptr[i] = (unsigned char)(i-sz);
+ break;
+ }
+ m_ptr[i] = 0xFF;
+ }
+ m_ptr[sz] = MAGICBYTE(p);
+ return (Void_t*)m_ptr;
+}
+
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
- pointer. If the provided pointer is not valid, return NULL. The
- goal here is to avoid crashes, unlike in the MALLOC_DEBUG code. */
+ pointer. If the provided pointer is not valid, return NULL. */
static mchunkptr
#if __STD_C
@@ -4006,7 +4145,8 @@ mem2chunk_check(mem) Void_t* mem;
#endif
{
mchunkptr p;
- INTERNAL_SIZE_T sz;
+ INTERNAL_SIZE_T sz, c;
+ unsigned char magic;
p = mem2chunk(mem);
if(!aligned_OK(p)) return NULL;
@@ -4019,9 +4159,11 @@ mem2chunk_check(mem) Void_t* mem;
(long)prev_chunk(p)<(long)sbrk_base ||
next_chunk(prev_chunk(p))!=p) ))
return NULL;
- if(*((unsigned char*)p + sz + (SIZE_SZ-1)) != MAGICBYTE(p))
- return NULL;
- *((unsigned char*)p + sz + (SIZE_SZ-1)) ^= 0xFF;
+ magic = MAGICBYTE(p);
+ for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
+ if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+ }
+ ((unsigned char*)p)[sz] ^= 0xFF;
} else {
unsigned long offset, page_mask = malloc_getpagesize-1;
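
chunk2mem_check() and the reworked mem2chunk_check() are two halves of one scheme: the allocation
side plants MAGICBYTE(p) immediately after the requested size and fills any remaining slack with
"distance" bytes, and the checking side hops back along those bytes until it finds the magic
value, refusing the pointer if the chain is broken. A stripped-down version of that trailer
scheme, written against a plain buffer instead of a malloc chunk (so `cap' here stands in for the
chunk's usable size), could read:

    #include <stddef.h>

    #define MAGICBYTE(p) ((((size_t)(p) >> 3) ^ ((size_t)(p) >> 11)) & 0xFF)

    /* buf has cap usable bytes, of which the caller asked for sz (sz < cap). */
    static void set_trailer(unsigned char *buf, size_t sz, size_t cap)
    {
      size_t i;
      for (i = cap - 1; i > sz; i -= 0xFF) {
        if (i - sz < 0x100) {               /* last hop: exact distance */
          buf[i] = (unsigned char)(i - sz);
          break;
        }
        buf[i] = 0xFF;                      /* full-length hop marker   */
      }
      buf[sz] = (unsigned char)MAGICBYTE(buf);
    }

    /* Returns 0 if the trailer is intact, -1 on a (probable) overrun. */
    static int check_trailer(const unsigned char *buf, size_t cap)
    {
      unsigned char magic = (unsigned char)MAGICBYTE(buf);
      size_t i = cap - 1;
      unsigned char c;

      while ((c = buf[i]) != magic) {
        if (c == 0 || c > i)                /* corrupted hop chain */
          return -1;
        i -= c;
      }
      return 0;
    }
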
@@ -4037,13 +4179,53 @@ mem2chunk_check(mem) Void_t* mem;
( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
return NULL;
- if(*((unsigned char*)p + sz - 1) != MAGICBYTE(p))
- return NULL;
- *((unsigned char*)p + sz - 1) ^= 0xFF;
+ magic = MAGICBYTE(p);
+ for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
+ if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+ }
+ ((unsigned char*)p)[sz] ^= 0xFF;
}
return p;
}
+/* Check for corruption of the top chunk, and try to recover if
+ necessary. */
+
+static int
+top_check()
+{
+ mchunkptr t = top(&main_arena);
+ char* brk, * new_brk;
+ INTERNAL_SIZE_T front_misalign, sbrk_size;
+ unsigned long pagesz = malloc_getpagesize;
+
+ if((char*)t + chunksize(t) == sbrk_base + sbrked_mem ||
+ t == initial_top(&main_arena)) return 0;
+
+ switch(check_action) {
+ case 1:
+ fprintf(stderr, "malloc: top chunk is corrupt\n");
+ break;
+ case 2:
+ abort();
+ }
+ /* Try to set up a new top chunk. */
+ brk = MORECORE(0);
+ front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0)
+ front_misalign = MALLOC_ALIGNMENT - front_misalign;
+ sbrk_size = front_misalign + top_pad + MINSIZE;
+ sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
+ new_brk = (char*)(MORECORE (sbrk_size));
+ if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
+ sbrked_mem = (new_brk - sbrk_base) + sbrk_size;
+
+ top(&main_arena) = (mchunkptr)(brk + front_misalign);
+ set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
+
+ return 0;
+}
+
static Void_t*
#if __STD_C
malloc_check(size_t sz)
@@ -4055,16 +4237,10 @@ malloc_check(sz) size_t sz;
INTERNAL_SIZE_T nb = request2size(sz + 1);
(void)mutex_lock(&main_arena.mutex);
- victim = chunk_alloc(&main_arena, nb);
+ victim = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
(void)mutex_unlock(&main_arena.mutex);
if(!victim) return NULL;
- nb = chunksize(victim);
- if(chunk_is_mmapped(victim))
- --nb;
- else
- nb += SIZE_SZ - 1;
- *((unsigned char*)victim + nb) = MAGICBYTE(victim);
- return chunk2mem(victim);
+ return chunk2mem_check(victim, sz);
}
static void
@@ -4142,7 +4318,7 @@ realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes;
if(oldsize - SIZE_SZ >= nb) newp = oldp; /* do nothing */
else {
/* Must alloc, copy, free. */
- newp = chunk_alloc(&main_arena, nb);
+ newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
if (newp) {
MALLOC_COPY(chunk2mem(newp), oldmem, oldsize - 2*SIZE_SZ);
munmap_chunk(oldp);
@@ -4153,7 +4329,8 @@ realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
} else {
#endif /* HAVE_MMAP */
- newp = chunk_realloc(&main_arena, oldp, oldsize, nb);
+ newp = (top_check() >= 0) ?
+ chunk_realloc(&main_arena, oldp, oldsize, nb) : NULL;
#if 0 /* Erase freed memory. */
nb = chunksize(newp);
if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
@@ -4169,13 +4346,7 @@ realloc_check(oldmem, bytes) Void_t* oldmem; size_t bytes;
(void)mutex_unlock(&main_arena.mutex);
if(!newp) return NULL;
- nb = chunksize(newp);
- if(chunk_is_mmapped(newp))
- --nb;
- else
- nb += SIZE_SZ - 1;
- *((unsigned char*)newp + nb) = MAGICBYTE(newp);
- return chunk2mem(newp);
+ return chunk2mem_check(newp, bytes);
}
static Void_t*
@@ -4193,16 +4364,10 @@ memalign_check(alignment, bytes) size_t alignment; size_t bytes;
nb = request2size(bytes+1);
(void)mutex_lock(&main_arena.mutex);
- p = chunk_align(&main_arena, nb, alignment);
+ p = (top_check() >= 0) ? chunk_align(&main_arena, nb, alignment) : NULL;
(void)mutex_unlock(&main_arena.mutex);
if(!p) return NULL;
- nb = chunksize(p);
- if(chunk_is_mmapped(p))
- --nb;
- else
- nb += SIZE_SZ - 1;
- *((unsigned char*)p + nb) = MAGICBYTE(p);
- return chunk2mem(p);
+ return chunk2mem_check(p, bytes);
}
/* The following hooks are used when the global initialization in
@@ -4240,6 +4405,72 @@ free_starter(mem) Void_t* mem;
chunk_free(&main_arena, p);
}
+/* The following hooks are used while the `atfork' handling mechanism
+ is active. */
+
+static Void_t*
+#if __STD_C
+malloc_atfork(size_t sz)
+#else
+malloc_atfork(sz) size_t sz;
+#endif
+{
+ Void_t *vptr = NULL;
+ mchunkptr victim;
+
+ tsd_getspecific(arena_key, vptr);
+ if(!vptr) {
+ if(save_malloc_hook != malloc_check) {
+ victim = chunk_alloc(&main_arena, request2size(sz));
+ return victim ? chunk2mem(victim) : 0;
+ } else {
+ if(top_check() < 0) return 0;
+ victim = chunk_alloc(&main_arena, request2size(sz+1));
+ return victim ? chunk2mem_check(victim, sz) : 0;
+ }
+ } else {
+ /* Suspend the thread until the `atfork' handlers have completed.
+ By that time, the hooks will have been reset as well, so that
+ mALLOc() can be used again. */
+ (void)mutex_lock(&list_lock);
+ (void)mutex_unlock(&list_lock);
+ return mALLOc(sz);
+ }
+}
+
+static void
+#if __STD_C
+free_atfork(Void_t* mem)
+#else
+free_atfork(mem) Void_t* mem;
+#endif
+{
+ Void_t *vptr = NULL;
+ arena *ar_ptr;
+ mchunkptr p; /* chunk corresponding to mem */
+
+ if (mem == 0) /* free(0) has no effect */
+ return;
+
+ p = mem2chunk(mem); /* do not bother to replicate free_check here */
+
+#if HAVE_MMAP
+ if (chunk_is_mmapped(p)) /* release mmapped memory. */
+ {
+ munmap_chunk(p);
+ return;
+ }
+#endif
+
+ ar_ptr = arena_for_ptr(p);
+ tsd_getspecific(arena_key, vptr);
+ if(vptr)
+ (void)mutex_lock(&ar_ptr->mutex);
+ chunk_free(ar_ptr, p);
+ if(vptr)
+ (void)mutex_unlock(&ar_ptr->mutex);
+}
+
#endif /* defined(_LIBC) || defined(MALLOC_HOOKS) */
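
malloc_atfork() and free_atfork() close the remaining window: a thread other than the one running
the fork handlers parks on list_lock until the handlers finish, instead of touching arena mutexes
that are held across fork(). Without this machinery, a child forked while another thread holds an
arena mutex inherits a lock that no thread will ever release. A minimal stress program for that
scenario (assuming POSIX threads; illustrative only):

    #include <pthread.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static void *hammer(void *arg)
    {
      (void)arg;
      for (;;)
        free(malloc(64));        /* keeps an arena mutex busy */
      return NULL;
    }

    int main(void)
    {
      pthread_t t;
      int i;

      pthread_create(&t, NULL, hammer, NULL);

      for (i = 0; i < 100; i++) {
        pid_t pid = fork();
        if (pid == 0) {
          free(malloc(128));     /* may dead-lock without the atfork handlers */
          _exit(0);
        }
        waitpid(pid, NULL, 0);
      }
      return 0;
    }
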