summaryrefslogtreecommitdiff
path: root/malloc
diff options
context:
space:
mode:
authorUlrich Drepper <drepper@redhat.com>2009-07-16 09:54:34 -0700
committerPetr Baudis <pasky@suse.cz>2009-07-16 20:36:06 +0200
commit475cfe06fa5de340302b2245e0a0a162d7350c32 (patch)
treec6a4732f7335476838ef5b765522dfcb790efc00 /malloc
parente875bad50a2247e6297c1b2306d87b3eb623a0be (diff)
Fix race in corruption check.
With atomic fastbins the checks performed can race with concurrent modifications of the arena. If we detect a problem re-do the test after getting the lock. (cherry picked from commit bec466d922ee22b94ac0d00415fb605e136efe6e)
Diffstat (limited to 'malloc')
-rw-r--r--malloc/malloc.c25
1 file changed, 23 insertions, 2 deletions
diff --git a/malloc/malloc.c b/malloc/malloc.c
index bd44dee7f4..4b623e2200 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4779,8 +4779,29 @@ _int_free(mstate av, mchunkptr p)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
- errstr = "free(): invalid next size (fast)";
- goto errout;
+#ifdef ATOMIC_FASTBINS
+ /* We might not have a lock at this point and concurrent modifications
+ of system_mem might have led to a false positive. Redo the test
+ after getting the lock. */
+ if (have_lock
+ || ({ assert (locked == 0);
+ mutex_lock(&av->mutex);
+ locked = 1;
+ chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
+ || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
+ }))
+#endif
+ {
+ errstr = "free(): invalid next size (fast)";
+ goto errout;
+ }
+#ifdef ATOMIC_FASTBINS
+ if (! have_lock)
+ {
+ (void)mutex_unlock(&av->mutex);
+ locked = 0;
+ }
+#endif
}
if (__builtin_expect (perturb_byte, 0))