Diffstat (limited to 'malloc/malloc.c')
-rw-r--r--  malloc/malloc.c | 27 ++++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 0b9facefd4..4b623e2200 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4779,8 +4779,29 @@ _int_free(mstate av, mchunkptr p)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
- errstr = "free(): invalid next size (fast)";
- goto errout;
+#ifdef ATOMIC_FASTBINS
+ /* We might not have a lock at this point and concurrent modifications
+ of system_mem might have led to a false positive. Redo the test
+ after getting the lock. */
+ if (have_lock
+ || ({ assert (locked == 0);
+ mutex_lock(&av->mutex);
+ locked = 1;
+ chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
+ || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
+ }))
+#endif
+ {
+ errstr = "free(): invalid next size (fast)";
+ goto errout;
+ }
+#ifdef ATOMIC_FASTBINS
+ if (! have_lock)
+ {
+ (void)mutex_unlock(&av->mutex);
+ locked = 0;
+ }
+#endif
}
if (__builtin_expect (perturb_byte, 0))
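
The hunk above is an instance of a double-check pattern: the next-size sanity test first runs without the arena lock, and only when it appears to fail is av->mutex taken and the test repeated, so that a concurrent change of av->system_mem cannot cause a spurious "invalid next size (fast)" abort. Below is a minimal standalone sketch of the same idea, not the glibc code itself; the names arena_mutex, shared_limit, value_looks_bad and report_corruption are hypothetical stand-ins for av->mutex, av->system_mem, the next-size test and malloc_printerr. Like the patch, it relies on the GNU C statement-expression extension ({ ... }).

#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for av->mutex and av->system_mem.  */
static pthread_mutex_t arena_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t shared_limit = 1 << 20;   /* may grow concurrently */

/* Stand-in for the "invalid next size (fast)" test.  */
static int value_looks_bad (size_t value)
{
  return value <= 2 * sizeof (size_t) || value >= shared_limit;
}

static void report_corruption (const char *msg)
{
  fprintf (stderr, "%s\n", msg);
  abort ();
}

/* Sketch of the pattern used in the patch: the cheap test runs without
   the lock; if it seems to fail, take the lock and repeat it before
   concluding that the heap is corrupt.  */
static void check_value (size_t value, int have_lock)
{
  int locked = 0;

  if (value_looks_bad (value))
    {
      /* Without the lock the first test may be a false positive,
         e.g. because shared_limit was being enlarged concurrently.  */
      if (have_lock
          || ({ assert (locked == 0);
                pthread_mutex_lock (&arena_mutex);
                locked = 1;
                value_looks_bad (value); }))
        report_corruption ("check_value: invalid size");

      if (!have_lock && locked)
        {
          pthread_mutex_unlock (&arena_mutex);
          locked = 0;
        }
    }
}

int main (void)
{
  check_value (128, 0);   /* passes the unlocked fast path */
  return 0;
}
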
@@ -4803,7 +4824,7 @@ _int_free(mstate av, mchunkptr p)
}
p->fd = fd = old;
}
- while ((old = catomic_compare_and_exchange_val_acq (fb, p, fd)) != fd);
+ while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
#else
/* Another simple check: make sure the top of the bin is not the
record we are going to add (i.e., double free). */
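
The second hunk only changes the memory ordering of the compare-and-exchange that pushes the freed chunk onto the fastbin list, from acquire to release. A minimal sketch of that lock-free push follows, written with C11 <stdatomic.h> rather than glibc's internal catomic_* macros; struct node, list_head and push are hypothetical names standing in for a fastbin chunk, *fb and the loop above.

#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical node type standing in for a fastbin chunk; `next`
   plays the role of the chunk's fd pointer.  */
struct node
{
  struct node *next;
};

/* Head of a lock-free LIFO list, standing in for *fb.  */
static _Atomic (struct node *) list_head = NULL;

/* Push `p` onto the list: read the current head, store it into
   p->next (p->fd in malloc), then try to swing the head to p.
   Release ordering publishes the store to p->next to the thread that
   later pops the node; the pushing thread does not need acquire
   semantics here, which presumably is what the switch from
   catomic_compare_and_exchange_val_acq to ..._rel reflects.  */
static void push (struct node *p)
{
  struct node *old = atomic_load_explicit (&list_head, memory_order_relaxed);
  do
    p->next = old;
  while (!atomic_compare_exchange_weak_explicit (&list_head, &old, p,
                                                 memory_order_release,
                                                 memory_order_relaxed));
}
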