summaryrefslogtreecommitdiff
path: root/sysdeps/generic
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2014-11-02 15:07:29 +0100
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2014-11-02 15:07:29 +0100
commit01a36d151b82f6034c9e8721a5f486ab95254d35 (patch)
treefadaa5aeecb00c6a36ad1682c389a53a9a29ff12 /sysdeps/generic
parent46d3992fea0690f81ed3924a0a13f3aecde809ae (diff)
Fix safety of pthread_barrier_wait
The barrier queue uses threads' next field, so it cannot actually be safely walked without holding the barrier lock. * sysdeps/generic/pt-barrier-wait.c (pthread_barrier_wait): Record an array of __pthread to wake while holding the lock, and wake them only after unlocking it.
Diffstat (limited to 'sysdeps/generic')
-rw-r--r--sysdeps/generic/pt-barrier-wait.c24
1 file changed, 16 insertions, 8 deletions
diff --git a/sysdeps/generic/pt-barrier-wait.c b/sysdeps/generic/pt-barrier-wait.c
index f1de250..42fbdb0 100644
--- a/sysdeps/generic/pt-barrier-wait.c
+++ b/sysdeps/generic/pt-barrier-wait.c
@@ -33,16 +33,24 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
if (barrier->count > 1)
{
struct __pthread *wakeup;
+ unsigned n = 0;
- wakeup = barrier->queue;
- barrier->queue = NULL;
- __pthread_spin_unlock (&barrier->lock);
+ __pthread_queue_iterate (barrier->queue, wakeup)
+ n ++;
- /* We can safely walk the list of waiting threads without
- holding the lock since it is decoupled from the barrier
- variable now. */
- __pthread_dequeuing_iterate (wakeup, wakeup)
- __pthread_wakeup (wakeup);
+ {
+ struct __pthread *wakeups[n];
+ unsigned i = 0;
+
+ __pthread_dequeuing_iterate (barrier->queue, wakeup)
+ wakeups[i ++] = wakeup;
+
+ barrier->queue = NULL;
+ __pthread_spin_unlock (&barrier->lock);
+
+ for (i = 0; i < n; i ++)
+ __pthread_wakeup (wakeups[i]);
+ }
}
return PTHREAD_BARRIER_SERIAL_THREAD;