path: root/nptl/pthread_rwlock_rdlock.c
Diffstat (limited to 'nptl/pthread_rwlock_rdlock.c')
-rw-r--r--  nptl/pthread_rwlock_rdlock.c  40
1 file changed, 38 insertions(+), 2 deletions(-)
diff --git a/nptl/pthread_rwlock_rdlock.c b/nptl/pthread_rwlock_rdlock.c
index 0edca654a3..004a386fd5 100644
--- a/nptl/pthread_rwlock_rdlock.c
+++ b/nptl/pthread_rwlock_rdlock.c
@@ -23,6 +23,7 @@
#include <pthreadP.h>
#include <stap-probe.h>
#include <elide.h>
+#include <stdbool.h>
/* Acquire read lock for RWLOCK. Slow path. */
@@ -30,6 +31,7 @@ static int __attribute__((noinline))
__pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
{
int result = 0;
+ bool wake = false;
/* Lock is taken in caller. */
@@ -81,7 +83,17 @@ __pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
result = EAGAIN;
}
else
- LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ {
+ LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ /* See pthread_rwlock_rdlock. */
+ if (rwlock->__data.__nr_readers == 1
+ && rwlock->__data.__nr_readers_queued > 0
+ && rwlock->__data.__nr_writers_queued > 0)
+ {
+ ++rwlock->__data.__readers_wakeup;
+ wake = true;
+ }
+ }
break;
}
@@ -90,6 +102,10 @@ __pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
/* We are done, free the lock. */
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+ if (wake)
+ lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
+ rwlock->__data.__shared);
+
return result;
}
@@ -100,6 +116,7 @@ int
__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
int result = 0;
+ bool wake = false;
LIBC_PROBE (rdlock_entry, 1, rwlock);
@@ -126,11 +143,30 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
result = EAGAIN;
}
else
- LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ {
+ LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+ /* If we are the first reader, and there are blocked readers and
+ writers (which we don't prefer, see above), then it can be the
+ case that we stole the lock from a writer that was already woken
+ to acquire it. That means that we need to take over the writer's
+ responsibility to wake all readers (see pthread_rwlock_unlock).
+ Thus, wake all readers in this case. */
+ if (rwlock->__data.__nr_readers == 1
+ && rwlock->__data.__nr_readers_queued > 0
+ && rwlock->__data.__nr_writers_queued > 0)
+ {
+ ++rwlock->__data.__readers_wakeup;
+ wake = true;
+ }
+ }
/* We are done, free the lock. */
lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+ if (wake)
+ lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
+ rwlock->__data.__shared);
+
return result;
}
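
For context, here is a minimal stand-alone sketch (not part of the patch; the names reader, writer, and shared_value are hypothetical) of the contention pattern the change targets: several threads calling pthread_rwlock_rdlock while another thread calls pthread_rwlock_wrlock on the same lock, so a reader can acquire the lock just after a blocked writer was woken to take it. Build with gcc -pthread.

/* Hypothetical example: readers and a writer contending on one rwlock.
   This only demonstrates the usage pattern; it does not reproduce the
   glibc-internal wakeup handling shown in the diff above.  */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int shared_value;

static void *reader (void *arg)
{
  (void) arg;
  for (int i = 0; i < 100000; i++)
    {
      pthread_rwlock_rdlock (&lock);
      /* Read the shared state under the read lock.  */
      (void) shared_value;
      pthread_rwlock_unlock (&lock);
    }
  return NULL;
}

static void *writer (void *arg)
{
  (void) arg;
  for (int i = 0; i < 100000; i++)
    {
      pthread_rwlock_wrlock (&lock);
      shared_value++;
      pthread_rwlock_unlock (&lock);
    }
  return NULL;
}

int main (void)
{
  pthread_t r1, r2, w;
  pthread_create (&r1, NULL, reader, NULL);
  pthread_create (&r2, NULL, reader, NULL);
  pthread_create (&w, NULL, writer, NULL);
  pthread_join (r1, NULL);
  pthread_join (r2, NULL);
  pthread_join (w, NULL);
  printf ("final value: %d\n", shared_value);
  return 0;
}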