Diffstat (limited to 'kern/rtmutex.c')
 kern/rtmutex.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 75 insertions(+), 9 deletions(-)
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index db239206..0070b93f 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -16,6 +16,7 @@
  */
 
 #include <assert.h>
+#include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
 
@@ -26,20 +27,28 @@
 #include <kern/thread.h>
 #include <kern/turnstile.h>
 
+static struct thread *
+rtmutex_get_thread(uintptr_t owner)
+{
+    return (struct thread *)(owner & RTMUTEX_OWNER_MASK);
+}
+
 static void
 rtmutex_set_contended(struct rtmutex *rtmutex)
 {
     atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_RELEASE);
 }
 
-void
-rtmutex_lock_slow(struct rtmutex *rtmutex)
+static int
+rtmutex_lock_slow_common(struct rtmutex *rtmutex, bool timed, uint64_t ticks)
 {
     struct turnstile *turnstile;
     uintptr_t self, owner;
     struct thread *thread;
     uintptr_t bits;
+    int error;
 
+    error = 0;
     self = (uintptr_t)thread_self();
 
     turnstile = turnstile_lend(rtmutex);
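
For context, the owner word packs the owning thread's pointer together with low-order flag bits, which the new rtmutex_get_thread() helper strips off. Below is a minimal self-contained sketch of that encoding; the flag values and the owner_encode() helper are assumptions for illustration, since the real definitions live in the rtmutex headers, not in this diff.

#include <assert.h>
#include <stdint.h>

/* Assumed values; the actual definitions come from the rtmutex headers. */
#define RTMUTEX_CONTENDED   ((uintptr_t)0x1) /* turnstile has (or had) waiters */
#define RTMUTEX_FORCE_WAIT  ((uintptr_t)0x2) /* force unlock through the slow path */
#define RTMUTEX_OWNER_MASK  (~(RTMUTEX_CONTENDED | RTMUTEX_FORCE_WAIT))

struct thread;

/* Pack a thread pointer and flag bits into a single owner word. */
static uintptr_t
owner_encode(struct thread *thread, uintptr_t bits)
{
    uintptr_t addr = (uintptr_t)thread;

    /* Thread structures must be aligned so the low bits are free. */
    assert((addr & ~RTMUTEX_OWNER_MASK) == 0);
    return addr | bits;
}

Any flag layout works as long as thread structures are aligned strictly enough that the flag bits of a valid pointer are always zero.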
@@ -56,11 +65,40 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
             break;
         }
 
-        thread = (struct thread *)(owner & RTMUTEX_OWNER_MASK);
-        turnstile_wait(turnstile, "rtmutex", thread);
+        thread = rtmutex_get_thread(owner);
+
+        if (!timed) {
+            turnstile_wait(turnstile, "rtmutex", thread);
+        } else {
+            error = turnstile_timedwait(turnstile, "rtmutex", thread, ticks);
+
+            if (error) {
+                break;
+            }
+        }
+
         bits |= RTMUTEX_FORCE_WAIT;
     }
 
+    if (error) {
+        /*
+         * More than one thread may have timed out while waiting. Such
+         * threads aren't considered waiters, which can leave the turnstile
+         * empty. The first to reacquire the turnstile clears the contention
+         * bits, allowing the owner to unlock through the fast path.
+         */
+        if (turnstile_empty(turnstile)) {
+            owner = atomic_load(&rtmutex->owner, ATOMIC_RELAXED);
+
+            if (owner & RTMUTEX_CONTENDED) {
+                owner &= RTMUTEX_OWNER_MASK;
+                atomic_store(&rtmutex->owner, owner, ATOMIC_RELAXED);
+            }
+        }
+
+        goto out;
+    }
+
     turnstile_own(turnstile);
 
     if (turnstile_empty(turnstile)) {
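
The hunk above rewrites the body of the acquisition loop, whose opening lines are elided context. As a reading aid, here is a hedged sketch of that loop's approximate shape before this change; the atomic_cas call, its argument order, and the sketch function itself are assumptions styled after the other atomic_* calls in this file, not lines from the commit.

/* Sketch only: approximate pre-change shape of the acquisition loop. */
static void
rtmutex_acquire_sketch(struct rtmutex *rtmutex, struct turnstile *turnstile)
{
    uintptr_t self, owner, bits;
    struct thread *thread;

    self = (uintptr_t)thread_self();
    bits = RTMUTEX_CONTENDED;

    for (;;) {
        /* Try to install ourself as owner while preserving the flag bits. */
        owner = atomic_cas(&rtmutex->owner, bits, self | bits, ATOMIC_ACQUIRE);

        if (owner == bits) {
            break;              /* lock acquired */
        }

        /* Owned by another thread: sleep on the turnstile. */
        thread = rtmutex_get_thread(owner);
        turnstile_wait(turnstile, "rtmutex", thread);

        /* From now on, force the owner through the slow unlock path. */
        bits |= RTMUTEX_FORCE_WAIT;
    }
}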
@@ -68,6 +106,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
         assert(owner == (self | bits));
     }
 
+out:
     turnstile_return(turnstile);
@@ -76,27 +115,54 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
      * introducing unbounded priority inversion.
      * Instead, let new waiters do it, using their own priority.
      */
+
+    return error;
+}
+
+void
+rtmutex_lock_slow(struct rtmutex *rtmutex)
+{
+    int error;
+
+    error = rtmutex_lock_slow_common(rtmutex, false, 0);
+    assert(!error);
+}
+
+int
+rtmutex_timedlock_slow(struct rtmutex *rtmutex, uint64_t ticks)
+{
+    return rtmutex_lock_slow_common(rtmutex, true, ticks);
 }
 
 void
 rtmutex_unlock_slow(struct rtmutex *rtmutex)
 {
     struct turnstile *turnstile;
-    uintptr_t self, owner;
+    uintptr_t owner;
 
-    self = (uintptr_t)thread_self();
+    for (;;) {
+        turnstile = turnstile_acquire(rtmutex);
+
+        if (turnstile != NULL) {
+            break;
+        }
+
+        owner = rtmutex_unlock_fast(rtmutex);
 
-    turnstile = turnstile_acquire(rtmutex);
-    assert(turnstile != NULL);
+        if (!(owner & RTMUTEX_CONTENDED)) {
+            return;
+        }
+    }
 
     owner = atomic_swap_release(&rtmutex->owner,
                                 RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED);
-    assert((owner & RTMUTEX_OWNER_MASK) == self);
+    assert(rtmutex_get_thread(owner) == thread_self());
 
     turnstile_disown(turnstile);
     turnstile_signal(turnstile);
 
     turnstile_release(turnstile);
 
+    /* TODO Make private, use thread_set_priority_propagation_needed instead */
     thread_propagate_priority();
 }
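
Finally, a hypothetical caller-side sketch of how the new timed slow path might be used: an uncontended compare-and-swap fast path falling back to rtmutex_timedlock_slow(). The example_timedlock name, the fast-path CAS, and the atomic_cas signature are assumptions, not part of this commit; a nonzero return propagates the timeout error reported by turnstile_timedwait().

#include <stdint.h>

#include <kern/atomic.h>
#include <kern/rtmutex.h>
#include <kern/thread.h>

/* Hypothetical wrapper; not part of this commit. */
static int
example_timedlock(struct rtmutex *rtmutex, uint64_t ticks)
{
    uintptr_t prev;

    /* Fast path: CAS the owner word from 0 (unowned, no flags) to self. */
    prev = atomic_cas(&rtmutex->owner, (uintptr_t)0,
                      (uintptr_t)thread_self(), ATOMIC_ACQUIRE);

    if (prev == 0) {
        return 0;
    }

    /* Contended: block on the turnstile, waiting at most `ticks` ticks. */
    return rtmutex_timedlock_slow(rtmutex, ticks);
}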