author     Richard Braun <rbraun@sceen.net>   2017-08-27 17:00:40 +0200
committer  Richard Braun <rbraun@sceen.net>   2017-08-27 17:03:38 +0200
commit     e395627038e10c6e966a141743b95d7c286b25bd (patch)
tree       746c543e9b04b90ead5bfa8f3d13b74c92df6ab9
parent     b155465ec7984d8a3f8c07a5f548e457f31b6af3 (diff)
kern/rtmutex: implement timed waits
-rw-r--r--  kern/mutex/mutex_pi_i.h |  8
-rw-r--r--  kern/rtmutex.c          | 84
-rw-r--r--  kern/rtmutex.h          | 14
-rw-r--r--  kern/rtmutex_i.h        |  2
4 files changed, 99 insertions(+), 9 deletions(-)
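This commit adds timed waits to the real-time mutex: rtmutex_timedlock() first tries the regular fast path and, on contention, falls back to the new rtmutex_timedlock_slow(), which blocks on the turnstile with turnstile_timedwait() instead of turnstile_wait(). The priority-inheritance mutex implementation gains a matching mutex_impl_timedlock() hook. As a minimal caller sketch (assumptions: the ticks argument follows the turnstile_timedwait() convention, and a timeout surfaces as a non-zero error code; neither detail is spelled out in this commit):

#include <stdint.h>

#include <kern/rtmutex.h>

/*
 * Hypothetical caller: enter a critical section, giving up once the
 * tick deadline passes. On error, the mutex is not owned and must
 * not be unlocked.
 */
static int
update_shared_state(struct rtmutex *lock, uint64_t ticks)
{
    int error;

    error = rtmutex_timedlock(lock, ticks);

    if (error) {
        return error;       /* timed out, lock not acquired */
    }

    /* ... critical section ... */

    rtmutex_unlock(lock);
    return 0;
}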
diff --git a/kern/mutex/mutex_pi_i.h b/kern/mutex/mutex_pi_i.h
index 6c39db74..616f09b7 100644
--- a/kern/mutex/mutex_pi_i.h
+++ b/kern/mutex/mutex_pi_i.h
@@ -23,6 +23,8 @@
        " use <kern/mutex.h> instead"
 #endif
 
+#include <stdint.h>
+
 #include <kern/mutex_types.h>
 #include <kern/rtmutex.h>
@@ -51,6 +53,12 @@ mutex_impl_lock(struct mutex *mutex)
     rtmutex_lock(&mutex->rtmutex);
 }
 
+static inline int
+mutex_impl_timedlock(struct mutex *mutex, uint64_t ticks)
+{
+    return rtmutex_timedlock(&mutex->rtmutex, ticks);
+}
+
 static inline void
 mutex_impl_unlock(struct mutex *mutex)
 {
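mutex_impl_timedlock() is the hook through which the generic mutex frontend can expose the timed operation; kern/mutex.h itself is not touched by this commit. A sketch of what the frontend wrapper would presumably look like (the mutex_timedlock name is an assumption, not part of this diff):

/* Hypothetical kern/mutex.h wrapper over the PI implementation. */
static inline int
mutex_timedlock(struct mutex *mutex, uint64_t ticks)
{
    return mutex_impl_timedlock(mutex, ticks);
}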
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index db239206..0070b93f 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -16,6 +16,7 @@
  */
 
 #include <assert.h>
+#include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
@@ -26,20 +27,28 @@
 #include <kern/thread.h>
 #include <kern/turnstile.h>
 
+static struct thread *
+rtmutex_get_thread(uintptr_t owner)
+{
+    return (struct thread *)(owner & RTMUTEX_OWNER_MASK);
+}
+
 static void
 rtmutex_set_contended(struct rtmutex *rtmutex)
 {
     atomic_or(&rtmutex->owner, RTMUTEX_CONTENDED, ATOMIC_RELEASE);
 }
 
-void
-rtmutex_lock_slow(struct rtmutex *rtmutex)
+static int
+rtmutex_lock_slow_common(struct rtmutex *rtmutex, bool timed, uint64_t ticks)
 {
     struct turnstile *turnstile;
     uintptr_t self, owner;
     struct thread *thread;
     uintptr_t bits;
+    int error;
 
+    error = 0;
     self = (uintptr_t)thread_self();
 
     turnstile = turnstile_lend(rtmutex);
@@ -56,11 +65,40 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
             break;
         }
 
-        thread = (struct thread *)(owner & RTMUTEX_OWNER_MASK);
-        turnstile_wait(turnstile, "rtmutex", thread);
+        thread = rtmutex_get_thread(owner);
+
+        if (!timed) {
+            turnstile_wait(turnstile, "rtmutex", thread);
+        } else {
+            error = turnstile_timedwait(turnstile, "rtmutex", thread, ticks);
+
+            if (error) {
+                break;
+            }
+        }
+
         bits |= RTMUTEX_FORCE_WAIT;
     }
 
+    if (error) {
+        /*
+         * Keep in mind that more than one thread may have timed out while
+         * waiting. Timed-out threads aren't counted as waiters, which can
+         * leave the turnstile empty; the first to reacquire it clears the
+         * contention bits, letting the owner unlock through the fast path.
+         */
+        if (turnstile_empty(turnstile)) {
+            owner = atomic_load(&rtmutex->owner, ATOMIC_RELAXED);
+
+            if (owner & RTMUTEX_CONTENDED) {
+                owner &= RTMUTEX_OWNER_MASK;
+                atomic_store(&rtmutex->owner, owner, ATOMIC_RELAXED);
+            }
+        }
+
+        goto out;
+    }
+
     turnstile_own(turnstile);
 
     if (turnstile_empty(turnstile)) {
@@ -68,6 +106,7 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
         assert(owner == (self | bits));
     }
 
+out:
     turnstile_return(turnstile);
 
     /*
@@ -76,27 +115,54 @@ rtmutex_lock_slow(struct rtmutex *rtmutex)
      * introducing unbounded priority inversion.
      * Instead, let new waiters do it, using their own priority.
      */
+
+    return error;
+}
+
+void
+rtmutex_lock_slow(struct rtmutex *rtmutex)
+{
+    int error;
+
+    error = rtmutex_lock_slow_common(rtmutex, false, 0);
+    assert(!error);
+}
+
+int
+rtmutex_timedlock_slow(struct rtmutex *rtmutex, uint64_t ticks)
+{
+    return rtmutex_lock_slow_common(rtmutex, true, ticks);
 }
 
 void
 rtmutex_unlock_slow(struct rtmutex *rtmutex)
 {
     struct turnstile *turnstile;
-    uintptr_t self, owner;
+    uintptr_t owner;
 
-    self = (uintptr_t)thread_self();
+    for (;;) {
+        turnstile = turnstile_acquire(rtmutex);
+
+        if (turnstile != NULL) {
+            break;
+        }
+
+        owner = rtmutex_unlock_fast(rtmutex);
 
-    turnstile = turnstile_acquire(rtmutex);
-    assert(turnstile != NULL);
+        if (!(owner & RTMUTEX_CONTENDED)) {
+            return;
+        }
+    }
 
     owner = atomic_swap_release(&rtmutex->owner,
                                 RTMUTEX_FORCE_WAIT | RTMUTEX_CONTENDED);
-    assert((owner & RTMUTEX_OWNER_MASK) == self);
+    assert(rtmutex_get_thread(owner) == thread_self());
 
     turnstile_disown(turnstile);
     turnstile_signal(turnstile);
     turnstile_release(turnstile);
 
+    /* TODO Make private, use thread_set_priority_propagation_needed instead */
     thread_propagate_priority();
 }
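The timeout path above leans on the rtmutex owner-word encoding: the owner is a thread pointer with state flags packed into its low bits, which is why rtmutex_get_thread() masks with RTMUTEX_OWNER_MASK. The real definitions live in kern/rtmutex_i.h; the values below are only a sketch consistent with the masking in this diff, assuming thread structures are at least 4-byte aligned so the two low bits are free:

#include <stdint.h>

/* Sketch of the assumed owner-word layout (see kern/rtmutex_i.h). */
#define RTMUTEX_CONTENDED   ((uintptr_t)0x1)    /* slow path in use */
#define RTMUTEX_FORCE_WAIT  ((uintptr_t)0x2)    /* new lockers must wait */
#define RTMUTEX_OWNER_MASK  (~(RTMUTEX_CONTENDED | RTMUTEX_FORCE_WAIT))

This layout also explains the new retry loop in rtmutex_unlock_slow(): a timed-out waiter may clear the contention bits between the owner's failed fast-path unlock and its call to turnstile_acquire(), so turnstile_acquire() can now legitimately return NULL, in which case the owner retries the fast path and returns if no new waiter has set RTMUTEX_CONTENDED in the meantime.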
diff --git a/kern/rtmutex.h b/kern/rtmutex.h
index ec79afa9..87cd15ad 100644
--- a/kern/rtmutex.h
+++ b/kern/rtmutex.h
@@ -87,6 +87,20 @@ rtmutex_lock(struct rtmutex *rtmutex)
     }
 }
 
+static inline int
+rtmutex_timedlock(struct rtmutex *rtmutex, uint64_t ticks)
+{
+    uintptr_t prev_owner;
+
+    prev_owner = rtmutex_lock_fast(rtmutex);
+
+    if (unlikely(prev_owner != 0)) {
+        return rtmutex_timedlock_slow(rtmutex, ticks);
+    }
+
+    return 0;
+}
+
 /*
  * Unlock a real-time mutex.
 *
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 984cfd16..75ac5e4a 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -74,6 +74,8 @@ rtmutex_unlock_fast(struct rtmutex *rtmutex)
 void rtmutex_lock_slow(struct rtmutex *rtmutex);
 
+int rtmutex_timedlock_slow(struct rtmutex *rtmutex, uint64_t ticks);
+
 void rtmutex_unlock_slow(struct rtmutex *rtmutex);
 
 #endif /* _KERN_RTMUTEX_I_H */
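For context, rtmutex_lock_fast() and rtmutex_unlock_fast(), used by the inline paths above, are defined earlier in this header and are unchanged by the commit. Judging from how their return values are used (rtmutex_timedlock() tests for a non-zero previous owner, rtmutex_unlock_slow() tests RTMUTEX_CONTENDED), both behave as compare-and-swap operations returning the previous owner word. A hedged sketch under that assumption, using the atomic_*(..., ATOMIC_*) call style visible elsewhere in this diff:

/*
 * Sketch only: the real definitions live in kern/rtmutex_i.h.
 * atomic_cas() is assumed to return the value previously stored.
 */
static inline uintptr_t
rtmutex_lock_fast(struct rtmutex *rtmutex)
{
    /* Succeeds (returns 0) only if the mutex was unowned. */
    return atomic_cas(&rtmutex->owner, 0, (uintptr_t)thread_self(),
                      ATOMIC_ACQUIRE);
}

static inline uintptr_t
rtmutex_unlock_fast(struct rtmutex *rtmutex)
{
    /* Fails if any contention bit is set alongside the owner. */
    return atomic_cas(&rtmutex->owner, (uintptr_t)thread_self(), 0,
                      ATOMIC_RELEASE);
}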