summaryrefslogtreecommitdiff
path: root/kern/mutex
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2017-08-27 17:20:12 +0200
committerRichard Braun <rbraun@sceen.net>2017-08-27 17:20:12 +0200
commitcbf35081fabde1ce35cbaf223fde853db86e06d4 (patch)
treea6215d6335ec42e33da4b6aadfa1b3c0be9c860b /kern/mutex
parent094319b4a0a04ae11e24b44bb67aaf901536afb2 (diff)
parent70f7512a01ba8f90aad6dbb4d285e279f0e17e64 (diff)
Merge branch 'timer_system'
Diffstat (limited to 'kern/mutex')
-rw-r--r--kern/mutex/mutex_adaptive.c121
-rw-r--r--kern/mutex/mutex_adaptive_i.h15
-rw-r--r--kern/mutex/mutex_pi_i.h8
-rw-r--r--kern/mutex/mutex_plain.c55
-rw-r--r--kern/mutex/mutex_plain_i.h16
5 files changed, 197 insertions, 18 deletions
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
index ffc47169..34fdd221 100644
--- a/kern/mutex/mutex_adaptive.c
+++ b/kern/mutex/mutex_adaptive.c
@@ -15,11 +15,14 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <kern/atomic.h>
+#include <kern/clock.h>
+#include <kern/error.h>
#include <kern/mutex.h>
#include <kern/mutex_types.h>
#include <kern/sleepq.h>
@@ -47,20 +50,23 @@ mutex_adaptive_is_owner(struct mutex *mutex, uintptr_t owner)
return mutex_adaptive_get_thread(prev) == mutex_adaptive_get_thread(owner);
}
-void
-mutex_adaptive_lock_slow(struct mutex *mutex)
+static int
+mutex_adaptive_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
{
uintptr_t self, owner;
struct sleepq *sleepq;
+ struct thread *thread;
unsigned long flags;
+ int error;
+ error = 0;
self = (uintptr_t)thread_self();
sleepq = sleepq_lend(mutex, false, &flags);
mutex_adaptive_set_contended(mutex);
- for (;;) {
+ do {
owner = atomic_cas_acquire(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
self | MUTEX_ADAPTIVE_CONTENDED);
assert(owner & MUTEX_ADAPTIVE_CONTENDED);
@@ -75,40 +81,131 @@ mutex_adaptive_lock_slow(struct mutex *mutex)
*/
while (mutex_adaptive_is_owner(mutex, owner)) {
if (thread_is_running(mutex_adaptive_get_thread(owner))) {
+ if (timed && clock_time_occurred(ticks, clock_get_time())) {
+ error = ERROR_TIMEDOUT;
+ break;
+ }
+
cpu_pause();
} else {
- sleepq_wait(sleepq, "mutex");
+ if (!timed) {
+ sleepq_wait(sleepq, "mutex");
+ } else {
+ error = sleepq_timedwait(sleepq, "mutex", ticks);
+
+ if (error) {
+ break;
+ }
+ }
}
}
- }
+ } while (!error);
/*
- * A potentially spinning thread wouldn't be accounted in the sleep queue,
- * but the only potentially spinning thread is the new owner.
+ * Attempt to clear the contended bit.
+ *
+ * In case of success, the current thread becomes the new owner, and
+ * simply checking if the sleep queue is empty is enough.
+ *
+ * Keep in mind accesses to the mutex word aren't synchronized by
+ * the sleep queue, i.e. an unlock may occur completely concurrently
+ * while attempting to clear the contended bit.
*/
+
+ if (error) {
+ if (sleepq_empty(sleepq)) {
+ owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
+ assert(owner & MUTEX_ADAPTIVE_CONTENDED);
+ thread = mutex_adaptive_get_thread(owner);
+
+ /* If there is an owner, try to clear the contended bit */
+ if (thread != NULL) {
+ owner = atomic_cas(&mutex->owner, owner,
+ (uintptr_t)thread, ATOMIC_RELAXED);
+ assert(owner & MUTEX_ADAPTIVE_CONTENDED);
+ thread = mutex_adaptive_get_thread(owner);
+ }
+
+ /*
+ * If there is no owner, the previous owner is currently unlocking
+ * the mutex, waiting for either a successful signal, or the
+ * value of the mutex to become different from the contended bit.
+ */
+ if (thread == NULL) {
+ owner = atomic_cas(&mutex->owner, owner, 0, ATOMIC_RELAXED);
+ assert(owner == MUTEX_ADAPTIVE_CONTENDED);
+ }
+ }
+
+ goto out;
+ }
+
if (sleepq_empty(sleepq)) {
atomic_store(&mutex->owner, self, ATOMIC_RELAXED);
}
+out:
sleepq_return(sleepq, flags);
+
+ return error;
+}
+
+void
+mutex_adaptive_lock_slow(struct mutex *mutex)
+{
+ int error;
+
+ error = mutex_adaptive_lock_slow_common(mutex, false, 0);
+ assert(!error);
+}
+
+int
+mutex_adaptive_timedlock_slow(struct mutex *mutex, uint64_t ticks)
+{
+ return mutex_adaptive_lock_slow_common(mutex, true, ticks);
}
void
mutex_adaptive_unlock_slow(struct mutex *mutex)
{
- uintptr_t owner;
+ uintptr_t self, owner;
struct sleepq *sleepq;
unsigned long flags;
+ int error;
- atomic_store(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);
+ self = (uintptr_t)thread_self() | MUTEX_ADAPTIVE_CONTENDED;
+
+ for (;;) {
+ owner = atomic_cas_release(&mutex->owner, self,
+ MUTEX_ADAPTIVE_CONTENDED);
+
+ if (owner == self) {
+ break;
+ } else {
+ /*
+ * The contended bit was cleared after the fast path failed,
+ * but before the slow path (re)started.
+ */
+ assert(owner == (uintptr_t)thread_self());
+ error = mutex_adaptive_unlock_fast(mutex);
+
+ if (error) {
+ continue;
+ }
+
+ return;
+ }
+ }
for (;;) {
owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
/*
- * This only happens if another thread was able to become the new
- * owner, in which case that thread isn't spinning on the current
- * thread, i.e. there is no need for an additional reference.
+ * This only happens if:
+ * 1/ Another thread was able to become the new owner, in which
+ * case that thread isn't spinning on the current thread, i.e.
+ * there is no need for an additional reference.
+ * 2/ A timeout cleared the contended bit.
*/
if (owner != MUTEX_ADAPTIVE_CONTENDED) {
break;
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
index b9952ec6..be822c24 100644
--- a/kern/mutex/mutex_adaptive_i.h
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -78,6 +78,7 @@ mutex_adaptive_unlock_fast(struct mutex *mutex)
}
void mutex_adaptive_lock_slow(struct mutex *mutex);
+int mutex_adaptive_timedlock_slow(struct mutex *mutex, uint64_t ticks);
void mutex_adaptive_unlock_slow(struct mutex *mutex);
/*
@@ -105,6 +106,20 @@ mutex_impl_lock(struct mutex *mutex)
}
}
+static inline int
+mutex_impl_timedlock(struct mutex *mutex, uint64_t ticks)
+{
+ int error;
+
+ error = mutex_adaptive_lock_fast(mutex);
+
+ if (unlikely(error)) {
+ error = mutex_adaptive_timedlock_slow(mutex, ticks);
+ }
+
+ return error;
+}
+
static inline void
mutex_impl_unlock(struct mutex *mutex)
{
diff --git a/kern/mutex/mutex_pi_i.h b/kern/mutex/mutex_pi_i.h
index 6c39db74..616f09b7 100644
--- a/kern/mutex/mutex_pi_i.h
+++ b/kern/mutex/mutex_pi_i.h
@@ -23,6 +23,8 @@
" use <kern/mutex.h> instead"
#endif
+#include <stdint.h>
+
#include <kern/mutex_types.h>
#include <kern/rtmutex.h>
@@ -51,6 +53,12 @@ mutex_impl_lock(struct mutex *mutex)
rtmutex_lock(&mutex->rtmutex);
}
+static inline int
+mutex_impl_timedlock(struct mutex *mutex, uint64_t ticks)
+{
+ return rtmutex_timedlock(&mutex->rtmutex, ticks);
+}
+
static inline void
mutex_impl_unlock(struct mutex *mutex)
{
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
index 5e4ba537..58fc4878 100644
--- a/kern/mutex/mutex_plain.c
+++ b/kern/mutex/mutex_plain.c
@@ -15,20 +15,25 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
+#include <stdint.h>
#include <kern/atomic.h>
#include <kern/mutex.h>
#include <kern/mutex_types.h>
#include <kern/sleepq.h>
-void
-mutex_plain_lock_slow(struct mutex *mutex)
+static int
+mutex_plain_lock_slow_common(struct mutex *mutex, bool timed, uint64_t ticks)
{
unsigned int state;
struct sleepq *sleepq;
unsigned long flags;
+ int error;
+
+ error = 0;
sleepq = sleepq_lend(mutex, false, &flags);
@@ -39,14 +44,49 @@ mutex_plain_lock_slow(struct mutex *mutex)
break;
}
- sleepq_wait(sleepq, "mutex");
+ if (!timed) {
+ sleepq_wait(sleepq, "mutex");
+ } else {
+ error = sleepq_timedwait(sleepq, "mutex", ticks);
+
+ if (error) {
+ break;
+ }
+ }
+ }
+
+ if (error) {
+ if (sleepq_empty(sleepq)) {
+ atomic_cas(&mutex->state, MUTEX_CONTENDED,
+ MUTEX_LOCKED, ATOMIC_RELAXED);
+ }
+
+ goto out;
}
if (sleepq_empty(sleepq)) {
atomic_store(&mutex->state, MUTEX_LOCKED, ATOMIC_RELAXED);
}
+out:
sleepq_return(sleepq, flags);
+
+ return error;
+}
+
+void
+mutex_plain_lock_slow(struct mutex *mutex)
+{
+ int error;
+
+ error = mutex_plain_lock_slow_common(mutex, false, 0);
+ assert(!error);
+}
+
+int
+mutex_plain_timedlock_slow(struct mutex *mutex, uint64_t ticks)
+{
+ return mutex_plain_lock_slow_common(mutex, true, ticks);
}
void
@@ -57,8 +97,11 @@ mutex_plain_unlock_slow(struct mutex *mutex)
sleepq = sleepq_acquire(mutex, false, &flags);
- if (sleepq != NULL) {
- sleepq_signal(sleepq);
- sleepq_release(sleepq, flags);
+ if (sleepq == NULL) {
+ return;
}
+
+ sleepq_signal(sleepq);
+
+ sleepq_release(sleepq, flags);
}
diff --git a/kern/mutex/mutex_plain_i.h b/kern/mutex/mutex_plain_i.h
index 4f112b89..58e565ed 100644
--- a/kern/mutex/mutex_plain_i.h
+++ b/kern/mutex/mutex_plain_i.h
@@ -24,6 +24,7 @@
#endif
#include <assert.h>
+#include <stdint.h>
#include <kern/atomic.h>
#include <kern/error.h>
@@ -71,6 +72,7 @@ mutex_plain_unlock_fast(struct mutex *mutex)
}
void mutex_plain_lock_slow(struct mutex *mutex);
+int mutex_plain_timedlock_slow(struct mutex *mutex, uint64_t ticks);
void mutex_plain_unlock_slow(struct mutex *mutex);
/*
@@ -98,6 +100,20 @@ mutex_impl_lock(struct mutex *mutex)
}
}
+static inline int
+mutex_impl_timedlock(struct mutex *mutex, uint64_t ticks)
+{
+ int error;
+
+ error = mutex_plain_lock_fast(mutex);
+
+ if (unlikely(error)) {
+ error = mutex_plain_timedlock_slow(mutex, ticks);
+ }
+
+ return error;
+}
+
static inline void
mutex_impl_unlock(struct mutex *mutex)
{