author     Richard Braun <rbraun@sceen.net>    2017-07-21 00:54:31 +0200
committer  Richard Braun <rbraun@sceen.net>    2017-07-21 00:54:54 +0200
commit     4c0dcbeb7d363918c9a4a75faf3f1912f580f9c1 (patch)
tree       6fdb4a1b80d4aa51bbafda6f930f9f222adc4368 /kern
parent     adcae3076edee5ed24cb06f4328f88cfa5e8998a (diff)
parent     5c2cf8fff7a1d6dc6b88615df5433ddccbbcf51f (diff)
Merge branch 'adaptive_spinning'
Diffstat (limited to 'kern')
-rw-r--r--  kern/mutex.c                        54
-rw-r--r--  kern/mutex.h                       105
-rw-r--r--  kern/mutex/mutex_adaptive.c        138
-rw-r--r--  kern/mutex/mutex_adaptive_i.h      120
-rw-r--r--  kern/mutex/mutex_adaptive_types.h   35
-rw-r--r--  kern/mutex/mutex_pi_i.h             60
-rw-r--r--  kern/mutex/mutex_pi_types.h         39
-rw-r--r--  kern/mutex/mutex_plain.c            65
-rw-r--r--  kern/mutex/mutex_plain_i.h         113
-rw-r--r--  kern/mutex/mutex_plain_types.h      33
-rw-r--r--  kern/mutex_i.h                      54
-rw-r--r--  kern/mutex_types.h                  26
-rw-r--r--  kern/rtmutex.c                       2
-rw-r--r--  kern/sleepq.c                       27
-rw-r--r--  kern/sleepq.h                        6
-rw-r--r--  kern/thread.c                       15
-rw-r--r--  kern/thread.h                        8
17 files changed, 686 insertions, 214 deletions
diff --git a/kern/mutex.c b/kern/mutex.c
index 87e3d64e..62609768 100644
--- a/kern/mutex.c
+++ b/kern/mutex.c
@@ -15,60 +15,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef X15_MUTEX_PI
-
-#include <stddef.h>
-
#include <kern/init.h>
-#include <kern/mutex.h>
-#include <kern/mutex_i.h>
-#include <kern/sleepq.h>
-
-void
-mutex_lock_slow(struct mutex *mutex)
-{
- struct sleepq *sleepq;
- unsigned long flags;
- unsigned int state;
-
- sleepq = sleepq_lend(mutex, false, &flags);
-
- for (;;) {
- state = atomic_swap_acquire(&mutex->state, MUTEX_CONTENDED);
-
- if (state == MUTEX_UNLOCKED) {
- break;
- }
-
- sleepq_wait(sleepq, "mutex");
- }
-
- if (sleepq_empty(sleepq)) {
- state = atomic_swap_acquire(&mutex->state, MUTEX_LOCKED);
- assert(state == MUTEX_CONTENDED);
- }
-
- sleepq_return(sleepq, flags);
-}
-
-void
-mutex_unlock_slow(struct mutex *mutex)
-{
- struct sleepq *sleepq;
- unsigned long flags;
-
- sleepq = sleepq_acquire(mutex, false, &flags);
-
- if (sleepq == NULL) {
- return;
- }
-
- sleepq_signal(sleepq);
-
- sleepq_release(sleepq, flags);
-}
-
-#endif /* X15_MUTEX_PI */
+#include <kern/thread.h>
static int __init
mutex_setup(void)
diff --git a/kern/mutex.h b/kern/mutex.h
index 103af02a..f192a70a 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -18,77 +18,38 @@
* Mutual exclusion sleep locks.
*
* Unlike spin locks, acquiring a mutex may make the calling thread sleep.
- *
- * TODO Adaptive spinning.
*/
#ifndef _KERN_MUTEX_H
#define _KERN_MUTEX_H
-#include <kern/init.h>
-#include <kern/mutex_types.h>
-
-#ifdef X15_MUTEX_PI
-
-#include <kern/rtmutex.h>
-
-struct mutex;
-
-#define mutex_assert_locked(mutex) rtmutex_assert_locked(&(mutex)->rtmutex)
-
-static inline void
-mutex_init(struct mutex *mutex)
-{
- rtmutex_init(&mutex->rtmutex);
-}
-
-static inline int
-mutex_trylock(struct mutex *mutex)
-{
- return rtmutex_trylock(&mutex->rtmutex);
-}
-
-static inline void
-mutex_lock(struct mutex *mutex)
-{
- rtmutex_lock(&mutex->rtmutex);
-}
-
-static inline void
-mutex_unlock(struct mutex *mutex)
-{
- rtmutex_unlock(&mutex->rtmutex);
-
- /*
- * If this mutex was used along with a condition variable, wake up
- * a potential pending waiter. This must be done after the mutex is
- * unlocked so that a higher priority thread can directly acquire it.
- */
- thread_wakeup_last_cond();
-}
+#if defined(X15_MUTEX_PI) && defined(X15_MUTEX_ADAPTIVE)
+#error "only one of X15_MUTEX_PI and X15_MUTEX_ADAPTIVE may be defined"
+#endif
-#else /* X15_MUTEX_PI */
+#if defined(X15_MUTEX_PI)
+#include <kern/mutex/mutex_pi_i.h>
+#elif defined(X15_MUTEX_ADAPTIVE)
+#include <kern/mutex/mutex_adaptive_i.h>
+#else
+#include <kern/mutex/mutex_plain_i.h>
+#endif
-#include <assert.h>
-
-#include <kern/error.h>
-#include <kern/macros.h>
-#include <kern/mutex_i.h>
+#include <kern/init.h>
+#include <kern/mutex_types.h>
#include <kern/thread.h>
-struct mutex;
-
-#define mutex_assert_locked(mutex) assert((mutex)->state != MUTEX_UNLOCKED)
-
/*
* Initialize a mutex.
*/
static inline void
mutex_init(struct mutex *mutex)
{
- mutex->state = MUTEX_UNLOCKED;
+ mutex_impl_init(mutex);
}
+#define mutex_assert_locked(mutex) mutex_impl_assert_locked(mutex)
+
/*
* Attempt to lock the given mutex.
*
@@ -99,37 +60,20 @@ mutex_init(struct mutex *mutex)
static inline int
mutex_trylock(struct mutex *mutex)
{
- unsigned int state;
-
- state = mutex_lock_fast(mutex);
-
- if (unlikely(state != MUTEX_UNLOCKED)) {
- assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
- return ERROR_BUSY;
- }
-
- return 0;
+ return mutex_impl_trylock(mutex);
}
/*
* Lock a mutex.
*
- * If the mutex is already locked, the calling thread sleeps until the
- * mutex is unlocked.
+ * On return, the mutex is locked. A mutex can only be locked once.
*
- * A mutex can only be locked once.
+ * This function may sleep.
*/
static inline void
mutex_lock(struct mutex *mutex)
{
- unsigned int state;
-
- state = mutex_lock_fast(mutex);
-
- if (unlikely(state != MUTEX_UNLOCKED)) {
- assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
- mutex_lock_slow(mutex);
- }
+ mutex_impl_lock(mutex);
}
/*
@@ -141,14 +85,7 @@ mutex_lock(struct mutex *mutex)
static inline void
mutex_unlock(struct mutex *mutex)
{
- unsigned int state;
-
- state = mutex_unlock_fast(mutex);
-
- if (unlikely(state != MUTEX_LOCKED)) {
- assert(state == MUTEX_CONTENDED);
- mutex_unlock_slow(mutex);
- }
+ mutex_impl_unlock(mutex);
/*
* If this mutex was used along with a condition variable, wake up
@@ -157,8 +94,6 @@ mutex_unlock(struct mutex *mutex)
thread_wakeup_last_cond();
}
-#endif /* X15_MUTEX_PI */
-
/*
* This init operation provides :
* - uncontended mutex locking
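
For illustration (not part of the patch): with the selection above, callers use the same public interface whether the plain, adaptive or PI variant is built in. A minimal caller sketch, assuming a made-up counter structure:

    #include <kern/mutex.h>

    struct counter {
        struct mutex lock;          /* plain, adaptive or PI, per build options */
        unsigned long value;
    };

    static void
    counter_init(struct counter *counter)
    {
        mutex_init(&counter->lock);
        counter->value = 0;
    }

    static void
    counter_inc(struct counter *counter)
    {
        mutex_lock(&counter->lock); /* may sleep if contended */
        counter->value++;
        mutex_unlock(&counter->lock);
    }
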
diff --git a/kern/mutex/mutex_adaptive.c b/kern/mutex/mutex_adaptive.c
new file mode 100644
index 00000000..ffc47169
--- /dev/null
+++ b/kern/mutex/mutex_adaptive.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2017 Agustina Arzille.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <kern/atomic.h>
+#include <kern/mutex.h>
+#include <kern/mutex_types.h>
+#include <kern/sleepq.h>
+#include <kern/thread.h>
+#include <machine/cpu.h>
+
+static struct thread *
+mutex_adaptive_get_thread(uintptr_t owner)
+{
+ return (struct thread *)(owner & ~MUTEX_ADAPTIVE_CONTENDED);
+}
+
+static void
+mutex_adaptive_set_contended(struct mutex *mutex)
+{
+ atomic_or(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);
+}
+
+static inline bool
+mutex_adaptive_is_owner(struct mutex *mutex, uintptr_t owner)
+{
+ uintptr_t prev;
+
+ prev = atomic_load(&mutex->owner, ATOMIC_RELAXED);
+ return mutex_adaptive_get_thread(prev) == mutex_adaptive_get_thread(owner);
+}
+
+void
+mutex_adaptive_lock_slow(struct mutex *mutex)
+{
+ uintptr_t self, owner;
+ struct sleepq *sleepq;
+ unsigned long flags;
+
+ self = (uintptr_t)thread_self();
+
+ sleepq = sleepq_lend(mutex, false, &flags);
+
+ mutex_adaptive_set_contended(mutex);
+
+ for (;;) {
+ owner = atomic_cas_acquire(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED,
+ self | MUTEX_ADAPTIVE_CONTENDED);
+ assert(owner & MUTEX_ADAPTIVE_CONTENDED);
+
+ if (mutex_adaptive_get_thread(owner) == NULL) {
+ break;
+ }
+
+ /*
+ * The owner may not return from the unlock function if a thread is
+ * spinning on it.
+ */
+ while (mutex_adaptive_is_owner(mutex, owner)) {
+ if (thread_is_running(mutex_adaptive_get_thread(owner))) {
+ cpu_pause();
+ } else {
+ sleepq_wait(sleepq, "mutex");
+ }
+ }
+ }
+
+ /*
+ * A potentially spinning thread wouldn't be accounted in the sleep queue,
+ * but the only potentially spinning thread is the new owner.
+ */
+ if (sleepq_empty(sleepq)) {
+ atomic_store(&mutex->owner, self, ATOMIC_RELAXED);
+ }
+
+ sleepq_return(sleepq, flags);
+}
+
+void
+mutex_adaptive_unlock_slow(struct mutex *mutex)
+{
+ uintptr_t owner;
+ struct sleepq *sleepq;
+ unsigned long flags;
+
+ atomic_store(&mutex->owner, MUTEX_ADAPTIVE_CONTENDED, ATOMIC_RELEASE);
+
+ for (;;) {
+ owner = atomic_load(&mutex->owner, ATOMIC_RELAXED);
+
+ /*
+ * This only happens if another thread was able to become the new
+ * owner, in which case that thread isn't spinning on the current
+ * thread, i.e. there is no need for an additional reference.
+ */
+ if (owner != MUTEX_ADAPTIVE_CONTENDED) {
+ break;
+ }
+
+ /*
+ * Avoid contending with incoming threads that are about to spin/wait
+ * on the mutex. This is particularly expensive with queued locks.
+ *
+ * Also, this call returns NULL if another thread is currently spinning
+ * on the current thread, in which case the latter doesn't return,
+ * averting the need for an additional reference.
+ */
+ sleepq = sleepq_tryacquire(mutex, false, &flags);
+
+ if (sleepq != NULL) {
+ sleepq_signal(sleepq);
+ sleepq_release(sleepq, flags);
+ break;
+ }
+
+ /*
+ * Acquiring the sleep queue may fail because of contention on
+ * unrelated objects. Retry.
+ */
+ }
+}
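
For illustration (not part of the patch): the owner word manipulated above packs the owner's struct thread pointer together with the contended flag in bit 0, which assumes thread structures are at least 2-byte aligned so that bit is always free. A minimal sketch of the encoding, assuming an adaptive build so MUTEX_ADAPTIVE_CONTENDED is visible through <kern/mutex.h>:

    /* Sketch only: mirrors mutex_adaptive_get_thread() and
     * mutex_adaptive_set_contended() above.
     */
    #include <assert.h>
    #include <stdint.h>

    #include <kern/mutex.h>
    #include <kern/thread.h>

    static void
    owner_word_sketch(void)
    {
        uintptr_t owner;
        struct thread *thread;

        owner = (uintptr_t)thread_self();               /* locked, no waiters */
        owner |= MUTEX_ADAPTIVE_CONTENDED;              /* a waiter showed up */
        thread = (struct thread *)(owner & ~MUTEX_ADAPTIVE_CONTENDED);
        assert(thread == thread_self());                /* pointer recovered  */
    }
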
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
new file mode 100644
index 00000000..b9952ec6
--- /dev/null
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2017 Agustina Arzille.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _KERN_MUTEX_ADAPTIVE_I_H
+#define _KERN_MUTEX_ADAPTIVE_I_H
+
+#ifndef _KERN_MUTEX_H
+#error "don't include <kern/mutex/mutex_adaptive_i.h> directly," \
+ " use <kern/mutex.h> instead"
+#endif
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <kern/atomic.h>
+#include <kern/error.h>
+#include <kern/macros.h>
+#include <kern/mutex_types.h>
+#include <kern/thread.h>
+
+/*
+ * Mutex flags.
+ *
+ * The "contended" flag indicates that threads are waiting for the mutex
+ * to be unlocked, potentially spinning on the owner. It forces threads
+ * trying to lock the mutex as well as the owner to take the slow path.
+ */
+#define MUTEX_ADAPTIVE_CONTENDED 0x1
+
+static inline void
+mutex_adaptive_init(struct mutex *mutex)
+{
+ mutex->owner = 0;
+}
+
+#define mutex_adaptive_assert_locked(mutex) assert((mutex)->owner != 0)
+
+static inline int
+mutex_adaptive_lock_fast(struct mutex *mutex)
+{
+ uintptr_t owner;
+
+ owner = atomic_cas_acquire(&mutex->owner, 0, (uintptr_t)thread_self());
+
+ if (unlikely(owner != 0)) {
+ return ERROR_BUSY;
+ }
+
+ return 0;
+}
+
+static inline int
+mutex_adaptive_unlock_fast(struct mutex *mutex)
+{
+ uintptr_t owner;
+
+ owner = atomic_cas_release(&mutex->owner, (uintptr_t)thread_self(), 0);
+
+ if (unlikely(owner & MUTEX_ADAPTIVE_CONTENDED)) {
+ return ERROR_BUSY;
+ }
+
+ return 0;
+}
+
+void mutex_adaptive_lock_slow(struct mutex *mutex);
+void mutex_adaptive_unlock_slow(struct mutex *mutex);
+
+/*
+ * Interface exported to the public mutex header.
+ */
+
+#define mutex_impl_init mutex_adaptive_init
+#define mutex_impl_assert_locked mutex_adaptive_assert_locked
+
+static inline int
+mutex_impl_trylock(struct mutex *mutex)
+{
+ return mutex_adaptive_lock_fast(mutex);
+}
+
+static inline void
+mutex_impl_lock(struct mutex *mutex)
+{
+ int error;
+
+ error = mutex_adaptive_lock_fast(mutex);
+
+ if (unlikely(error)) {
+ mutex_adaptive_lock_slow(mutex);
+ }
+}
+
+static inline void
+mutex_impl_unlock(struct mutex *mutex)
+{
+ int error;
+
+ error = mutex_adaptive_unlock_fast(mutex);
+
+ if (unlikely(error)) {
+ mutex_adaptive_unlock_slow(mutex);
+ }
+}
+
+#endif /* _KERN_MUTEX_ADAPTIVE_I_H */
diff --git a/kern/mutex/mutex_adaptive_types.h b/kern/mutex/mutex_adaptive_types.h
new file mode 100644
index 00000000..efbbf218
--- /dev/null
+++ b/kern/mutex/mutex_adaptive_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 Agustina Arzille.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Isolated type definition used to avoid inclusion circular dependencies.
+ */
+
+#ifndef _KERN_MUTEX_ADAPTIVE_TYPES_H
+#define _KERN_MUTEX_ADAPTIVE_TYPES_H
+
+#ifndef _KERN_MUTEX_TYPES_H
+#error "don't include <kern/mutex/mutex_adaptive_types.h> directly," \
+ " use <kern/mutex_types.h> instead"
+#endif
+
+#include <stdint.h>
+
+struct mutex {
+ uintptr_t owner;
+};
+
+#endif /* _KERN_MUTEX_ADAPTIVE_TYPES_H */
diff --git a/kern/mutex/mutex_pi_i.h b/kern/mutex/mutex_pi_i.h
new file mode 100644
index 00000000..6c39db74
--- /dev/null
+++ b/kern/mutex/mutex_pi_i.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _KERN_MUTEX_PI_I_H
+#define _KERN_MUTEX_PI_I_H
+
+#ifndef _KERN_MUTEX_H
+#error "don't include <kern/mutex/mutex_pi_i.h> directly," \
+ " use <kern/mutex.h> instead"
+#endif
+
+#include <kern/mutex_types.h>
+#include <kern/rtmutex.h>
+
+/*
+ * Interface exported to the public mutex header.
+ */
+
+static inline void
+mutex_impl_init(struct mutex *mutex)
+{
+ rtmutex_init(&mutex->rtmutex);
+}
+
+#define mutex_impl_assert_locked(mutex) \
+ rtmutex_assert_locked(&(mutex)->rtmutex)
+
+static inline int
+mutex_impl_trylock(struct mutex *mutex)
+{
+ return rtmutex_trylock(&mutex->rtmutex);
+}
+
+static inline void
+mutex_impl_lock(struct mutex *mutex)
+{
+ rtmutex_lock(&mutex->rtmutex);
+}
+
+static inline void
+mutex_impl_unlock(struct mutex *mutex)
+{
+ rtmutex_unlock(&mutex->rtmutex);
+}
+
+#endif /* _KERN_MUTEX_PI_I_H */
diff --git a/kern/mutex/mutex_pi_types.h b/kern/mutex/mutex_pi_types.h
new file mode 100644
index 00000000..d9ebb6e2
--- /dev/null
+++ b/kern/mutex/mutex_pi_types.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Isolated type definition used to avoid inclusion circular dependencies.
+ */
+
+#ifndef _KERN_MUTEX_PI_TYPES_H
+#define _KERN_MUTEX_PI_TYPES_H
+
+#ifndef _KERN_MUTEX_TYPES_H
+#error "don't include <kern/mutex/mutex_pi_types.h> directly," \
+ " use <kern/mutex_types.h> instead"
+#endif
+
+#include <kern/rtmutex_types.h>
+
+/*
+ * Do not directly alias rtmutex to make sure they cannot be used
+ * with condition variables by mistake.
+ */
+struct mutex {
+ struct rtmutex rtmutex;
+};
+
+#endif /* _KERN_MUTEX_PI_TYPES_H */
diff --git a/kern/mutex/mutex_plain.c b/kern/mutex/mutex_plain.c
new file mode 100644
index 00000000..a925a5a2
--- /dev/null
+++ b/kern/mutex/mutex_plain.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+
+#include <kern/atomic.h>
+#include <kern/mutex.h>
+#include <kern/mutex_types.h>
+#include <kern/sleepq.h>
+
+void
+mutex_plain_lock_slow(struct mutex *mutex)
+{
+ unsigned int state;
+ struct sleepq *sleepq;
+ unsigned long flags;
+
+ sleepq = sleepq_lend(mutex, false, &flags);
+
+ for (;;) {
+ state = atomic_swap_release(&mutex->state, MUTEX_CONTENDED);
+
+ if (state == MUTEX_UNLOCKED) {
+ break;
+ }
+
+ sleepq_wait(sleepq, "mutex");
+ }
+
+ if (sleepq_empty(sleepq)) {
+ /* TODO Review memory order */
+ atomic_store(&mutex->state, MUTEX_LOCKED, ATOMIC_RELEASE);
+ }
+
+ sleepq_return(sleepq, flags);
+}
+
+void
+mutex_plain_unlock_slow(struct mutex *mutex)
+{
+ struct sleepq *sleepq;
+ unsigned long flags;
+
+ sleepq = sleepq_acquire(mutex, false, &flags);
+
+ if (sleepq != NULL) {
+ sleepq_signal(sleepq);
+ sleepq_release(sleepq, flags);
+ }
+}
diff --git a/kern/mutex/mutex_plain_i.h b/kern/mutex/mutex_plain_i.h
new file mode 100644
index 00000000..4f112b89
--- /dev/null
+++ b/kern/mutex/mutex_plain_i.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _KERN_MUTEX_PLAIN_I_H
+#define _KERN_MUTEX_PLAIN_I_H
+
+#ifndef _KERN_MUTEX_H
+#error "don't include <kern/mutex/mutex_plain_i.h> directly," \
+ " use <kern/mutex.h> instead"
+#endif
+
+#include <assert.h>
+
+#include <kern/atomic.h>
+#include <kern/error.h>
+#include <kern/mutex_types.h>
+
+#define MUTEX_UNLOCKED 0
+#define MUTEX_LOCKED 1
+#define MUTEX_CONTENDED 2
+
+static inline void
+mutex_plain_init(struct mutex *mutex)
+{
+ mutex->state = MUTEX_UNLOCKED;
+}
+
+#define mutex_plain_assert_locked(mutex) \
+ assert((mutex)->state != MUTEX_UNLOCKED)
+
+static inline int
+mutex_plain_lock_fast(struct mutex *mutex)
+{
+ unsigned int state;
+
+ state = atomic_cas_acquire(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
+
+ if (unlikely(state != MUTEX_UNLOCKED)) {
+ return ERROR_BUSY;
+ }
+
+ return 0;
+}
+
+static inline int
+mutex_plain_unlock_fast(struct mutex *mutex)
+{
+ unsigned int state;
+
+ state = atomic_swap_release(&mutex->state, MUTEX_UNLOCKED);
+
+ if (unlikely(state == MUTEX_CONTENDED)) {
+ return ERROR_BUSY;
+ }
+
+ return 0;
+}
+
+void mutex_plain_lock_slow(struct mutex *mutex);
+void mutex_plain_unlock_slow(struct mutex *mutex);
+
+/*
+ * Interface exported to the public mutex header.
+ */
+
+#define mutex_impl_init mutex_plain_init
+#define mutex_impl_assert_locked mutex_plain_assert_locked
+
+static inline int
+mutex_impl_trylock(struct mutex *mutex)
+{
+ return mutex_plain_lock_fast(mutex);
+}
+
+static inline void
+mutex_impl_lock(struct mutex *mutex)
+{
+ int error;
+
+ error = mutex_plain_lock_fast(mutex);
+
+ if (unlikely(error)) {
+ mutex_plain_lock_slow(mutex);
+ }
+}
+
+static inline void
+mutex_impl_unlock(struct mutex *mutex)
+{
+ int error;
+
+ error = mutex_plain_unlock_fast(mutex);
+
+ if (unlikely(error)) {
+ mutex_plain_unlock_slow(mutex);
+ }
+}
+
+#endif /* _KERN_MUTEX_PLAIN_I_H */
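
To summarize the fast/slow split above (a reviewer's sketch, not part of the patch, assuming the fast paths are read correctly), the plain mutex moves through three states; the slow unlock path is only taken when a locker has already advertised itself by swapping in MUTEX_CONTENDED:

    /*
     * Sketch only: plain mutex state transitions implied by the code above.
     *
     *   UNLOCKED  --lock_fast (CAS)-----------------> LOCKED
     *   LOCKED    --lock_slow (swap, then sleep)----> CONTENDED
     *   UNLOCKED  --lock_slow (swap, woken waiter)--> CONTENDED   (acquired)
     *   CONTENDED --lock_slow (no waiters left)-----> LOCKED
     *   LOCKED    --unlock_fast (swap)--------------> UNLOCKED
     *   CONTENDED --unlock_fast (swap)--------------> UNLOCKED, then
     *                unlock_slow signals the sleep queue
     */
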
diff --git a/kern/mutex/mutex_plain_types.h b/kern/mutex/mutex_plain_types.h
new file mode 100644
index 00000000..02731e94
--- /dev/null
+++ b/kern/mutex/mutex_plain_types.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Isolated type definition used to avoid inclusion circular dependencies.
+ */
+
+#ifndef _KERN_MUTEX_PLAIN_TYPES_H
+#define _KERN_MUTEX_PLAIN_TYPES_H
+
+#ifndef _KERN_MUTEX_TYPES_H
+#error "don't include <kern/mutex/mutex_plain_types.h> directly," \
+ " use <kern/mutex_types.h> instead"
+#endif
+
+struct mutex {
+ unsigned int state;
+};
+
+#endif /* _KERN_MUTEX_PLAIN_TYPES_H */
diff --git a/kern/mutex_i.h b/kern/mutex_i.h
deleted file mode 100644
index 3b36dcde..00000000
--- a/kern/mutex_i.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2013-2017 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _KERN_MUTEX_I_H
-#define _KERN_MUTEX_I_H
-
-#ifndef X15_MUTEX_PI
-
-#include <assert.h>
-
-#include <kern/atomic.h>
-#include <kern/mutex_types.h>
-
-#define MUTEX_UNLOCKED 0
-#define MUTEX_LOCKED 1
-#define MUTEX_CONTENDED 2
-
-static inline unsigned int
-mutex_lock_fast(struct mutex *mutex)
-{
- return atomic_cas_acquire(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
-}
-
-static inline unsigned int
-mutex_unlock_fast(struct mutex *mutex)
-{
- unsigned int state;
-
- state = atomic_swap_release(&mutex->state, MUTEX_UNLOCKED);
- assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
- return state;
-}
-
-void mutex_lock_slow(struct mutex *mutex);
-
-void mutex_unlock_slow(struct mutex *mutex);
-
-#endif /* X15_MUTEX_PI */
-
-#endif /* _KERN_MUTEX_I_H */
diff --git a/kern/mutex_types.h b/kern/mutex_types.h
index 4b7947fc..eb2bc339 100644
--- a/kern/mutex_types.h
+++ b/kern/mutex_types.h
@@ -21,24 +21,12 @@
#ifndef _KERN_MUTEX_TYPES_H
#define _KERN_MUTEX_TYPES_H
-#ifdef X15_MUTEX_PI
-
-#include <kern/rtmutex_types.h>
-
-/*
- * Do not directly alias rtmutex to make sure they cannot be used
- * with condition variables by mistake.
- */
-struct mutex {
- struct rtmutex rtmutex;
-};
-
-#else /* X15_MUTEX_PI */
-
-struct mutex {
- unsigned int state;
-};
-
-#endif /* X15_MUTEX_PI */
+#if defined(X15_MUTEX_PI)
+#include <kern/mutex/mutex_pi_types.h>
+#elif defined(X15_MUTEX_ADAPTIVE)
+#include <kern/mutex/mutex_adaptive_types.h>
+#else
+#include <kern/mutex/mutex_plain_types.h>
+#endif
#endif /* _KERN_MUTEX_TYPES_H */
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index e78c3899..fcf5f358 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -36,7 +36,7 @@ void
rtmutex_lock_slow(struct rtmutex *rtmutex)
{
struct turnstile *turnstile;
- uintptr_t owner, prev_owner;
+ uintptr_t owner, prev_owner; /* TODO Review names */
struct thread *thread;
uintptr_t bits;
diff --git a/kern/sleepq.c b/kern/sleepq.c
index cc85705d..44ad9962 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -267,6 +267,33 @@ sleepq_acquire(const void *sync_obj, bool condition, unsigned long *flags)
return sleepq;
}
+struct sleepq *
+sleepq_tryacquire(const void *sync_obj, bool condition, unsigned long *flags)
+{
+ struct sleepq_bucket *bucket;
+ struct sleepq *sleepq;
+ int error;
+
+ assert(sync_obj != NULL);
+
+ bucket = sleepq_bucket_get(sync_obj, condition);
+
+ error = spinlock_trylock_intr_save(&bucket->lock, flags);
+
+ if (error) {
+ return NULL;
+ }
+
+ sleepq = sleepq_bucket_lookup(bucket, sync_obj);
+
+ if (sleepq == NULL) {
+ spinlock_unlock_intr_restore(&bucket->lock, *flags);
+ return NULL;
+ }
+
+ return sleepq;
+}
+
void
sleepq_release(struct sleepq *sleepq, unsigned long flags)
{
diff --git a/kern/sleepq.h b/kern/sleepq.h
index 4d37182a..651b3e7c 100644
--- a/kern/sleepq.h
+++ b/kern/sleepq.h
@@ -56,9 +56,15 @@ void sleepq_destroy(struct sleepq *sleepq);
*
* The condition argument must be true if the synchronization object
* is a condition variable.
+ *
+ * Note that, in the case of the non-blocking variant, the call may also
+ * return NULL if internal state shared by unrelated synchronization
+ * objects is locked.
*/
struct sleepq * sleepq_acquire(const void *sync_obj, bool condition,
unsigned long *flags);
+struct sleepq * sleepq_tryacquire(const void *sync_obj, bool condition,
+ unsigned long *flags);
void sleepq_release(struct sleepq *sleepq, unsigned long flags);
/*
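
For illustration (not part of the patch): the NULL return from the non-blocking variant is ambiguous, meaning either that no sleep queue exists for the object or merely that the shared bucket lock was busy. A caller that must wake a waiter therefore rechecks its own condition and retries, as mutex_adaptive_unlock_slow() does above. waiter_pending() below is a hypothetical caller-side check:

    #include <stddef.h>

    #include <kern/sleepq.h>

    /* Sketch only: waiter_pending() stands for lock-specific state
     * (the adaptive mutex rereads its owner word instead).
     */
    static void
    wakeup_one_waiter(const void *sync_obj)
    {
        struct sleepq *sleepq;
        unsigned long flags;

        while (waiter_pending(sync_obj)) {
            sleepq = sleepq_tryacquire(sync_obj, false, &flags);

            if (sleepq != NULL) {
                sleepq_signal(sleepq);
                sleepq_release(sleepq, flags);
                break;
            }

            /* NULL: no queue for this object, or the bucket lock was busy. */
        }
    }
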
diff --git a/kern/thread.c b/kern/thread.c
index df32ab4c..8e5b2b52 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -554,7 +554,7 @@ thread_runq_get_next(struct thread_runq *runq)
thread = thread_sched_ops[i].get_next(runq);
if (thread != NULL) {
- runq->current = thread;
+ atomic_store(&runq->current, thread, ATOMIC_RELAXED);
return thread;
}
}
@@ -574,7 +574,7 @@ thread_runq_set_next(struct thread_runq *runq, struct thread *thread)
ops->set_next(runq, thread);
}
- runq->current = thread;
+ atomic_store(&runq->current, thread, ATOMIC_RELAXED);
}
static void
@@ -2852,3 +2852,14 @@ thread_key_create(unsigned int *keyp, thread_dtor_fn_t dtor)
thread_dtors[key] = dtor;
*keyp = key;
}
+
+bool
+thread_is_running(const struct thread *thread)
+{
+ const struct thread_runq *runq;
+
+ runq = thread->runq;
+
+ return (runq != NULL)
+ && (atomic_load(&runq->current, ATOMIC_RELAXED) == thread);
+}
diff --git a/kern/thread.h b/kern/thread.h
index a1b33ac2..43a98e87 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -733,6 +733,14 @@ thread_get_specific(unsigned int key)
}
/*
+ * Return true if the given thread is running.
+ *
+ * Note that this check is speculative, and may not return an accurate
+ * result. It may only be used for optimistic optimizations.
+ */
+bool thread_is_running(const struct thread *thread);
+
+/*
* This init operation provides :
* - a dummy thread context for the BSP, allowing the use of thread_self()
*/
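
For illustration (not part of the patch), this is the optimistic pattern the adaptive mutex relies on: because the result is only a hint, the loop must also have a blocking exit for the case where the hint goes stale. owner_still_holds() is a hypothetical lock-specific recheck (the adaptive mutex rereads its owner word):

    while (owner_still_holds(lock, owner)) {
        if (thread_is_running(owner)) {
            cpu_pause();                    /* owner is on a CPU: keep spinning */
        } else {
            sleepq_wait(sleepq, "mutex");   /* owner is off CPU: block instead  */
        }
    }
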