author     Richard Braun <rbraun@sceen.net>  2013-04-14 18:29:12 +0200
committer  Richard Braun <rbraun@sceen.net>  2013-04-14 18:29:12 +0200
commit     04e1818f60ad8f90ea502f2f6c6ea61e6e61644c
tree       51979cf0fdea679b868b044e386e92473295bbc2
parent     909c423347085774a3fc7f8021ce765465cc92c8
kern/{condition,mutex}: refactor common code
The condition module intrusively uses mutexes. Augment the interface of the mutex module so that mutexes and conditions share common code. As a side effect, the implementation should have gained in clarity.
-rw-r--r--  Makefrag.am        1
-rw-r--r--  kern/condition.c  56
-rw-r--r--  kern/mutex.c      87
-rw-r--r--  kern/mutex.h      68
-rw-r--r--  kern/mutex_i.h   115
5 files changed, 193 insertions, 134 deletions
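
For context, the two modules are used together in the usual monitor style. The sketch below is a hypothetical caller, not part of the patch: the shared_* names are made up, and it relies only on the condition_init()/condition_wait()/condition_signal() and mutex_init()/mutex_lock()/mutex_unlock() entry points visible in the diff.

    #include <kern/condition.h>
    #include <kern/mutex.h>

    static struct mutex shared_lock;
    static struct condition shared_cond;
    static int shared_ready;

    static void
    shared_setup(void)
    {
        mutex_init(&shared_lock);
        condition_init(&shared_cond);
    }

    static void
    shared_consume(void)
    {
        mutex_lock(&shared_lock);

        /*
         * condition_wait() releases the mutex while sleeping and owns it
         * again on return, so the predicate is rechecked in a loop.
         */
        while (!shared_ready)
            condition_wait(&shared_cond, &shared_lock);

        shared_ready = 0;
        mutex_unlock(&shared_lock);
    }

    static void
    shared_produce(void)
    {
        mutex_lock(&shared_lock);
        shared_ready = 1;
        condition_signal(&shared_cond);
        mutex_unlock(&shared_lock);
    }
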
diff --git a/Makefrag.am b/Makefrag.am
index 667e8f62..a4d69a0b 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -18,6 +18,7 @@ x15_SOURCES += \
kern/macros.h \
kern/mutex.c \
kern/mutex.h \
+ kern/mutex_i.h \
kern/panic.c \
kern/panic.h \
kern/param.h \
diff --git a/kern/condition.c b/kern/condition.c
index 6f7bec4c..ccd86c5b 100644
--- a/kern/condition.c
+++ b/kern/condition.c
@@ -20,18 +20,16 @@
* queued on the mutex associated with the condition, and an attempt to wake
* one by locking and unlocking the mutex is performed. If the mutex is already
* locked, the current owner does the same when unlocking.
- *
- * TODO Refactor mutex and condition code.
*/
#include <kern/assert.h>
#include <kern/condition.h>
#include <kern/list.h>
#include <kern/mutex.h>
+#include <kern/mutex_i.h>
#include <kern/spinlock.h>
#include <kern/stddef.h>
#include <kern/thread.h>
-#include <machine/atomic.h>
void
condition_init(struct condition *condition)
@@ -44,7 +42,7 @@ condition_init(struct condition *condition)
void
condition_wait(struct condition *condition, struct mutex *mutex)
{
- struct mutex_waiter waiter, *waiter_ptr;
+ struct mutex_waiter waiter;
unsigned long state;
waiter.thread = thread_self();
@@ -60,31 +58,15 @@ condition_wait(struct condition *condition, struct mutex *mutex)
spinlock_lock(&mutex->lock);
- state = atomic_swap(&mutex->state, MUTEX_UNLOCKED);
-
- if (state != MUTEX_LOCKED) {
- assert(state == MUTEX_CONTENDED);
+ state = mutex_release(mutex);
- if (!list_empty(&mutex->waiters)) {
- waiter_ptr = list_first_entry(&mutex->waiters, struct mutex_waiter,
- node);
- thread_wakeup(waiter_ptr->thread);
- }
- }
+ if (state == MUTEX_CONTENDED)
+ mutex_signal(mutex);
spinlock_unlock(&condition->lock);
- do {
- thread_sleep(&mutex->lock);
- state = atomic_swap(&mutex->state, MUTEX_CONTENDED);
- } while (state != MUTEX_UNLOCKED);
-
- list_remove(&waiter.node);
-
- if (list_empty(&mutex->waiters)) {
- state = atomic_swap(&mutex->state, MUTEX_LOCKED);
- assert(state == MUTEX_CONTENDED);
- }
+ mutex_wait(mutex, &waiter);
+ mutex_trydowngrade(mutex);
spinlock_unlock(&mutex->lock);
}
@@ -114,13 +96,12 @@ condition_signal(struct condition *condition)
spinlock_lock(&mutex->lock);
- list_insert_tail(&mutex->waiters, &waiter->node);
- state = atomic_swap(&mutex->state, MUTEX_CONTENDED);
+ mutex_queue(mutex, waiter);
+ state = mutex_tryacquire_slow(mutex);
if (state == MUTEX_UNLOCKED) {
- state = atomic_swap(&mutex->state, MUTEX_UNLOCKED);
- assert(state == MUTEX_CONTENDED);
- thread_wakeup(waiter->thread);
+ mutex_release(mutex);
+ mutex_signal(mutex);
}
spinlock_unlock(&mutex->lock);
@@ -129,9 +110,8 @@ condition_signal(struct condition *condition)
void
condition_broadcast(struct condition *condition)
{
- struct mutex_waiter *waiter;
+ struct list waiters;
struct mutex *mutex;
- struct list tmp;
unsigned long state;
spinlock_lock(&condition->lock);
@@ -143,21 +123,19 @@ condition_broadcast(struct condition *condition)
mutex = condition->mutex;
condition->mutex = NULL;
- list_set_head(&tmp, &condition->waiters);
+ list_set_head(&waiters, &condition->waiters);
list_init(&condition->waiters);
spinlock_unlock(&condition->lock);
spinlock_lock(&mutex->lock);
- list_concat(&mutex->waiters, &tmp);
- state = atomic_swap(&mutex->state, MUTEX_CONTENDED);
+ mutex_queue_list(mutex, &waiters);
+ state = mutex_tryacquire_slow(mutex);
if (state == MUTEX_UNLOCKED) {
- state = atomic_swap(&mutex->state, MUTEX_UNLOCKED);
- assert(state == MUTEX_CONTENDED);
- waiter = list_first_entry(&mutex->waiters, struct mutex_waiter, node);
- thread_wakeup(waiter->thread);
+ mutex_release(mutex);
+ mutex_signal(mutex);
}
spinlock_unlock(&mutex->lock);
diff --git a/kern/mutex.c b/kern/mutex.c
index 90922903..e49c56c8 100644
--- a/kern/mutex.c
+++ b/kern/mutex.c
@@ -15,101 +15,36 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <kern/assert.h>
-#include <kern/list.h>
#include <kern/mutex.h>
+#include <kern/mutex_i.h>
#include <kern/spinlock.h>
#include <kern/thread.h>
-#include <machine/atomic.h>
void
-mutex_init(struct mutex *mutex)
-{
- mutex->state = MUTEX_UNLOCKED;
- spinlock_init(&mutex->lock);
- list_init(&mutex->waiters);
-}
-
-int
-mutex_trylock(struct mutex *mutex)
-{
- unsigned long state;
-
- state = atomic_cas(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
-
- if (state == MUTEX_UNLOCKED)
- return 0;
-
- return 1;
-}
-
-void
-mutex_lock(struct mutex *mutex)
+mutex_lock_slow(struct mutex *mutex)
{
struct mutex_waiter waiter;
unsigned long state;
- state = atomic_cas(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
-
- if (state == MUTEX_UNLOCKED)
- return;
-
- /*
- * The mutex was either locked or contended. Unconditionnally update its
- * state to reflect it is now contended, and to check the previous state
- * while holding the waiters lock so that the current thread doesn't miss
- * a wakeup when the owner unlocks.
- */
-
- assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
-
spinlock_lock(&mutex->lock);
- state = atomic_swap(&mutex->state, MUTEX_CONTENDED);
+ state = mutex_tryacquire_slow(mutex);
- if (state == MUTEX_UNLOCKED)
- goto out;
-
- waiter.thread = thread_self();
- list_insert_tail(&mutex->waiters, &waiter.node);
-
- do {
- thread_sleep(&mutex->lock);
- state = atomic_swap(&mutex->state, MUTEX_CONTENDED);
- } while (state != MUTEX_UNLOCKED);
-
- list_remove(&waiter.node);
-
-out:
- if (list_empty(&mutex->waiters)) {
- state = atomic_swap(&mutex->state, MUTEX_LOCKED);
- assert(state == MUTEX_CONTENDED);
+ if (state != MUTEX_UNLOCKED) {
+ waiter.thread = thread_self();
+ mutex_queue(mutex, &waiter);
+ mutex_wait(mutex, &waiter);
}
+ mutex_trydowngrade(mutex);
+
spinlock_unlock(&mutex->lock);
}
void
-mutex_unlock(struct mutex *mutex)
+mutex_unlock_slow(struct mutex *mutex)
{
- struct mutex_waiter *waiter;
- unsigned long state;
-
- state = atomic_swap(&mutex->state, MUTEX_UNLOCKED);
-
- if (state == MUTEX_LOCKED)
- return;
-
- /* The mutex was contended, wake up the next waiter if any */
-
- assert(state == MUTEX_CONTENDED);
-
spinlock_lock(&mutex->lock);
-
- if (!list_empty(&mutex->waiters)) {
- waiter = list_first_entry(&mutex->waiters, struct mutex_waiter, node);
- thread_wakeup(waiter->thread);
- }
-
+ mutex_signal(mutex);
spinlock_unlock(&mutex->lock);
}
diff --git a/kern/mutex.h b/kern/mutex.h
index dcbf98eb..08d3d304 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -25,38 +25,68 @@
#include <kern/assert.h>
#include <kern/list.h>
+#include <kern/mutex_i.h>
#include <kern/spinlock.h>
-#include <kern/thread.h>
-#define MUTEX_UNLOCKED 0
-#define MUTEX_LOCKED 1
-#define MUTEX_CONTENDED 2
-
-struct mutex_waiter {
- struct list node;
- struct thread *thread;
-};
-
-struct mutex {
- unsigned long state;
- struct spinlock lock;
- struct list waiters;
-};
+struct mutex;
#define MUTEX_INITIALIZER(mutex) \
{ MUTEX_UNLOCKED, SPINLOCK_INITIALIZER, LIST_INITIALIZER((mutex).waiters) }
-void mutex_init(struct mutex *mutex);
+static inline void
+mutex_init(struct mutex *mutex)
+{
+ mutex->state = MUTEX_UNLOCKED;
+ spinlock_init(&mutex->lock);
+ list_init(&mutex->waiters);
+}
#define mutex_assert_locked(mutex) assert((mutex)->state != MUTEX_UNLOCKED)
/*
* Return 0 on success, 1 if busy.
*/
-int mutex_trylock(struct mutex *mutex);
+static inline int
+mutex_trylock(struct mutex *mutex)
+{
+ unsigned long state;
+
+ state = mutex_tryacquire(mutex);
+
+ if (state == MUTEX_UNLOCKED)
+ return 0;
+
+ return 1;
+}
+
+static inline void
+mutex_lock(struct mutex *mutex)
+{
+ unsigned long state;
+
+ state = mutex_tryacquire(mutex);
+
+ if (state == MUTEX_UNLOCKED)
+ return;
+
+ assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
+
+ mutex_lock_slow(mutex);
+}
+
+static inline void
+mutex_unlock(struct mutex *mutex)
+{
+ unsigned long state;
+
+ state = mutex_release(mutex);
+
+ if (state == MUTEX_LOCKED)
+ return;
-void mutex_lock(struct mutex *mutex);
+ assert(state == MUTEX_CONTENDED);
-void mutex_unlock(struct mutex *mutex);
+ mutex_unlock_slow(mutex);
+}
#endif /* _KERN_MUTEX_H */
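
As a usage note on the header above: with the lock/trylock/unlock fast paths now inlined, an uncontended acquisition is a single atomic_cas and never enters kern/mutex.c. Static initialization keeps working through the unchanged MUTEX_INITIALIZER() macro; the declaration below is an illustrative sketch and the identifier is made up.

    #include <kern/mutex.h>

    static struct mutex example_lock = MUTEX_INITIALIZER(example_lock);

    static void
    example_update(void)
    {
        mutex_lock(&example_lock);      /* inline atomic_cas when uncontended */
        /* ... critical section ... */
        mutex_unlock(&example_lock);    /* inline atomic_swap when uncontended */
    }
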
diff --git a/kern/mutex_i.h b/kern/mutex_i.h
new file mode 100644
index 00000000..2fa5cb42
--- /dev/null
+++ b/kern/mutex_i.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _KERN_MUTEX_I_H
+#define _KERN_MUTEX_I_H
+
+#include <kern/assert.h>
+#include <kern/list.h>
+#include <kern/spinlock.h>
+#include <kern/thread.h>
+#include <machine/atomic.h>
+
+#define MUTEX_UNLOCKED 0
+#define MUTEX_LOCKED 1
+#define MUTEX_CONTENDED 2
+
+struct mutex_waiter {
+ struct list node;
+ struct thread *thread;
+};
+
+struct mutex {
+ unsigned long state;
+ struct spinlock lock;
+ struct list waiters;
+};
+
+void mutex_lock_slow(struct mutex *mutex);
+
+void mutex_unlock_slow(struct mutex *mutex);
+
+static inline unsigned long
+mutex_tryacquire(struct mutex *mutex)
+{
+ return atomic_cas(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
+}
+
+static inline unsigned long
+mutex_tryacquire_slow(struct mutex *mutex)
+{
+ return atomic_swap(&mutex->state, MUTEX_CONTENDED);
+}
+
+static inline unsigned long
+mutex_release(struct mutex *mutex)
+{
+ unsigned long state;
+
+ state = atomic_swap(&mutex->state, MUTEX_UNLOCKED);
+ assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));
+ return state;
+}
+
+static inline void
+mutex_queue(struct mutex *mutex, struct mutex_waiter *waiter)
+{
+ list_insert_tail(&mutex->waiters, &waiter->node);
+}
+
+static inline void
+mutex_queue_list(struct mutex *mutex, struct list *waiters)
+{
+ list_concat(&mutex->waiters, waiters);
+}
+
+static inline void
+mutex_wait(struct mutex *mutex, struct mutex_waiter *waiter)
+{
+ unsigned long state;
+
+ do {
+ thread_sleep(&mutex->lock);
+ state = mutex_tryacquire_slow(mutex);
+ } while (state != MUTEX_UNLOCKED);
+
+ list_remove(&waiter->node);
+}
+
+static inline void
+mutex_signal(struct mutex *mutex)
+{
+ struct mutex_waiter *waiter;
+
+ if (!list_empty(&mutex->waiters)) {
+ waiter = list_first_entry(&mutex->waiters, struct mutex_waiter, node);
+ thread_wakeup(waiter->thread);
+ }
+}
+
+static inline void
+mutex_trydowngrade(struct mutex *mutex)
+{
+ if (list_empty(&mutex->waiters)) {
+ unsigned long state;
+
+ state = atomic_swap(&mutex->state, MUTEX_LOCKED);
+ assert(state == MUTEX_CONTENDED);
+ }
+}
+
+#endif /* _KERN_MUTEX_I_H */
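
For readers following the UNLOCKED/LOCKED/CONTENDED protocol these helpers implement, the fragment below is a rough user-space analogue written with C11 atomics. It mirrors the state transitions only: where the kernel queues the waiter and sleeps on the spinlock-protected list, this sketch merely yields, and it never downgrades back to LOCKED the way mutex_trydowngrade() does, so it is an illustration rather than a replacement.

    #include <sched.h>
    #include <stdatomic.h>

    #define DEMO_UNLOCKED  0UL
    #define DEMO_LOCKED    1UL
    #define DEMO_CONTENDED 2UL

    struct demo_mutex {
        atomic_ulong state;     /* analogue of mutex->state */
    };

    static void
    demo_mutex_init(struct demo_mutex *mutex)
    {
        atomic_init(&mutex->state, DEMO_UNLOCKED);
    }

    static void
    demo_lock(struct demo_mutex *mutex)
    {
        unsigned long expected = DEMO_UNLOCKED;

        /* Fast path, analogue of mutex_tryacquire(): UNLOCKED -> LOCKED. */
        if (atomic_compare_exchange_strong(&mutex->state, &expected, DEMO_LOCKED))
            return;

        /*
         * Slow path, analogue of calling mutex_tryacquire_slow() in a loop:
         * mark the mutex contended and retry until the swap reports it was
         * unlocked. The kernel sleeps in mutex_wait() instead of yielding.
         */
        while (atomic_exchange(&mutex->state, DEMO_CONTENDED) != DEMO_UNLOCKED)
            sched_yield();
    }

    static void
    demo_unlock(struct demo_mutex *mutex)
    {
        /*
         * Analogue of mutex_release(): a CONTENDED result is what makes the
         * kernel take mutex_unlock_slow() and wake one queued waiter.
         */
        atomic_exchange(&mutex->state, DEMO_UNLOCKED);
    }
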