summaryrefslogtreecommitdiff
path: root/kern/mutex.c
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2013-04-08 21:40:25 +0200
committerRichard Braun <rbraun@sceen.net>2013-04-08 21:40:25 +0200
commitc0fb9d9c300480a865158be6dc78e9225e0b310e (patch)
tree76d029e5c8c69d9015436be6d1e92523d85de24e /kern/mutex.c
parente2cfad45825f974e544c2b5788392381e2f1b87b (diff)
kern/mutex: new module
As the name implies, this module provides sleepable mutual exclusion locks.
Diffstat (limited to 'kern/mutex.c')
-rw-r--r--kern/mutex.c120
1 files changed, 120 insertions, 0 deletions
diff --git a/kern/mutex.c b/kern/mutex.c
new file mode 100644
index 00000000..507116e2
--- /dev/null
+++ b/kern/mutex.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <kern/assert.h>
+#include <kern/list.h>
+#include <kern/mutex.h>
+#include <kern/spinlock.h>
+#include <kern/thread.h>
+#include <machine/atomic.h>
+
/*
 * Per-thread wait queue entry, allocated on the waiting thread's stack
 * by mutex_lock() and linked into the mutex waiters list while the
 * thread sleeps. The entry is removed before mutex_lock() returns, so
 * stack allocation is safe.
 */
struct mutex_waiter {
    struct list node;       /* linkage in mutex->waiters */
    struct thread *thread;  /* thread to wake on unlock */
};
+
+void
+mutex_init(struct mutex *mutex)
+{
+ mutex->state = MUTEX_UNLOCKED;
+ spinlock_init(&mutex->lock);
+ list_init(&mutex->waiters);
+}
+
+int
+mutex_trylock(struct mutex *mutex)
+{
+ unsigned long state;
+
+ state = atomic_cas(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);
+
+ if (state == MUTEX_UNLOCKED)
+ return 0;
+
+ return 1;
+}
+
/*
 * Acquire a mutex, sleeping until it becomes available.
 *
 * Fast path: a single CAS from unlocked to locked, no spin lock taken.
 * Slow path: the state is forced to contended and the thread queues
 * itself on the waiters list, sleeping until it observes an unlocked
 * state. The waiter entry lives on this thread's stack and is removed
 * before returning.
 */
void
mutex_lock(struct mutex *mutex)
{
    struct mutex_waiter waiter;
    unsigned long state;

    /* Uncontended fast path. */
    state = atomic_cas(&mutex->state, MUTEX_UNLOCKED, MUTEX_LOCKED);

    if (state == MUTEX_UNLOCKED)
        return;

    /*
     * The mutex was either locked or contended. Unconditionally update its
     * state to reflect it is now contended, and to check the previous state
     * while holding the waiters lock so that the current thread doesn't miss
     * a wakeup when the owner unlocks.
     */

    assert((state == MUTEX_LOCKED) || (state == MUTEX_CONTENDED));

    spinlock_lock(&mutex->lock);

    /*
     * The swap both marks the mutex contended and tells us whether the
     * owner released it between the CAS above and taking the spin lock.
     * If it did, the swap itself acquired the mutex (as contended).
     */
    state = atomic_swap(&mutex->state, MUTEX_CONTENDED);

    if (state == MUTEX_UNLOCKED)
        goto out;

    waiter.thread = thread_self();
    list_insert_tail(&mutex->waiters, &waiter.node);

    /*
     * Sleep releases the spin lock and reacquires it on wakeup. Each
     * wakeup re-marks the mutex contended; seeing MUTEX_UNLOCKED in the
     * previous state means this thread now owns the mutex.
     */
    do {
        thread_sleep(&mutex->lock);
        state = atomic_swap(&mutex->state, MUTEX_CONTENDED);
    } while (state != MUTEX_UNLOCKED);

    list_remove(&waiter.node);

out:
    /*
     * The mutex was acquired in the contended state. If no other thread
     * is queued, downgrade to plain locked so the next unlock can take
     * the fast path.
     */
    if (list_empty(&mutex->waiters)) {
        state = atomic_swap(&mutex->state, MUTEX_LOCKED);
        assert(state == MUTEX_CONTENDED);
    }

    spinlock_unlock(&mutex->lock);
}
+
+void
+mutex_unlock(struct mutex *mutex)
+{
+ struct mutex_waiter *waiter;
+ unsigned long state;
+
+ state = atomic_swap(&mutex->state, MUTEX_UNLOCKED);
+
+ if (state == MUTEX_LOCKED)
+ return;
+
+ /* The mutex was contended, wake up the next waiter if any */
+
+ assert(state == MUTEX_CONTENDED);
+
+ spinlock_lock(&mutex->lock);
+
+ if (!list_empty(&mutex->waiters)) {
+ waiter = list_first_entry(&mutex->waiters, struct mutex_waiter, node);
+ thread_wakeup(waiter->thread);
+ }
+
+ spinlock_unlock(&mutex->lock);
+}