summaryrefslogtreecommitdiff
path: root/kern/spinlock.h
diff options
context:
space:
mode:
author    Richard Braun <rbraun@sceen.net>  2018-03-02 23:23:30 +0100
committer Richard Braun <rbraun@sceen.net>  2018-03-02 23:29:28 +0100
commit 07c7d5d45c9bf7f95a027ed47a453cc6cb16b304 (patch)
tree 46b3d76b3ecff1d99890f6ae352ffba240c5bc22 /kern/spinlock.h
parent 43e07ea6df7f09b0a0853e3b9c55780aecaea393 (diff)
kern/spinlock: fix and optimize
Making the unlock operation block allows tricky deadlocks to occur in case a thread is interrupted right before announcing itself as the first waiter in the queue. Since locking is expected to block, the spinlock implementation is reworked to move the hand-off performed by the unlock operation into the lock operation. As a side effect, the common case of a single waiter is also optimized.
Diffstat (limited to 'kern/spinlock.h')
-rw-r--r--  kern/spinlock.h  |  19 +++++++++++++++---
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/kern/spinlock.h b/kern/spinlock.h
index d88d535e..f9e74f56 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 Richard Braun.
+ * Copyright (c) 2012-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,6 +26,11 @@
#ifndef KERN_SPINLOCK_H
#define KERN_SPINLOCK_H
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/spinlock_i.h>
@@ -34,7 +39,17 @@
struct spinlock;
-#define spinlock_assert_locked(lock) assert((lock)->value != SPINLOCK_UNLOCKED)
+/* TODO Remove, let users do it instead */
+#define spinlock_assert_locked(lock) assert(spinlock_locked(lock))
+
+static inline bool
+spinlock_locked(const struct spinlock *lock)
+{
+ uint32_t value;
+
+ value = atomic_load(&lock->value, ATOMIC_RELAXED);
+ return value != SPINLOCK_UNLOCKED;
+}
#ifdef SPINLOCK_TRACK_OWNER