author    Richard Braun <rbraun@sceen.net>    2013-02-26 20:49:25 +0100
committer Richard Braun <rbraun@sceen.net>    2013-02-26 20:49:25 +0100
commit    b320421ff5414ecdbdf61568a26d15b9abb81ba3 (patch)
tree      27a5f9248a383ecee954e35ce19e8dccdbbfae88
parent    c5f5244fc70018d5271da9e5fe8b52212b29321d (diff)
kern/spinlock: add interrupt-handling locking primitives
-rw-r--r--  Makefrag.am         1
-rw-r--r--  kern/spinlock.h    79
-rw-r--r--  kern/spinlock_i.h  60
3 files changed, 107 insertions, 33 deletions
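
The new *_intr_save/*_intr_restore primitives pair lock acquisition with
cpu_intr_save() and cpu_intr_restore(), for data shared with interrupt
handlers. A minimal caller sketch (the example_* names are illustrative,
not part of the patch):

    static struct spinlock example_lock = SPINLOCK_INITIALIZER;
    static unsigned long example_count;

    static void
    example_increment(void)
    {
        unsigned long flags;

        /* Disable preemption, save and disable interrupts, spin for the lock. */
        spinlock_lock_intr_save(&example_lock, &flags);
        example_count++;

        /* Release the lock, restore interrupts, reenable preemption. */
        spinlock_unlock_intr_restore(&example_lock, flags);
    }
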
diff --git a/Makefrag.am b/Makefrag.am
index 973e3b03..1cac8f10 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -21,6 +21,7 @@ x15_SOURCES += \
kern/rbtree.h \
kern/rbtree_i.h \
kern/spinlock.h \
+ kern/spinlock_i.h \
kern/sprintf.c \
kern/sprintf.h \
kern/stddef.h \
diff --git a/kern/spinlock.h b/kern/spinlock.h
index ccd2cc60..cbd95458 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Richard Braun.
+ * Copyright (c) 2012, 2013 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,10 +17,7 @@
*
* Spin lock.
*
- * This implementation relies on the availability of hardware compare-and-swap
- * support. It also means that almost all spinlock operations imply a full
- * memory barrier. While this can be optimized by relying on architecture
- * specific properties, focus on correctness for the time being.
+ * Critical sections built with spin locks run with preemption disabled.
*/
#ifndef _KERN_SPINLOCK_H
@@ -28,21 +25,13 @@
#include <kern/assert.h>
#include <kern/thread.h>
-#include <machine/atomic.h>
+#include <kern/spinlock_i.h>
#include <machine/cpu.h>
-struct spinlock {
- unsigned long locked;
-};
+struct spinlock;
-/*
- * Static spin lock initializer.
- */
#define SPINLOCK_INITIALIZER { 0 }
-/*
- * Initialize a spin lock.
- */
static inline void
spinlock_init(struct spinlock *lock)
{
@@ -56,47 +45,71 @@ spinlock_assert_locked(struct spinlock *lock)
}
/*
- * Attempt to acquire a spin lock.
- *
* Return true if acquired, false if busy.
*/
static inline int
spinlock_trylock(struct spinlock *lock)
{
- unsigned long busy;
+ int acquired;
thread_preempt_disable();
- busy = atomic_cas(&lock->locked, 0, 1);
+ acquired = spinlock_tryacquire(lock);
- if (!busy)
- return 1;
+ if (!acquired)
+ thread_preempt_enable();
- thread_preempt_enable();
- return 0;
+ return acquired;
}
-/*
- * Acquire a spin lock.
- */
static inline void
spinlock_lock(struct spinlock *lock)
{
thread_preempt_disable();
+ spinlock_acquire(lock);
+}
- while (atomic_cas(&lock->locked, 0, 1) != 0)
- cpu_pause();
+static inline void
+spinlock_unlock(struct spinlock *lock)
+{
+ spinlock_release(lock);
+ thread_preempt_enable();
}
/*
- * Release a spin lock.
+ * Versions of the spinlock functions that also disable interrupts during
+ * critical sections.
*/
+
+static inline int
+spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
+{
+ int acquired;
+
+ thread_preempt_disable();
+ *flags = cpu_intr_save();
+ acquired = spinlock_tryacquire(lock);
+
+ if (!acquired) {
+ cpu_intr_restore(*flags);
+ thread_preempt_enable();
+ }
+
+ return acquired;
+}
+
static inline void
-spinlock_unlock(struct spinlock *lock)
+spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
{
- unsigned long locked;
+ thread_preempt_disable();
+ *flags = cpu_intr_save();
+ spinlock_acquire(lock);
+}
- locked = atomic_swap(&lock->locked, 0);
- assert(locked);
+static inline void
+spinlock_unlock_intr_restore(struct spinlock *lock, unsigned long flags)
+{
+ spinlock_release(lock);
+ cpu_intr_restore(flags);
thread_preempt_enable();
}
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
new file mode 100644
index 00000000..58af6bf1
--- /dev/null
+++ b/kern/spinlock_i.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2012, 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This implementation relies on the availability of hardware compare-and-swap
+ * support. It means that almost all spinlock operations imply a full memory
+ * barrier. Users must not rely on this behaviour as it could change in the
+ * future.
+ */
+
+#ifndef _KERN_SPINLOCK_I_H
+#define _KERN_SPINLOCK_I_H
+
+#include <kern/assert.h>
+#include <machine/atomic.h>
+#include <machine/cpu.h>
+
+struct spinlock {
+ unsigned long locked;
+};
+
+/*
+ * Return true if acquired, false if busy.
+ */
+static inline int
+spinlock_tryacquire(struct spinlock *lock)
+{
+ return !atomic_cas(&lock->locked, 0, 1);
+}
+
+static inline void
+spinlock_acquire(struct spinlock *lock)
+{
+ while (!spinlock_tryacquire(lock))
+ cpu_pause();
+}
+
+static inline void
+spinlock_release(struct spinlock *lock)
+{
+ unsigned long locked;
+
+ locked = atomic_swap(&lock->locked, 0);
+ assert(locked);
+}
+
+#endif /* _KERN_SPINLOCK_I_H */
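
As the new kern/spinlock_i.h header notes, the implementation relies on
hardware compare-and-swap: atomic_cas() is expected to return the previous
value of the lock word, so !atomic_cas(&lock->locked, 0, 1) is true exactly
when the lock was free. machine/atomic.h is not part of this patch; a rough
equivalent of the assumed semantics in terms of the GCC builtin, as a
sketch only:

    /*
     * Assumed semantics of atomic_cas(ptr, oldval, newval): atomically
     * store newval in *ptr if *ptr equals oldval, and return the value
     * *ptr held beforehand.
     */
    static inline unsigned long
    example_atomic_cas(unsigned long *ptr, unsigned long oldval,
                       unsigned long newval)
    {
        return __sync_val_compare_and_swap(ptr, oldval, newval);
    }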