summaryrefslogtreecommitdiff
path: root/kern/spinlock_i.h
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2013-02-26 20:49:25 +0100
committerRichard Braun <rbraun@sceen.net>2013-02-26 20:49:25 +0100
commitb320421ff5414ecdbdf61568a26d15b9abb81ba3 (patch)
tree27a5f9248a383ecee954e35ce19e8dccdbbfae88 /kern/spinlock_i.h
parentc5f5244fc70018d5271da9e5fe8b52212b29321d (diff)
kern/spinlock: add interrupt-handling locking primitives
Diffstat (limited to 'kern/spinlock_i.h')
-rw-r--r--kern/spinlock_i.h60
1 file changed, 60 insertions, 0 deletions
diff --git a/kern/spinlock_i.h b/kern/spinlock_i.h
new file mode 100644
index 00000000..58af6bf1
--- /dev/null
+++ b/kern/spinlock_i.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2012, 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This implementation relies on the availability of hardware compare-and-swap
+ * support. It means that almost all spinlock operations imply a full memory
+ * barrier. Users must not rely on this behaviour as it could change in the
+ * future.
+ */
+
+#ifndef _KERN_SPINLOCK_I_H
+#define _KERN_SPINLOCK_I_H
+
+#include <kern/assert.h>
+#include <machine/atomic.h>
+#include <machine/cpu.h>
+
+/*
+ * Spinlock implemented as a single lock word: 0 means unlocked, 1 means
+ * held (set/cleared atomically by the acquire/release operations below).
+ */
+struct spinlock {
+ unsigned long locked;
+};
+
+/*
+ * Attempt to grab the lock without spinning.
+ *
+ * Return true (non-zero) if the lock was acquired, false if it is
+ * currently held by another owner.
+ */
+static inline int
+spinlock_tryacquire(struct spinlock *lock)
+{
+    unsigned long prev;
+
+    /* The CAS returns the previous value of the lock word; a previous
+       value of 0 means the word was free and we now own the lock. */
+    prev = atomic_cas(&lock->locked, 0, 1);
+    return (prev == 0);
+}
+
+/*
+ * Acquire the lock, busy-waiting until it becomes available.
+ */
+static inline void
+spinlock_acquire(struct spinlock *lock)
+{
+    for (;;) {
+        if (spinlock_tryacquire(lock))
+            break;
+
+        /* Hint to the processor that this is a spin-wait loop. */
+        cpu_pause();
+    }
+}
+
+/*
+ * Release the lock.
+ *
+ * The caller must hold the lock; this is checked by asserting that the
+ * previous value of the lock word was non-zero.
+ */
+static inline void
+spinlock_release(struct spinlock *lock)
+{
+    unsigned long prev;
+
+    prev = atomic_swap(&lock->locked, 0);
+    assert(prev != 0);
+}
+
+#endif /* _KERN_SPINLOCK_I_H */