Diffstat (limited to 'kern/spinlock.h')
-rw-r--r--  kern/spinlock.h | 84
1 file changed, 69 insertions(+), 15 deletions(-)
diff --git a/kern/spinlock.h b/kern/spinlock.h
index b165e759..62974922 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -15,16 +15,17 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
- * Spin lock.
+ * Spin locks.
*
* Critical sections built with spin locks run with preemption disabled.
+ *
+ * This module provides fair spin locks which guarantee time-bounded lock
+ * acquisition depending only on the number of contending processors.
*/
#ifndef _KERN_SPINLOCK_H
#define _KERN_SPINLOCK_H
-#include <kern/assert.h>
-#include <kern/macros.h>
#include <kern/spinlock_i.h>
#include <kern/spinlock_types.h>
#include <kern/thread.h>
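The fairness guarantee described in the header comment is characteristic of ticket-style locks: each contender draws a ticket and contenders are served in FIFO order, so the wait is bounded by the number of CPUs queued ahead. The actual algorithm lives in kern/spinlock_i.h and is not shown in this diff; the sketch below is only a hypothetical, minimal C11 ticket lock given for illustration, with all names assumed.

    /*
     * Minimal ticket-lock sketch, for illustration only; this is not
     * the spinlock_i.h implementation, and all names are hypothetical.
     */
    #include <stdatomic.h>

    struct ticket_lock {
        atomic_uint next;    /* next ticket to hand out */
        atomic_uint owner;   /* ticket currently being served */
    };

    static void
    ticket_lock_acquire(struct ticket_lock *lock)
    {
        /* Draw a ticket; fetch_add returns the previous value. */
        unsigned int ticket = atomic_fetch_add_explicit(&lock->next, 1,
                                                        memory_order_relaxed);

        /*
         * Spin until our ticket is served; the wait is bounded by the
         * number of contenders queued ahead of us.
         */
        while (atomic_load_explicit(&lock->owner, memory_order_acquire)
               != ticket) {
            continue;
        }
    }

    static void
    ticket_lock_release(struct ticket_lock *lock)
    {
        /* Pass ownership to the next ticket in FIFO order. */
        atomic_fetch_add_explicit(&lock->owner, 1, memory_order_release);
    }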
@@ -32,21 +33,27 @@
struct spinlock;
-static inline void
-spinlock_init(struct spinlock *lock)
-{
- lock->locked = 0;
-}
+#define spinlock_assert_locked(lock) assert((lock)->value != SPINLOCK_UNLOCKED)
-#define spinlock_assert_locked(lock) assert((lock)->locked)
+/*
+ * Initialize a spin lock.
+ */
+void spinlock_init(struct spinlock *lock);
+/*
+ * Attempt to lock the given spin lock.
+ *
+ * Return 0 on success, ERROR_BUSY if the spin lock is already locked.
+ *
+ * Preemption is disabled on success.
+ */
static inline int
spinlock_trylock(struct spinlock *lock)
{
int error;
thread_preempt_disable();
- error = spinlock_tryacquire(lock);
+ error = spinlock_lock_fast(lock);
if (error) {
thread_preempt_enable();
@@ -55,17 +62,35 @@ spinlock_trylock(struct spinlock *lock)
return error;
}
+/*
+ * Lock a spin lock.
+ *
+ * If the spin lock is already locked, the calling thread spins until the
+ * spin lock is unlocked.
+ *
+ * A spin lock can only be locked once, i.e. recursive locking is not allowed.
+ *
+ * This function disables preemption.
+ */
static inline void
spinlock_lock(struct spinlock *lock)
{
thread_preempt_disable();
- spinlock_acquire(lock);
+ spinlock_lock_common(lock);
}
+/*
+ * Unlock a spin lock.
+ *
+ * The spin lock must be locked, and must have been locked on the same
+ * processor it is unlocked on.
+ *
+ * This function may reenable preemption.
+ */
static inline void
spinlock_unlock(struct spinlock *lock)
{
- spinlock_release(lock);
+ spinlock_unlock_common(lock);
thread_preempt_enable();
}
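With the hunks above, the plain variants are complete: spinlock_trylock fails fast with ERROR_BUSY, spinlock_lock spins until it succeeds, and both leave preemption disabled until spinlock_unlock. A hypothetical usage sketch (counter_lock and counter are illustrative names, not part of this diff):

    #include <kern/spinlock.h>

    static struct spinlock counter_lock; /* spinlock_init(&counter_lock) at boot */
    static unsigned long counter;

    static void
    counter_inc(void)
    {
        spinlock_lock(&counter_lock);    /* spins if contended, disables preemption */
        counter++;
        spinlock_unlock(&counter_lock);  /* may reenable preemption */
    }

    static int
    counter_try_inc(void)
    {
        int error;

        error = spinlock_trylock(&counter_lock);

        if (error) {
            return error;                /* ERROR_BUSY, lock held elsewhere */
        }

        counter++;
        spinlock_unlock(&counter_lock);
        return 0;
    }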
@@ -74,6 +99,15 @@ spinlock_unlock(struct spinlock *lock)
* critical sections.
*/
+/*
+ * Attempt to lock the given spin lock.
+ *
+ * Return 0 on success, ERROR_BUSY if the spin lock is already locked.
+ *
+ * Preemption and interrupts are disabled on success, in which case the
+ * flags passed by the caller are filled with the previous value of the
+ * CPU flags.
+ */
static inline int
spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
{
@@ -81,7 +115,7 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
thread_preempt_disable();
cpu_intr_save(flags);
- error = spinlock_tryacquire(lock);
+ error = spinlock_lock_fast(lock);
if (error) {
cpu_intr_restore(*flags);
@@ -91,18 +125,38 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
return error;
}
+/*
+ * Lock a spin lock.
+ *
+ * If the spin lock is already locked, the calling thread spins until the
+ * spin lock is unlocked.
+ *
+ * A spin lock can only be locked once, i.e. recursive locking is not allowed.
+ *
+ * This function disables preemption and interrupts. The flags passed by
+ * the caller are filled with the previous value of the CPU flags.
+ */
static inline void
spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
{
thread_preempt_disable();
cpu_intr_save(flags);
- spinlock_acquire(lock);
+ spinlock_lock_common(lock);
}
+/*
+ * Unlock a spin lock.
+ *
+ * The spin lock must be locked, and must have been locked on the same
+ * processor it is unlocked on.
+ *
+ * This function may reenable preemption and interrupts, using the given
+ * flags which must have been obtained with a lock or trylock operation.
+ */
static inline void
spinlock_unlock_intr_restore(struct spinlock *lock, unsigned long flags)
{
- spinlock_release(lock);
+ spinlock_unlock_common(lock);
cpu_intr_restore(flags);
thread_preempt_enable();
}
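The _intr_save/_intr_restore variants mirror the plain ones but additionally save and restore the CPU interrupt flags, making them suitable for data shared with interrupt handlers. A hypothetical usage sketch (irq_lock and pending_events are illustrative names, not part of this diff):

    #include <kern/spinlock.h>

    static struct spinlock irq_lock;     /* spinlock_init(&irq_lock) at boot */
    static unsigned int pending_events;

    static void
    event_post(void)
    {
        unsigned long flags;

        /* Disables preemption and interrupts; previous CPU flags go to flags. */
        spinlock_lock_intr_save(&irq_lock, &flags);
        pending_events++;
        /* Restores the saved CPU flags, then may reenable preemption. */
        spinlock_unlock_intr_restore(&irq_lock, flags);
    }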