author     Richard Braun <rbraun@sceen.net>   2019-07-11 23:32:44 +0200
committer  Richard Braun <rbraun@sceen.net>   2019-08-16 03:57:13 +0200
commit     06c1722c96a33e5de4ac66ee30b96295d56457d5
tree       31854ffb1974541022288aa5ef6dcef0d53e89d4
parent     b0c247010837ad7880d20a676f4eada30be49090
Finish spinlock documentation
 kern/spinlock.h | 27 +++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/kern/spinlock.h b/kern/spinlock.h
index f0243e77..9e369eef 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -18,6 +18,7 @@
* Spin locks.
*
* Critical sections built with spin locks run with preemption disabled.
+ * Critical sections built with different spin locks may safely nest.
*
* This module provides fair spin locks which guarantee time-bounded lock
* acquisition depending only on the number of contending processors.
@@ -73,8 +74,8 @@ void spinlock_init(struct spinlock *lock);
*
* Return 0 on success, EBUSY if the spin lock is already locked.
*
- * If the operation succeeds, prior calls to spinlock_unlock on the same
- * spin lock synchronize with this operation.
+ * If the operation succeeds, prior unlock operations on the same spin lock
+ * synchronize with this operation.
*/
static inline int
spinlock_trylock(struct spinlock *lock)
@@ -99,8 +100,8 @@ spinlock_trylock(struct spinlock *lock)
*
* A spin lock can only be locked once.
*
- * Prior calls to spinlock_unlock on the same spin lock synchronize with
- * this operation.
+ * Prior unlock operations on the same spin lock synchronize with this
+ * operation.
*/
static inline void
spinlock_lock(struct spinlock *lock)
@@ -114,6 +115,8 @@ spinlock_lock(struct spinlock *lock)
*
* The spin lock must be locked, and must have been locked on the same
* processor it is unlocked on.
+ *
+ * This operation synchronizes with subsequent lock operations.
*/
static inline void
spinlock_unlock(struct spinlock *lock)
@@ -135,8 +138,8 @@ spinlock_unlock(struct spinlock *lock)
* Interrupts are disabled on success, in which case the flags passed
* by the caller are filled with the previous value of the CPU flags.
*
- * If the operation succeeds, prior calls to spinlock_unlock on the same
- * spin lock synchronize with this operation.
+ * If the operation succeeds, prior unlock operations on the same spin lock
+ * synchronize with this operation.
*/
static inline int
spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
@@ -161,8 +164,11 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
*
* A spin lock can only be locked once.
*
- * This function disables preemption and interrupts. The flags passed by
- * the caller are filled with the previous value of the CPU flags.
+ * This function disables interrupts. The flags passed by the caller are
+ * filled with the previous value of the CPU flags.
+ *
+ * Prior unlock operations on the same spin lock synchronize with this
+ * operation.
*/
static inline void
spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
@@ -177,8 +183,9 @@ spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
* The spin lock must be locked, and must have been locked on the same
* processor it is unlocked on.
*
- * This function may reenable preemption and interrupts, using the given
- * flags which must have been obtained with a lock or trylock operation.
+ * The CPU flags, obtained from a locking operation, are restored.
+ *
+ * This operation synchronizes with subsequent lock operations.
*/
static inline void
spinlock_unlock_intr_restore(struct spinlock *lock, unsigned long flags)
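
A minimal usage sketch of the lock/unlock interface documented above. The lock
names, the counter and the include form are illustrative assumptions, not part
of this commit; only the spinlock_init, spinlock_lock and spinlock_unlock calls
come from kern/spinlock.h. It shows an ordinary critical section and, per the
new header comment, two critical sections built with different spin locks
nesting.

#include <kern/spinlock.h>

static struct spinlock queue_lock;
static struct spinlock stats_lock;
static unsigned long nr_queued;

void
example_init(void)
{
    spinlock_init(&queue_lock);
    spinlock_init(&stats_lock);
}

void
example_enqueue(void)
{
    spinlock_lock(&queue_lock);       /* preemption is now disabled */

    /* Critical sections built with different spin locks may safely nest. */
    spinlock_lock(&stats_lock);
    nr_queued++;                      /* made visible to the next locker by the unlock below */
    spinlock_unlock(&stats_lock);

    spinlock_unlock(&queue_lock);     /* must run on the processor that locked */
}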
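
A similar sketch for spinlock_trylock, again with hypothetical names: the
caller avoids spinning and falls back to other work when the lock is already
held (EBUSY).

#include <kern/spinlock.h>

static struct spinlock cache_lock;

void
example_try_flush(void)
{
    int error;

    error = spinlock_trylock(&cache_lock);

    if (error) {
        /* EBUSY: the lock is held elsewhere, do not spin. */
        return;
    }

    /* Critical section, preemption disabled. */
    spinlock_unlock(&cache_lock);
}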
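
A sketch of the interrupt-saving variants, assuming a lock shared between
thread and interrupt context; the device structure and handler are
hypothetical. Thread context disables interrupts while holding the lock so the
handler cannot run on the same processor and deadlock on it.

#include <kern/spinlock.h>

struct example_device {
    struct spinlock lock;
    unsigned long pending;
};

/* Thread context. */
void
example_device_submit(struct example_device *dev)
{
    unsigned long flags;

    spinlock_lock_intr_save(&dev->lock, &flags);        /* interrupts now disabled */
    dev->pending++;
    spinlock_unlock_intr_restore(&dev->lock, flags);    /* previous CPU flags restored */
}

/* Interrupt context: interrupts are assumed to already be disabled here,
   so the plain lock operation suffices. */
void
example_device_intr(struct example_device *dev)
{
    spinlock_lock(&dev->lock);
    dev->pending = 0;
    spinlock_unlock(&dev->lock);
}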