From 06c1722c96a33e5de4ac66ee30b96295d56457d5 Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Thu, 11 Jul 2019 23:32:44 +0200
Subject: Finish spinlock documentation

---
 kern/spinlock.h | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/kern/spinlock.h b/kern/spinlock.h
index f0243e77..9e369eef 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -18,6 +18,7 @@
  * Spin locks.
  *
  * Critical sections built with spin locks run with preemption disabled.
+ * Critical sections built with different spin locks may safely nest.
  *
  * This module provides fair spin locks which guarantee time-bounded lock
  * acquisition depending only on the number of contending processors.
@@ -73,8 +74,8 @@ void spinlock_init(struct spinlock *lock);
  *
  * Return 0 on success, EBUSY if the spin lock is already locked.
  *
- * If the operation succeeds, prior calls to spinlock_unlock on the same
- * spin lock synchronize with this operation.
+ * If the operation succeeds, prior unlock operations on the same spin lock
+ * synchronize with this operation.
  */
 static inline int
 spinlock_trylock(struct spinlock *lock)
@@ -99,8 +100,8 @@ spinlock_trylock(struct spinlock *lock)
  *
  * A spin lock can only be locked once.
  *
- * Prior calls to spinlock_unlock on the same spin lock synchronize with
- * this operation.
+ * Prior unlock operations on the same spin lock synchronize with this
+ * operation.
  */
 static inline void
 spinlock_lock(struct spinlock *lock)
@@ -114,6 +115,8 @@ spinlock_lock(struct spinlock *lock)
  *
  * The spin lock must be locked, and must have been locked on the same
  * processor it is unlocked on.
+ *
+ * This operation synchronizes with subsequent lock operations.
  */
 static inline void
 spinlock_unlock(struct spinlock *lock)
@@ -135,8 +138,8 @@ spinlock_unlock(struct spinlock *lock)
  * Interrupts are disabled on success, in which case the flags passed
  * by the caller are filled with the previous value of the CPU flags.
  *
- * If the operation succeeds, prior calls to spinlock_unlock on the same
- * spin lock synchronize with this operation.
+ * If the operation succeeds, prior unlock operations on the same spin lock
+ * synchronize with this operation.
  */
 static inline int
 spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
@@ -161,8 +164,11 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
  *
  * A spin lock can only be locked once.
  *
- * This function disables preemption and interrupts. The flags passed by
- * the caller are filled with the previous value of the CPU flags.
+ * This function disables interrupts. The flags passed by the caller are
+ * filled with the previous value of the CPU flags.
+ *
+ * Prior unlock operations on the same spin lock synchronize with this
+ * operation.
  */
 static inline void
 spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
@@ -177,8 +183,9 @@ spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
  * The spin lock must be locked, and must have been locked on the same
  * processor it is unlocked on.
  *
- * This function may reenable preemption and interrupts, using the given
- * flags which must have been obtained with a lock or trylock operation.
+ * The CPU flags, obtained from a locking operation, are restored.
+ *
+ * This operation synchronizes with subsequent lock operations.
  */
 static inline void
 spinlock_unlock_intr_restore(struct spinlock *lock, unsigned long flags)
--
cgit v1.2.3
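
Usage note (not part of the commit): the following is a minimal sketch of how the API documented above is typically exercised. Only the functions shown in kern/spinlock.h appear here; the counter structure and the helper names are hypothetical, introduced purely for illustration.

    /* Hypothetical usage sketch for the kern/spinlock.h API documented above. */
    #include <kern/spinlock.h>

    struct counter {
        struct spinlock lock;
        unsigned long value;
    };

    static void
    counter_init(struct counter *counter)
    {
        spinlock_init(&counter->lock);
        counter->value = 0;
    }

    /* Thread context only: the critical section runs with preemption disabled. */
    static void
    counter_inc(struct counter *counter)
    {
        spinlock_lock(&counter->lock);
        counter->value++;
        spinlock_unlock(&counter->lock);
    }

    /* Data also touched from interrupt context: disable interrupts while locked,
       then restore the CPU flags obtained from the locking operation. */
    static unsigned long
    counter_read_intr(struct counter *counter)
    {
        unsigned long flags, value;

        spinlock_lock_intr_save(&counter->lock, &flags);
        value = counter->value;
        spinlock_unlock_intr_restore(&counter->lock, flags);
        return value;
    }

Because prior unlock operations on a given spin lock synchronize with later lock operations on it, the value read in counter_read_intr reflects all increments completed before the lock was acquired.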