author    Richard Braun <rbraun@sceen.net>  2019-07-11 23:42:01 +0200
committer Richard Braun <rbraun@sceen.net>  2019-08-16 03:57:13 +0200
commit    a62edad69f70d1c86dfa51cc8a2e298bb966180b (patch)
tree      b58cd3be6fa98f663ad4e1dcfd6e20c82f2eb81e
parent    06c1722c96a33e5de4ac66ee30b96295d56457d5 (diff)
Document the mutex API, fix spin lock documentation
-rw-r--r--  kern/mutex.h     | 12
-rw-r--r--  kern/spinlock.h  | 14
2 files changed, 20 insertions(+), 6 deletions(-)
diff --git a/kern/mutex.h b/kern/mutex.h
index 0e6cd46c..80b7dbdf 100644
--- a/kern/mutex.h
+++ b/kern/mutex.h
@@ -60,6 +60,9 @@ mutex_locked(const struct mutex *mutex)
* This function may not sleep.
*
* Return 0 on success, EBUSY if the mutex is already locked.
+ *
+ * If the operation succeeds, prior unlock operations on the same mutex
+ * synchronize with this operation.
*/
static inline int
mutex_trylock(struct mutex *mutex)
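
A short usage sketch of the non-blocking path documented in this hunk. The
mutex is assumed to have been set up with mutex_init(), and the counter is
purely illustrative; neither appears in this patch:

#include <kern/mutex.h>

static struct mutex stats_lock;     /* assumed initialized with mutex_init() */
static unsigned long stats_count;

/* Safe in contexts where sleeping is forbidden: mutex_trylock()
 * may not sleep. */
static int
stats_try_inc(void)
{
    int error;

    error = mutex_trylock(&stats_lock);

    if (error) {
        return error;   /* EBUSY: the mutex is already locked */
    }

    /* Prior unlocks of stats_lock synchronize with this acquisition. */
    stats_count++;
    mutex_unlock(&stats_lock);
    return 0;
}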
@@ -73,6 +76,9 @@ mutex_trylock(struct mutex *mutex)
* On return, the mutex is locked. A mutex can only be locked once.
*
* This function may sleep.
+ *
+ * Prior unlock operations on the same mutex synchronize with this
+ * operation.
*/
static inline void
mutex_lock(struct mutex *mutex)
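
The blocking counterpart; since mutex_lock() may sleep, it must only be
called from a context where sleeping is allowed. Same illustrative
variables as in the previous sketch:

/* Blocking update: waits, possibly sleeping, until the mutex is free.
 * On return the mutex is locked by the calling thread. */
static void
stats_inc(void)
{
    mutex_lock(&stats_lock);
    stats_count++;
    mutex_unlock(&stats_lock);
}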
@@ -89,6 +95,9 @@ mutex_lock(struct mutex *mutex)
* A mutex can only be locked once.
*
* This function may sleep.
+ *
+ * Prior unlock operations on the same mutex synchronize with this
+ * operation.
*/
static inline int
mutex_timedlock(struct mutex *mutex, uint64_t ticks)
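
The bounded variant, continuing the same sketch. This hunk doesn't show
the return values of mutex_timedlock(), so the error code on timeout
(presumably ETIMEDOUT) is an assumption:

/* Bounded wait: give up after the given number of clock ticks. */
static int
stats_inc_timed(uint64_t ticks)
{
    int error;

    error = mutex_timedlock(&stats_lock, ticks);

    if (error) {
        return error;   /* assumed ETIMEDOUT if the wait timed out */
    }

    stats_count++;
    mutex_unlock(&stats_lock);
    return 0;
}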
@@ -101,6 +110,9 @@ mutex_timedlock(struct mutex *mutex, uint64_t ticks)
*
* The mutex must be locked, and must have been locked by the calling
* thread.
+ *
+ * This operation synchronizes with subsequent lock operations on the same
+ * mutex.
*/
static inline void
mutex_unlock(struct mutex *mutex)
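
The synchronizes-with wording added in these hunks is what makes the
classic publication pattern work. A sketch, with both threads assumed to
use the same illustrative mutex as above:

static unsigned long shared_data;   /* protected by stats_lock */

/* Thread A: publish a value. The unlock synchronizes with the next
 * lock operation on stats_lock. */
static void
publish(unsigned long value)
{
    mutex_lock(&stats_lock);
    shared_data = value;
    mutex_unlock(&stats_lock);
}

/* Thread B: because a prior unlock synchronizes with this lock, the
 * read below is guaranteed to see the value written by thread A if
 * A's unlock came first. */
static unsigned long
consume(void)
{
    unsigned long value;

    mutex_lock(&stats_lock);
    value = shared_data;
    mutex_unlock(&stats_lock);
    return value;
}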
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 9e369eef..eeff5281 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -113,10 +113,11 @@ spinlock_lock(struct spinlock *lock)
/*
* Unlock a spin lock.
*
- * The spin lock must be locked, and must have been locked on the same
- * processor it is unlocked on.
+ * The spin lock must be locked, and must have been locked by the calling
+ * thread.
*
- * This operation synchronizes with subsequent lock operations.
+ * This operation synchronizes with subsequent lock operations on the same
+ * spin lock.
*/
static inline void
spinlock_unlock(struct spinlock *lock)
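
A matching sketch for the spin lock API; spinlock_init() and the variables
are illustrative assumptions. Note the ownership rule this hunk corrects:
the unlock must come from the thread that locked, not merely from the same
processor:

#include <kern/spinlock.h>

static struct spinlock queue_lock;  /* assumed initialized with spinlock_init() */
static unsigned int queue_length;

/* Busy-waits instead of sleeping, so critical sections must stay short.
 * The unlock synchronizes with the next lock operation on queue_lock. */
static void
queue_length_inc(void)
{
    spinlock_lock(&queue_lock);
    queue_length++;
    spinlock_unlock(&queue_lock);
}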
@@ -180,12 +181,13 @@ spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
/*
* Unlock a spin lock.
*
- * The spin lock must be locked, and must have been locked on the same
- * processor it is unlocked on.
+ * The spin lock must be locked, and must have been locked by the calling
+ * thread.
*
* The CPU flags, obtained from a locking operation, are restored.
*
- * This operation synchronizes with subsequent lock operations.
+ * This operation synchronizes with subsequent lock operations on the same
+ * spin lock.
*/
static inline void
spinlock_unlock_intr_restore(struct spinlock *lock, unsigned long flags)
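
And the interrupt-safe variant documented in the last hunk, for data that
is also touched from interrupt context; the scenario is illustrative,
reusing the declarations from the previous sketch:

/* Disable interrupts for the duration of the critical section, saving
 * the CPU flags on lock and restoring them on unlock. */
static void
queue_length_reset(void)
{
    unsigned long flags;

    spinlock_lock_intr_save(&queue_lock, &flags);
    queue_length = 0;
    spinlock_unlock_intr_restore(&queue_lock, flags);
}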