author    Richard Braun <rbraun@sceen.net>  2019-07-03 10:53:46 +0200
committer Richard Braun <rbraun@sceen.net>  2019-08-16 03:57:13 +0200
commit    b0c247010837ad7880d20a676f4eada30be49090
tree      bc7aac0a62e6af95f7e66508784a3be6b84b0b8f
parent    d90cece08af32aaca4d6ab766b3cf15e43020552
First attempt, incomplete
 kern/spinlock.h | 16 +++++++++-------
 kern/thread.h   |  6 ------
 2 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 3060aaa6..f0243e77 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -73,7 +73,8 @@ void spinlock_init(struct spinlock *lock);
*
* Return 0 on success, EBUSY if the spin lock is already locked.
*
- * Preemption is disabled on success.
+ * If the operation succeeds, prior calls to spinlock_unlock on the same
+ * spin lock synchronize with this operation.
*/
static inline int
spinlock_trylock(struct spinlock *lock)
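
For context, a minimal caller of spinlock_trylock under this revised contract
might look like the sketch below. Only spinlock_trylock, spinlock_unlock, and
the EBUSY return come from the header above; stats_lock, stats_counter, and
stats_try_update are hypothetical names for illustration.

#include <errno.h>
#include <stdbool.h>

#include <kern/spinlock.h>

static struct spinlock stats_lock;   /* initialized elsewhere with spinlock_init */
static unsigned long stats_counter;

/*
 * Opportunistically bump a counter. On contention, skip the update
 * instead of spinning; EBUSY is the documented failure mode.
 */
static bool
stats_try_update(void)
{
    if (spinlock_trylock(&stats_lock) == EBUSY) {
        return false;
    }

    /*
     * On success, prior spinlock_unlock calls on stats_lock synchronize
     * with this acquisition, so the value written by the previous holder
     * is visible here.
     */
    stats_counter++;
    spinlock_unlock(&stats_lock);
    return true;
}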
@@ -98,7 +99,8 @@ spinlock_trylock(struct spinlock *lock)
*
* A spin lock can only be locked once.
*
- * This function disables preemption.
+ * Prior calls to spinlock_unlock on the same spin lock synchronize with
+ * this operation.
*/
static inline void
spinlock_lock(struct spinlock *lock)
@@ -112,8 +114,6 @@ spinlock_lock(struct spinlock *lock)
*
* The spin lock must be locked, and must have been locked on the same
* processor it is unlocked on.
- *
- * This function may reenable preemption.
*/
static inline void
spinlock_unlock(struct spinlock *lock)
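
Taken together, the two hunks above drop the promise that locking disables
preemption and that unlocking may reenable it. Since the unlock must still
happen on the processor that performed the lock, a caller presumably has to
keep itself from migrating across the critical section; the sketch below
assumes x15's thread_preempt_disable/thread_preempt_enable pair for that, and
counter_lock and counter_inc are hypothetical names.

#include <kern/spinlock.h>
#include <kern/thread.h>

static struct spinlock counter_lock;  /* initialized elsewhere with spinlock_init */
static unsigned long counter;

static void
counter_inc(void)
{
    /*
     * Pin the thread to the current processor for the whole critical
     * section: per the revised comments, the lock no longer disables
     * preemption, but the unlock must run on the same processor as
     * the lock.
     */
    thread_preempt_disable();
    spinlock_lock(&counter_lock);
    counter++;
    spinlock_unlock(&counter_lock);
    thread_preempt_enable();
}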
@@ -132,9 +132,11 @@ spinlock_unlock(struct spinlock *lock)
*
* Return 0 on success, EBUSY if the spin lock is already locked.
*
- * Preemption and interrupts are disabled on success, in which case the
- * flags passed by the caller are filled with the previous value of the
- * CPU flags.
+ * Interrupts are disabled on success, in which case the flags passed
+ * by the caller are filled with the previous value of the CPU flags.
+ *
+ * If the operation succeeds, prior calls to spinlock_unlock on the same
+ * spin lock synchronize with this operation.
*/
static inline int
spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
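
A sketch of the interrupt-safe variant follows, assuming the matching
spinlock_unlock_intr_restore from the same header (not shown in this diff);
irq_lock and irq_shared_try_work are hypothetical names.

#include <errno.h>
#include <stdbool.h>

#include <kern/spinlock.h>

static struct spinlock irq_lock;  /* shared with an interrupt handler */

static bool
irq_shared_try_work(void)
{
    unsigned long flags;

    if (spinlock_trylock_intr_save(&irq_lock, &flags) == EBUSY) {
        return false;
    }

    /*
     * Interrupts are now disabled and flags holds the previous CPU
     * flags, as documented above. Do the work that must not race with
     * the interrupt handler, then restore.
     */

    spinlock_unlock_intr_restore(&irq_lock, flags);
    return true;
}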
diff --git a/kern/thread.h b/kern/thread.h
index a65b4999..10cf89e6 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -226,8 +226,6 @@ void thread_join(struct thread *thread);
* When bounding the duration of the sleep, the caller must pass an absolute
* time in ticks, and ETIMEDOUT is returned if that time is reached before
* the thread is awoken.
- *
- * Implies a memory barrier.
*/
void thread_sleep(struct spinlock *interlock, const void *wchan_addr,
const char *wchan_desc);
@@ -240,8 +238,6 @@ int thread_timedsleep(struct spinlock *interlock, const void *wchan_addr,
* If the target thread is NULL, the calling thread, or already in the
* running state, or in the suspended state, no action is performed and
* EINVAL is returned.
- *
- * TODO Describe memory ordering with regard to thread_sleep().
*/
int thread_wakeup(struct thread *thread);
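
The sleep/wakeup pair still works as an interlocked wait channel even with
the barrier wording removed; a single-waiter sketch follows. It assumes the
usual interlock convention (thread_sleep releases the interlock while asleep
and reacquires it before returning) and x15's thread_self; cond_lock,
cond_ready, and cond_waiter are hypothetical names.

#include <stdbool.h>
#include <stddef.h>

#include <kern/spinlock.h>
#include <kern/thread.h>

static struct spinlock cond_lock;  /* initialized elsewhere with spinlock_init */
static struct thread *cond_waiter;
static bool cond_ready;

/* Waiter: block until cond_ready is set. */
static void
cond_wait(void)
{
    spinlock_lock(&cond_lock);

    while (!cond_ready) {
        cond_waiter = thread_self();
        /* Releases cond_lock while asleep, reacquires before returning. */
        thread_sleep(&cond_lock, &cond_ready, "cond");
    }

    cond_waiter = NULL;
    spinlock_unlock(&cond_lock);
}

/* Waker: set the condition, then wake the recorded thread, if any. */
static void
cond_signal(void)
{
    spinlock_lock(&cond_lock);
    cond_ready = true;

    if (cond_waiter != NULL) {
        thread_wakeup(cond_waiter);
    }

    spinlock_unlock(&cond_lock);
}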
@@ -286,8 +282,6 @@ noreturn void thread_run_scheduler(void);
* This call does nothing if preemption is disabled, or the scheduler
* determines the caller should continue to run (e.g. it's currently the only
* runnable thread).
- *
- * Implies a full memory barrier if a context switch occurred.
*/
void thread_yield(void);
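
As a usage note, thread_yield fits long-running loops that want to offer the
processor without blocking; a minimal sketch, where long_task and its
iteration count are hypothetical:

#include <kern/thread.h>

static void
long_task(unsigned long n)
{
    for (unsigned long i = 0; i < n; i++) {
        /* ... one unit of work ... */

        /*
         * Offer the processor periodically. Per the comment above, this
         * is a no-op when preemption is disabled or when the scheduler
         * decides the caller should keep running.
         */
        if ((i % 1024) == 0) {
            thread_yield();
        }
    }
}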