Diffstat (limited to 'kern')
-rw-r--r--   kern/atomic.h    3
-rw-r--r--   kern/rcu.h       8
-rw-r--r--   kern/thread.h   22
3 files changed, 15 insertions, 18 deletions
diff --git a/kern/atomic.h b/kern/atomic.h
index 2ecb28f7..04a08f3f 100644
--- a/kern/atomic.h
+++ b/kern/atomic.h
@@ -24,9 +24,6 @@
* Some configurations may not support 64-bit operations. Check if the
* ATOMIC_HAVE_64B_OPS macro is defined to find out.
*
- * TODO Replace mentions of "memory barriers" throughout the code with
- * C11 memory model terminology.
- *
* TODO Decide if architecture-specific atomic operations should be
* provided, and if so, how.
*/
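As an illustrative sketch (not part of this diff), a caller can feature-test 64-bit atomics with the macro mentioned in the comment above; the example_counter_t name is hypothetical:

#include <stdint.h>

#include <kern/atomic.h>

#ifdef ATOMIC_HAVE_64B_OPS
/* 64-bit counters may rely on atomic operations directly. */
typedef uint64_t example_counter_t;
#else
/* Otherwise, fall back to 32-bit state or explicit locking. */
typedef uint32_t example_counter_t;
#endif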
diff --git a/kern/rcu.h b/kern/rcu.h
index eb82a6af..55663311 100644
--- a/kern/rcu.h
+++ b/kern/rcu.h
@@ -80,24 +80,24 @@ struct rcu_reader;
/*
* Enter a read-side critical section.
*
- * This is an intra-thread acquire operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
rcu_read_enter(void)
{
rcu_reader_inc(thread_rcu_reader(thread_self()));
- latomic_fence(LATOMIC_ACQ_REL);
+ latomic_fence(LATOMIC_SEQ_CST);
}
/*
* Leave a read-side critical section.
*
- * This is an intra-thread release operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
rcu_read_leave(void)
{
- latomic_fence(LATOMIC_ACQ_REL);
+ latomic_fence(LATOMIC_SEQ_CST);
rcu_reader_dec(thread_rcu_reader(thread_self()));
}
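An illustrative read-side sketch, not part of this diff; struct obj, obj_get_ptr() and example_reader() are hypothetical stand-ins for an RCU-protected pointer published by an updater:

#include <stddef.h>

#include <kern/rcu.h>

struct obj;

const struct obj * obj_get_ptr(void);   /* hypothetical accessor */

void
example_reader(void)
{
    const struct obj *obj;

    rcu_read_enter();           /* enter the read-side critical section */

    obj = obj_get_ptr();        /* load the RCU-protected pointer */

    if (obj != NULL) {
        /* ... safely dereference obj until the section ends ... */
    }

    rcu_read_leave();           /* leave the read-side critical section */
}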
diff --git a/kern/thread.h b/kern/thread.h
index 75ff4b24..9afad463 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -229,7 +229,7 @@ void thread_join(struct thread *thread);
* the thread is awoken.
*
* If this function causes the current thread to yield the processor,
- * this is an intra-thread release-acquire operation.
+ * there is a strong sequence point during this operation.
*/
void thread_sleep(struct spinlock *interlock, const void *wchan_addr,
const char *wchan_desc);
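A hypothetical sleep pattern, for illustration only; struct waiter is made up, and the sketch assumes the usual interlock protocol, i.e. the interlock is held around thread_sleep() and the condition is rechecked in a loop after each wake-up:

#include <stdbool.h>

#include <kern/spinlock.h>
#include <kern/thread.h>

struct waiter {
    struct spinlock lock;
    bool ready;
};

void
waiter_wait(struct waiter *waiter)
{
    spinlock_lock(&waiter->lock);

    /*
     * Assumed protocol: the interlock is held across thread_sleep()
     * calls, and the condition is rechecked after every wake-up.
     */
    while (!waiter->ready) {
        thread_sleep(&waiter->lock, waiter, "waiter");
    }

    spinlock_unlock(&waiter->lock);
}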
@@ -272,7 +272,7 @@ int thread_resume(struct thread *thread);
* Suspend execution of the calling thread.
*
* If this function causes the calling thread to yield the processor,
- * this is an intra-thread release-acquire operation.
+ * there is a strong sequence point during this operation.
*/
void thread_delay(uint64_t ticks, bool absolute);
@@ -545,7 +545,7 @@ thread_pinned(void)
/*
* Pin the current thread to the current processor.
*
- * This is an intra-thread release-acquire operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_pin(void)
@@ -561,7 +561,7 @@ thread_pin(void)
/*
* Unpin the current thread from the current processor.
*
- * This is an intra-thread release operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_unpin(void)
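A minimal pinned section, sketched for illustration (example_pinned is a hypothetical caller):

#include <kern/thread.h>

void
example_pinned(void)
{
    thread_pin();

    /*
     * The thread now keeps running on the current processor until
     * unpinned, although it may still be preempted by other threads
     * on that processor.
     */

    thread_unpin();
}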
@@ -592,7 +592,7 @@ thread_preempt_enabled(void)
/*
* Disable preemption.
*
- * This is an intra-thread acquire operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_preempt_disable(void)
@@ -613,7 +613,7 @@ thread_preempt_disable(void)
* never performs voluntary preemption. This may break real-time behavior
* and should never be used in application code.
*
- * This is an intra-thread release operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_preempt_enable_no_resched(void)
@@ -635,7 +635,7 @@ thread_preempt_enable_no_resched(void)
/*
* Enable preemption.
*
- * This is an intra-thread release operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_preempt_enable(void)
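A minimal preemption-disabled section, sketched for illustration (example_no_preempt is a hypothetical caller):

#include <kern/thread.h>

void
example_no_preempt(void)
{
    thread_preempt_disable();

    /*
     * No voluntary preemption occurs here; keep this section short
     * and do not sleep while preemption is disabled.
     */

    thread_preempt_enable();
}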
@@ -653,7 +653,7 @@ thread_preempt_enable(void)
/*
* Disable preemption and interrupts.
*
- * This is an intra-thread acquire operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_preempt_disable_intr_save(unsigned long *flags)
@@ -665,7 +665,7 @@ thread_preempt_disable_intr_save(unsigned long *flags)
/*
* Enable preemption and restore interrupts.
*
- * This is an intra-thread release operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_preempt_enable_intr_restore(unsigned long flags)
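A minimal sketch disabling both preemption and local interrupts, for illustration (example_no_preempt_no_intr is a hypothetical caller):

#include <kern/thread.h>

void
example_no_preempt_no_intr(void)
{
    unsigned long flags;

    thread_preempt_disable_intr_save(&flags);

    /*
     * Both preemption and local interrupts are disabled here, e.g. to
     * access data shared with interrupt handlers on this processor.
     */

    thread_preempt_enable_intr_restore(flags);
}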
@@ -705,7 +705,7 @@ thread_check_intr_context(void)
/*
* Enter interrupt context.
*
- * This is an intra-thread acquire operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_intr_enter(void)
@@ -726,7 +726,7 @@ thread_intr_enter(void)
/*
* Leave interrupt context.
*
- * This is an intra-thread release operation.
+ * There is a strong sequence point during this operation.
*/
static inline void
thread_intr_leave(void)