author    Richard Braun <rbraun@sceen.net>    2017-08-31 23:17:47 +0200
committer Richard Braun <rbraun@sceen.net>    2017-08-31 23:17:47 +0200
commit    fe715443e82b068a0b061f1b8ed3c01a96b1e4ba (patch)
tree      56c19e3355700f37c82522c178635fb5039ddde3 /kern
parent    bf9ea21a402d3e006edce6d87ca7b0a16adbe3f0 (diff)
kern/thread: fix atomic accesses to a thread's run queue
Diffstat (limited to 'kern')
 -rw-r--r--  kern/thread.c   | 12 ++++++++----
 -rw-r--r--  kern/thread_i.h | 17 +++++++++++------
 2 files changed, 19 insertions(+), 10 deletions(-)
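
The bug being fixed: thread->runq is read by other threads without holding its run queue lock (see thread_lock_runq below), and under the C11 memory model a plain read that is concurrent with a write to the same location is a data race, i.e. undefined behavior. The commit marks those cross-thread accesses as relaxed atomics, which makes them well defined while imposing no ordering. A minimal standalone demonstration of the distinction, not x15 code, with all names made up for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int slot_a, slot_b;

/* Shared pointer: one thread stores, another loads, no common lock. */
static int *_Atomic current_slot = &slot_a;

static void *
writer(void *arg)
{
    (void)arg;

    for (int i = 0; i < 100000; i++) {
        /* Atomic but unordered: enough to avoid undefined behavior. */
        atomic_store_explicit(&current_slot, (i & 1) ? &slot_b : &slot_a,
                              memory_order_relaxed);
    }

    return NULL;
}

int
main(void)
{
    pthread_t tid;

    pthread_create(&tid, NULL, writer, NULL);

    for (int i = 0; i < 100000; i++) {
        /* A plain read of a non-atomic pointer here would be a race. */
        int *p = atomic_load_explicit(&current_slot, memory_order_relaxed);
        (void)p;
    }

    pthread_join(tid, NULL);
    puts("done");
    return 0;
}

With plain accesses of a non-atomic pointer instead of the two _explicit calls, ThreadSanitizer (-fsanitize=thread) reports a data race; with the relaxed atomics, compilers still emit an ordinary load and store on typical hardware, so the annotation costs nothing here.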
diff --git a/kern/thread.c b/kern/thread.c
index e4d8f7cf..4a9cb2a1 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -492,7 +492,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
         thread_set_flag(runq->current, THREAD_YIELD);
     }
 
-    thread->runq = runq;
+    atomic_store(&thread->runq, runq, ATOMIC_RELAXED);
     thread->in_runq = true;
 }
 
@@ -1861,11 +1861,11 @@ thread_lock_runq(struct thread *thread, unsigned long *flags)
     struct thread_runq *runq;
 
     for (;;) {
-        runq = thread->runq; /* TODO Atomic access */
+        runq = atomic_load(&thread->runq, ATOMIC_RELAXED);
 
         spinlock_lock_intr_save(&runq->lock, flags);
 
-        if (runq == thread->runq) {
+        if (runq == atomic_load(&thread->runq, ATOMIC_RELAXED)) {
             return runq;
         }
 
@@ -2457,6 +2457,10 @@ thread_wakeup_common(struct thread *thread, int error)
     if (!thread->pinned) {
         runq = thread_get_real_sched_ops(thread)->select_runq(thread);
     } else {
+        /*
+         * This access doesn't need to be atomic, as the current thread is
+         * the only one which may update the member.
+         */
         runq = thread->runq;
         spinlock_lock(&runq->lock);
     }
@@ -2876,7 +2880,7 @@ thread_is_running(const struct thread *thread)
 {
     const struct thread_runq *runq;
 
-    runq = thread->runq;
+    runq = atomic_load(&thread->runq, ATOMIC_RELAXED);
 
     return (runq != NULL)
            && (atomic_load(&runq->current, ATOMIC_RELAXED) == thread);
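
The retry loop in thread_lock_runq is the subtle case: a thread can migrate to another run queue between the unlocked read of thread->runq and the acquisition of that queue's lock, so the pointer is re-read under the lock and the whole sequence retried until it is observed stable. A sketch of the same lock-and-recheck pattern in plain C11, with hypothetical stand-in types and stubbed spinlocks (x15's real spinlock_lock_intr_save also disables interrupts, omitted here):

#include <stdatomic.h>

struct runq_sketch {
    int lock;                     /* placeholder for a real spinlock */
};

struct thread_sketch {
    /* Stored to only with the owning queue locked; loaded anywhere. */
    struct runq_sketch *_Atomic runq;
};

static void spin_lock(int *lock)   { (void)lock; /* stub */ }
static void spin_unlock(int *lock) { (void)lock; /* stub */ }

/*
 * Lock and return the run queue the thread is attached to.
 *
 * The unlocked load must be atomic to avoid a data race, but relaxed
 * ordering suffices: correctness comes from rechecking the pointer
 * under the lock, not from any ordering on the load itself.
 */
static struct runq_sketch *
thread_lock_runq_sketch(struct thread_sketch *thread)
{
    for (;;) {
        struct runq_sketch *runq;

        runq = atomic_load_explicit(&thread->runq, memory_order_relaxed);
        spin_lock(&runq->lock);

        /* The thread may have migrated before the lock was acquired. */
        if (runq == atomic_load_explicit(&thread->runq,
                                         memory_order_relaxed)) {
            return runq;          /* holding runq->lock pins the thread */
        }

        spin_unlock(&runq->lock);
    }
}

Holding runq->lock across the successful recheck is what makes the return value meaningful: the thread cannot be moved off that queue until the caller releases the lock.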
diff --git a/kern/thread_i.h b/kern/thread_i.h
index 9ff4b764..4218b50b 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -95,6 +95,11 @@ struct thread_fs_data {
  * (a) atomic
  * (-) thread-local
  * ( ) read-only
+ *
+ * (*) The runq member is used to determine which run queue lock must be
+ *     held to serialize access to the relevant members. However, it is only
+ *     updated while the associated run queue is locked. As a result, atomic
+ *     reads are only necessary outside critical sections.
  */
 struct thread {
     alignas(CPU_L1_SIZE) struct tcb tcb;        /* (r) */
@@ -103,12 +108,12 @@ struct thread {
     unsigned long flags;                        /* (a) */
 
     /* Sleep/wake-up synchronization members */
-    struct thread_runq *runq;                   /* (r) */
-    bool in_runq;                               /* (r) */
-    const void *wchan_addr;                     /* (r) */
-    const char *wchan_desc;                     /* (r) */
-    int wakeup_error;                           /* (r) */
-    unsigned short state;                       /* (r) */
+    struct thread_runq *runq;                   /* (r,*) */
+    bool in_runq;                               /* (r) */
+    const void *wchan_addr;                     /* (r) */
+    const char *wchan_desc;                     /* (r) */
+    int wakeup_error;                           /* (r) */
+    unsigned short state;                       /* (r) */
 
     /* Sleep queue available for lending */
     struct sleepq *priv_sleepq;                 /* (-) */
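
The (*) rule added above amounts to writers being serialized by the run queue lock: runq is only ever stored to with that lock held, so writes never race with each other, and a reader already inside the critical section sees a stable value; only unlocked readers need the atomic annotation. A sketch of both sides, again with hypothetical names rather than x15's:

#include <stdatomic.h>

struct rq_sketch { int lock; };   /* placeholder spinlock, as above */

struct th_sketch {
    struct rq_sketch *_Atomic runq;   /* (r,*) in the new notation */
};

/*
 * Writer: assumed to run with the owning run queue lock held, per the
 * new (*) comment. The store is relaxed because the lock already
 * serializes all writers; the atomic annotation exists purely so that
 * unlocked readers are free of data races.
 */
static void
th_set_runq_sketch(struct th_sketch *thread, struct rq_sketch *runq)
{
    atomic_store_explicit(&thread->runq, runq, memory_order_relaxed);
}

/*
 * Reader that already holds the lock: the value cannot change until
 * the lock is released, so ordering is irrelevant; a relaxed load
 * (or, in x15, a plain access) returns a stable result.
 */
static struct rq_sketch *
th_get_runq_locked_sketch(struct th_sketch *thread)
{
    return atomic_load_explicit(&thread->runq, memory_order_relaxed);
}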