summary | refs | log | tree | commit | diff
path: root/kern/thread.c
diff options
context:
space:
mode:
author    Richard Braun <rbraun@sceen.net>  2017-08-31 23:17:47 +0200
committer Richard Braun <rbraun@sceen.net>  2017-08-31 23:17:47 +0200
commit    fe715443e82b068a0b061f1b8ed3c01a96b1e4ba (patch)
tree      56c19e3355700f37c82522c178635fb5039ddde3 /kern/thread.c
parent    bf9ea21a402d3e006edce6d87ca7b0a16adbe3f0 (diff)
kern/thread: fix atomic accesses to a thread's run queue
Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index e4d8f7cf..4a9cb2a1 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -492,7 +492,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
thread_set_flag(runq->current, THREAD_YIELD);
}
- thread->runq = runq;
+ atomic_store(&thread->runq, runq, ATOMIC_RELAXED);
thread->in_runq = true;
}
@@ -1861,11 +1861,11 @@ thread_lock_runq(struct thread *thread, unsigned long *flags)
struct thread_runq *runq;
for (;;) {
- runq = thread->runq; /* TODO Atomic access */
+ runq = atomic_load(&thread->runq, ATOMIC_RELAXED);
spinlock_lock_intr_save(&runq->lock, flags);
- if (runq == thread->runq) {
+ if (runq == atomic_load(&thread->runq, ATOMIC_RELAXED)) {
return runq;
}
@@ -2457,6 +2457,10 @@ thread_wakeup_common(struct thread *thread, int error)
if (!thread->pinned) {
runq = thread_get_real_sched_ops(thread)->select_runq(thread);
} else {
+ /*
+ * This access doesn't need to be atomic, as the current thread is
+ * the only one which may update the member.
+ */
runq = thread->runq;
spinlock_lock(&runq->lock);
}
@@ -2876,7 +2880,7 @@ thread_is_running(const struct thread *thread)
{
const struct thread_runq *runq;
- runq = thread->runq;
+ runq = atomic_load(&thread->runq, ATOMIC_RELAXED);
return (runq != NULL)
&& (atomic_load(&runq->current, ATOMIC_RELAXED) == thread);