summary refs log tree commit diff
path: root/kern/thread.c
diff options
context:
space:
mode:
author	Richard Braun <rbraun@sceen.net>	2013-03-16 19:20:17 +0100
committer	Richard Braun <rbraun@sceen.net>	2013-03-16 19:20:17 +0100
commit	d485d8df2e0c352cd5d5e06d49454d2c60e761ed (patch)
tree	fbc7427c03a16bbe22b7434e63fbac4448a3fbf3 /kern/thread.c
parent	a28e32ba726031cf736560988bc7b0099c22ebc8 (diff)
kern/thread: simplify remote wakeups
Diffstat (limited to 'kern/thread.c')
-rw-r--r--	kern/thread.c	19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 2d51d13d..d08ce9d6 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -383,23 +383,16 @@ thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
thread->state = THREAD_RUNNING;
thread_runq_add(runq, thread);
- if (runq != thread_runq_local()) {
+ if ((runq != thread_runq_local())
+ && thread_test_flag(runq->current, THREAD_RESCHEDULE)) {
/*
- * Make the new state and flags globally visible so that a remote
- * rescheduling operation sees the correct values.
- *
- * Although scheduling implies a load memory barrier before testing
- * the state of a thread (because of the spin lock acquire semantics),
- * this isn't the case with thread flags. They are set atomically,
- * but not ordered. As a result, reenabling preemption may miss a
- * rescheduling request. But interrupts imply full memory barriers
- * so the request won't be missed when the rescheduling IPI is
- * received by the remote processor.
+ * Make the new flags globally visible before sending the
+ * rescheduling request. This barrier pairs with the one implied
+ * by the rescheduling IPI.
*/
mb_store();
- if (thread_test_flag(runq->current, THREAD_RESCHEDULE))
- tcb_send_reschedule(thread_runq_id(runq));
+ tcb_send_reschedule(thread_runq_id(runq));
}
}