| author | Richard Braun <rbraun@sceen.net> | 2013-05-09 15:23:55 +0200 |
|---|---|---|
| committer | Richard Braun <rbraun@sceen.net> | 2013-05-09 15:23:55 +0200 |
| commit | ab9defa43276354fcc4837c8e2014609b391e1d1 (patch) | |
| tree | 58f54330b27e20a7e7c37c0e71bb3ecb5af11c83 | |
| parent | 8af6d140094314684d6622df6c07574dbb29ffee (diff) | |
kern/thread: remove an unneeded memory barrier
The memory barrier semantics of locks already provide all the required
ordering when checking for pinned threads.
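To make that concrete, below is a minimal sketch, not x15 code, of a spinlock built on C11 atomics; the toy_spinlock names are hypothetical. A release by the previous lock holder synchronizes with the acquire of the next one, so writes made before releasing the lock are visible to whichever processor takes the lock afterwards, which is all the ordering the pinned check needs.

#include <stdatomic.h>

struct toy_spinlock {
    atomic_flag locked;
};

#define TOY_SPINLOCK_INIT { ATOMIC_FLAG_INIT }

static void
toy_spinlock_lock(struct toy_spinlock *lock)
{
    /*
     * Acquire semantics: accesses after this point can't be reordered
     * before it, and the acquire synchronizes with the release
     * performed by the previous lock holder.
     */
    while (atomic_flag_test_and_set_explicit(&lock->locked,
                                             memory_order_acquire))
        continue;
}

static void
toy_spinlock_unlock(struct toy_spinlock *lock)
{
    /*
     * Release semantics: accesses before this point can't be
     * reordered after it.
     */
    atomic_flag_clear_explicit(&lock->locked, memory_order_release);
}

Unlocking and then relocking the run queue therefore behaves like a full memory barrier as far as any other processor taking the lock in between is concerned, which is what the updated comment in thread_runq_schedule() below records.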
-rw-r--r-- | kern/thread.c | 12 |
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 147bd6a1..88537ea5 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -492,7 +492,9 @@ thread_runq_schedule(struct thread_runq *runq, struct thread *prev)
 
     /*
      * That's where the true context switch occurs. The next thread must
-     * unlock the run queue and reenable preemption.
+     * unlock the run queue and reenable preemption. Note that unlocking
+     * and locking the run queue again is equivalent to a full memory
+     * barrier.
      */
 
     tcb_switch(&prev->tcb, &next->tcb);
@@ -1096,13 +1098,11 @@ thread_sched_ts_balance_pull(struct thread_runq *runq,
         /*
          * The pinned counter is changed without explicit synchronization.
          * However, it can only be changed by its owning thread. As threads
          * currently running aren't considered for migration, the thread had
-         * to be preempted, and invoke the scheduler, which globally acts
-         * as a memory barrier. As a result, there is strong ordering between
+         * to be preempted and invoke the scheduler. Since balancer threads
+         * acquire the run queue lock, there is strong ordering between
          * changing the pinned counter and setting the current thread of a
-         * run queue. Enforce the same ordering on the pulling processor.
+         * run queue.
          */
-        mb_load();
-
         if (thread->pinned)
             continue;
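For illustration, here is a hedged, self-contained sketch of the ordering argument in the second hunk, with a POSIX mutex standing in for the run queue lock; the names pinned, runq_current, owner_thread and balancer_thread are hypothetical stand-ins, not x15 API. The owning thread updates its pinned counter, then is preempted and records under the lock that it is off the CPU; a balancer that takes the same lock and observes the thread off the CPU is therefore guaranteed to observe the counter as well, with no standalone mb_load() required.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;
static int pinned;           /* written only by the owning thread */
static int runq_current = 1; /* stands in for "thread is running on the CPU" */

static void *
owner_thread(void *arg)
{
    (void)arg;
    pinned = 1;                       /* plain store, no explicit barrier */
    pthread_mutex_lock(&runq_lock);   /* preemption: scheduler takes the lock */
    runq_current = 0;                 /* thread leaves the CPU */
    pthread_mutex_unlock(&runq_lock); /* release orders both stores above */
    return NULL;
}

static void *
balancer_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&runq_lock);   /* acquire pairs with the release */
    if (runq_current == 0) {
        /*
         * Only non-running threads are considered for migration, so
         * seeing runq_current == 0 implies the pinned store is visible.
         */
        printf("pinned=%d\n", pinned);
    }
    pthread_mutex_unlock(&runq_lock);
    return NULL;
}

int
main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, owner_thread, NULL);
    pthread_create(&b, NULL, balancer_thread, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}

If the balancer runs first it simply sees runq_current == 1 and skips the check, which mirrors the rule that currently running threads aren't considered for migration.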