| author | Richard Braun <rbraun@sceen.net> | 2013-03-16 18:36:11 +0100 |
|---|---|---|
| committer | Richard Braun <rbraun@sceen.net> | 2013-03-16 18:36:11 +0100 |
| commit | a28e32ba726031cf736560988bc7b0099c22ebc8 (patch) | |
| tree | 1684295c706e08c13ff694ab6ba4b3f793596af2 /kern/thread.c | |
| parent | 8f65bcd715b3ec1c4391c2bfaa116c04fd936903 (diff) | |
kern/thread: don't migrate pinned threads
Diffstat (limited to 'kern/thread.c')
-rw-r--r-- | kern/thread.c | 14 ++++++++++++++
1 file changed, 14 insertions, 0 deletions
```diff
diff --git a/kern/thread.c b/kern/thread.c
index f9944ae4..2d51d13d 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1003,6 +1003,20 @@ thread_sched_ts_balance_pull(struct thread_runq *runq,
             continue;
 
         /*
+         * The pinned counter is changed without explicit synchronization.
+         * However, it can only be changed by its owning thread. As threads
+         * currently running aren't considered for migration, the thread had
+         * to be preempted, and called thread_schedule(), which globally acts
+         * as a memory barrier. As a result, there is strong ordering between
+         * changing the pinned counter and setting the current thread of a
+         * run queue. Enforce the same ordering on the pulling processor.
+         */
+        mb_load();
+
+        if (thread->pinned)
+            continue;
+
+        /*
          * Make sure at least one thread is pulled if possible. If one or more
          * thread has already been pulled, take weights into account.
          */
```
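To make the barrier pairing described in that comment concrete, here is a minimal user-space model of the argument, written with C11 atomics rather than the kernel's own primitives. The structure layouts, the function names owner_pin_then_switch() and puller_may_migrate(), and the use of release/acquire in place of the implicit full barrier in thread_schedule() and of mb_load() are illustrative assumptions, not the actual x15 code.

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel structures (not the real layouts). */
struct thread {
    /* Written only by the owning thread, read by pulling processors. */
    atomic_ushort pinned;
};

struct thread_runq {
    /* Thread currently running on this run queue's processor. */
    _Atomic(struct thread *) current;
};

/*
 * Owning thread: pin itself, then get preempted.  In the kernel, the plain
 * increment of the pinned counter is ordered before the run queue update by
 * the full barrier inside thread_schedule(); the release store models that
 * ordering here.
 */
static void
owner_pin_then_switch(struct thread *self, struct thread_runq *runq,
                      struct thread *next)
{
    atomic_fetch_add_explicit(&self->pinned, 1, memory_order_relaxed);
    atomic_store_explicit(&runq->current, next, memory_order_release);
}

/*
 * Pulling processor: skip threads that are currently running, then skip
 * pinned threads.  The acquire load plays the role of mb_load(): it orders
 * the read of runq->current before the read of thread->pinned, so a thread
 * observed as no longer running has already published its pinned update.
 */
static bool
puller_may_migrate(struct thread *thread, struct thread_runq *remote_runq)
{
    if (thread == atomic_load_explicit(&remote_runq->current,
                                       memory_order_acquire)) {
        return false;
    }

    if (atomic_load_explicit(&thread->pinned, memory_order_relaxed) != 0) {
        return false;
    }

    return true;
}
```

The point of the design, as the comment states, is that the pinned counter needs no lock: it is only ever written by its owning thread, so all the pulling processor has to guarantee is that its read of the remote run queue's current thread is ordered before its read of the counter.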