diff options
author | Richard Braun <rbraun@sceen.net> | 2017-02-07 20:18:44 +0100 |
---|---|---|
committer | Richard Braun <rbraun@sceen.net> | 2017-02-07 20:18:44 +0100 |
commit | 05f3b97ee07e6af26613dfbfe57619a700aa349a (patch) | |
tree | 2e69b4e219153a3884054396f2e537f1f2ce86d8 /kern | |
parent | 09558df791f53dbd1da39216839ae5219b7ef1b0 (diff) |
kern/thread: fix scheduling
Commit fd4c1bc361429fa17cfeb953ec8323bd62053601 made the pointer for
the current thread of a run queue NULL during scheduling, but this
actually breaks existing assumptions. Those changes are reverted in
this commit.
Diffstat (limited to 'kern')
-rw-r--r-- | kern/thread.c | 18 |
1 file changed, 4 insertions, 14 deletions
diff --git a/kern/thread.c b/kern/thread.c index d354270c..9c3a01be 100644 --- a/kern/thread.c +++ b/kern/thread.c @@ -447,8 +447,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread) runq->nr_threads++; - if ((runq->current != NULL) - && (thread_sched_class(runq->current) > thread_sched_class(thread))) { + if (thread_sched_class(thread) < thread_sched_class(runq->current)) { thread_set_flag(runq->current, THREAD_YIELD); } @@ -486,8 +485,6 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread) if (ops->put_prev != NULL) { ops->put_prev(runq, thread); } - - runq->current = NULL; } static struct thread * @@ -498,7 +495,6 @@ thread_runq_get_next(struct thread_runq *runq) assert(!cpu_intr_enabled()); spinlock_assert_locked(&runq->lock); - assert(runq->current == NULL); for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) { thread = thread_sched_ops[i].get_next(runq); @@ -518,8 +514,6 @@ thread_runq_set_next(struct thread_runq *runq, struct thread *thread) { const struct thread_sched_ops *ops; - assert(runq->current == NULL); - ops = thread_get_sched_ops(thread); if (ops->set_next != NULL) { @@ -539,7 +533,6 @@ thread_runq_wakeup(struct thread_runq *runq, struct thread *thread) thread_runq_add(runq, thread); if ((runq != thread_runq_local()) - && (runq->current != NULL) && thread_test_flag(runq->current, THREAD_YIELD)) { /* * Make the new flags globally visible before sending the scheduling @@ -682,9 +675,8 @@ thread_sched_rt_add(struct thread_runq *runq, struct thread *thread) rt_runq->bitmap |= (1ULL << thread_priority(thread)); } - if ((runq->current != NULL) - && (thread_sched_class(runq->current) == thread_sched_class(thread)) - && (thread_priority(runq->current) < thread_priority(thread))) { + if ((thread_sched_class(thread) == thread_sched_class(runq->current)) + && (thread_priority(thread) > thread_priority(runq->current))) { thread_set_flag(runq->current, THREAD_YIELD); } } @@ -956,8 +948,7 @@ thread_sched_fs_restart(struct 
thread_runq *runq) assert(node != NULL); fs_runq->current = list_entry(node, struct thread_fs_group, node); - if ((runq->current != NULL) - && (thread_sched_class(runq->current) == THREAD_SCHED_CLASS_FS)) { + if (thread_sched_class(runq->current) == THREAD_SCHED_CLASS_FS) { thread_set_flag(runq->current, THREAD_YIELD); } } @@ -2238,7 +2229,6 @@ thread_run_scheduler(void) sref_register(); spinlock_lock(&runq->lock); - runq->current = NULL; thread = thread_runq_get_next(thread_runq_local()); tcb_load(&thread->tcb); |