| author    | Richard Braun <rbraun@sceen.net> | 2013-06-01 16:07:56 +0200 |
|-----------|----------------------------------|---------------------------|
| committer | Richard Braun <rbraun@sceen.net> | 2013-06-01 16:07:56 +0200 |
| commit    | 0ce715c716f0510d9ec2d5a56dec4cbcfbd9522e (patch) | |
| tree      | a3d20082894b71fdfc37f765e75ab6920ba92ff5 | |
| parent    | a919943d8070ca15ba93a06258e90297d9f51e1e (diff) | |
kern/thread: slightly rework scheduler invocation
Rename THREAD_RESCHEDULE to THREAD_YIELD and thread_reschedule to
thread_yield for better clarity, and add the thread_schedule inline
function, which checks for the THREAD_YIELD flag before calling
thread_yield (thread_yield itself now only checks that preemption is
enabled).
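
In short, the check and the yield are now separate operations. The sketch below condenses the new call flow from the patch that follows; all names are taken from the patch itself:

```c
/*
 * Condensed from the patch below: thread_schedule() is the cheap
 * inline check run on return from interrupt and when reenabling
 * preemption; thread_yield() enters the scheduler unconditionally,
 * gated only by preemption being enabled.
 */
static inline void
thread_schedule(void)
{
    barrier();

    /* Fast path: nothing to do unless this thread was marked. */
    if (likely(!thread_test_flag(thread_self(), THREAD_YIELD)))
        return;

    thread_yield();
}

void
thread_yield(void)
{
    /* The THREAD_YIELD test moved out to thread_schedule(), so an
       explicit yield no longer requires the flag to be set. */
    if (!thread_preempt_enabled())
        return;

    /* ... scheduler loop, repeated while THREAD_YIELD remains set ... */
}
```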
| -rw-r--r-- | arch/x86/machine/trap.c | 2  |
| -rw-r--r-- | kern/thread.c           | 43 |
| -rw-r--r-- | kern/thread.h           | 54 |

3 files changed, 62 insertions, 37 deletions
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
index 983f99e2..42b5750d 100644
--- a/arch/x86/machine/trap.c
+++ b/arch/x86/machine/trap.c
@@ -245,7 +245,7 @@ trap_main(struct trap_frame *frame)
     if (handler->flags & TRAP_HF_NOPREEMPT)
         thread_preempt_enable_no_resched();
 
-    thread_reschedule();
+    thread_schedule();
 }
 
 #ifdef __LP64__
diff --git a/kern/thread.c b/kern/thread.c
index 1d113ea2..648f8ca1 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -99,7 +99,6 @@
 #include <kern/string.h>
 #include <kern/task.h>
 #include <kern/thread.h>
-#include <machine/atomic.h>
 #include <machine/cpu.h>
 #include <machine/mb.h>
 #include <machine/pmap.h>
@@ -362,25 +361,6 @@ thread_runq_local(void)
     return &thread_runqs[cpu_id()];
 }
 
-static inline void
-thread_set_flag(struct thread *thread, unsigned long flag)
-{
-    atomic_or(&thread->flags, flag);
-}
-
-static inline void
-thread_clear_flag(struct thread *thread, unsigned long flag)
-{
-    atomic_and(&thread->flags, ~flag);
-}
-
-static inline int
-thread_test_flag(struct thread *thread, unsigned long flag)
-{
-    barrier();
-    return ((thread->flags & flag) != 0);
-}
-
 static void
 thread_runq_add(struct thread_runq *runq, struct thread *thread)
 {
@@ -395,7 +375,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
     runq->nr_threads++;
 
     if (thread->sched_class < runq->current->sched_class)
-        thread_set_flag(runq->current, THREAD_RESCHEDULE);
+        thread_set_flag(runq->current, THREAD_YIELD);
 
     thread->runq = runq;
 }
@@ -455,7 +435,7 @@ thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
     thread_runq_add(runq, thread);
 
     if ((runq != thread_runq_local())
-        && thread_test_flag(runq->current, THREAD_RESCHEDULE)) {
+        && thread_test_flag(runq->current, THREAD_YIELD)) {
         /*
          * Make the new flags globally visible before sending the
          * rescheduling request. This barrier pairs with the one implied
@@ -488,7 +468,7 @@ thread_runq_schedule(struct thread_runq *runq, struct thread *prev)
 
     llsync_checkin(thread_runq_id(runq));
 
-    thread_clear_flag(prev, THREAD_RESCHEDULE);
+    thread_clear_flag(prev, THREAD_YIELD);
     thread_runq_put_prev(runq, prev);
 
     if (prev->state != THREAD_RUNNING) {
@@ -581,7 +561,7 @@ thread_sched_rt_add(struct thread_runq *runq, struct thread *thread)
 
     if ((thread->sched_class == runq->current->sched_class)
         && (thread->rt_data.priority > runq->current->rt_data.priority))
-        thread_set_flag(runq->current, THREAD_RESCHEDULE);
+        thread_set_flag(runq->current, THREAD_YIELD);
 }
 
 static void
@@ -639,7 +619,7 @@ thread_sched_rt_tick(struct thread_runq *runq, struct thread *thread)
         return;
 
     thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE;
-    thread_set_flag(thread, THREAD_RESCHEDULE);
+    thread_set_flag(thread, THREAD_YIELD);
 }
 
 static inline unsigned short
@@ -831,7 +811,7 @@ thread_sched_ts_restart(struct thread_runq *runq)
     ts_runq->current = list_entry(node, struct thread_ts_group, node);
 
     if (runq->current->sched_class == THREAD_SCHED_CLASS_TS)
-        thread_set_flag(runq->current, THREAD_RESCHEDULE);
+        thread_set_flag(runq->current, THREAD_YIELD);
 }
 
 static void
@@ -1010,7 +990,7 @@ thread_sched_ts_tick(struct thread_runq *runq, struct thread *thread)
     ts_runq->work++;
     group = &ts_runq->group_array[thread->ts_data.priority];
     group->work++;
-    thread_set_flag(thread, THREAD_RESCHEDULE);
+    thread_set_flag(thread, THREAD_YIELD);
     thread->ts_data.work++;
 }
 
@@ -1677,7 +1657,7 @@ thread_idle(void *arg)
     for (;;) {
         cpu_intr_disable();
 
-        if (thread_test_flag(self, THREAD_RESCHEDULE)) {
+        if (thread_test_flag(self, THREAD_YIELD)) {
             cpu_intr_enable();
             break;
         }
@@ -1931,7 +1911,7 @@ thread_run(void)
 }
 
 void
-thread_reschedule(void)
+thread_yield(void)
 {
     struct thread_runq *runq;
     struct thread *thread;
@@ -1939,8 +1919,7 @@
 
     thread = thread_self();
 
-    if (!thread_test_flag(thread, THREAD_RESCHEDULE)
-        || !thread_preempt_enabled())
+    if (!thread_preempt_enabled())
         return;
 
     do {
@@ -1950,7 +1929,7 @@
         runq = thread_runq_schedule(runq, thread);
         spinlock_unlock_intr_restore(&runq->lock, flags);
         thread_preempt_enable_no_resched();
-    } while (thread_test_flag(thread, THREAD_RESCHEDULE));
+    } while (thread_test_flag(thread, THREAD_YIELD));
 }
 
 void
diff --git a/kern/thread.h b/kern/thread.h
index 9d557455..7cef3eae 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -39,6 +39,7 @@
 #include <kern/list.h>
 #include <kern/macros.h>
 #include <kern/param.h>
+#include <machine/atomic.h>
 #include <machine/cpu.h>
 #include <machine/tcb.h>
 
@@ -58,7 +59,7 @@ struct thread_ts_runq;
 /*
  * Thread flags.
  */
-#define THREAD_RESCHEDULE 0x1UL /* Thread marked for reschedule */
+#define THREAD_YIELD      0x1UL /* Must yield the processor ASAP */
 
 /*
  * Thread states.
@@ -245,9 +246,13 @@ void thread_wakeup(struct thread *thread);
 void __noreturn thread_run(void);
 
 /*
- * Invoke the scheduler if the calling thread is marked for reschedule.
+ * Make the calling thread release the processor.
+ *
+ * This call does nothing if preemption is disabled, or the scheduler
+ * determines the caller should continue to run (e.g. it's currently the only
+ * runnable thread).
  */
-void thread_reschedule(void);
+void thread_yield(void);
 
 /*
  * Report a periodic timer interrupt on the thread currently running on
@@ -264,6 +269,47 @@ thread_self(void)
 }
 
 /*
+ * Flag access functions.
+ */
+
+static inline void
+thread_set_flag(struct thread *thread, unsigned long flag)
+{
+    atomic_or(&thread->flags, flag);
+}
+
+static inline void
+thread_clear_flag(struct thread *thread, unsigned long flag)
+{
+    atomic_and(&thread->flags, ~flag);
+}
+
+static inline int
+thread_test_flag(struct thread *thread, unsigned long flag)
+{
+    barrier();
+    return ((thread->flags & flag) != 0);
+}
+
+/*
+ * Main scheduler invocation call.
+ *
+ * Called on return from interrupt or when reenabling preemption.
+ *
+ * Implies a compiler barrier.
+ */
+static inline void
+thread_schedule(void)
+{
+    barrier();
+
+    if (likely(!thread_test_flag(thread_self(), THREAD_YIELD)))
+        return;
+
+    thread_yield();
+}
+
+/*
  * Migration control functions.
  *
  * Functions that change the migration state are implicit compiler barriers.
@@ -324,7 +370,7 @@ static inline void
 thread_preempt_enable(void)
 {
     thread_preempt_enable_no_resched();
-    thread_reschedule();
+    thread_schedule();
 }
 
 static inline void
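
For callers, the change is mainly visible when reenabling preemption, which now goes through thread_schedule() instead of calling into the scheduler unconditionally. A minimal usage sketch follows; thread_preempt_disable() is assumed to be the existing counterpart of thread_preempt_enable(), and the surrounding function is hypothetical:

```c
/* Hypothetical caller; only the thread_preempt_* calls are from the
 * kernel API touched by this patch. */
static void
example_touch_percpu_state(void)
{
    thread_preempt_disable();

    /* ... work that must not be preempted; a remote CPU may still
       set THREAD_YIELD on this thread in the meantime ... */

    /*
     * thread_preempt_enable() now ends with thread_schedule(): a
     * pending THREAD_YIELD request is honored here, while the common
     * case costs only an inline flag test.
     */
    thread_preempt_enable();
}
```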