author     Mocramis <mocramis@gmail.com>    2018-01-08 21:21:13 +0100
committer  Mocramis <mocramis@gmail.com>    2018-01-08 21:21:13 +0100
commit     bf5c783d4cad55ba41210ba71b3c8e28ce63cfa8 (patch)
tree       63ad90bbc798f6cca9b8cf3d11614b480fc76c1b /kern/thread.c
parent     1ec3d3f143a201984d51f1cff91b0fe29cde2b71 (diff)
parent     65f71c221037e468caa5921d23a86da34f3bd0a5 (diff)
Merge branch 'master' into perfmon
Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c  58
1 file changed, 44 insertions, 14 deletions
diff --git a/kern/thread.c b/kern/thread.c
index c1e734e..a03f2f3 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -341,6 +341,15 @@ static unsigned int thread_nr_keys __read_mostly;
  */
 static thread_dtor_fn_t thread_dtors[THREAD_KEYS_MAX] __read_mostly;
 
+/*
+ * Number of processors which have requested the scheduler to run.
+ *
+ * This value is used to implement a global barrier across the entire
+ * system at boot time, so that inter-processor requests may not be
+ * lost in case a processor is slower to initialize.
+ */
+static unsigned int thread_nr_boot_cpus __initdata;
+
 struct thread_zombie {
     struct work work;
     struct thread *thread;
@@ -477,7 +486,7 @@ thread_runq_add(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(!thread->in_runq);
 
     ops = thread_get_real_sched_ops(thread);
@@ -504,7 +513,7 @@ thread_runq_remove(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(thread->in_runq);
 
     runq->nr_threads--;
@@ -525,7 +534,7 @@ thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
     const struct thread_sched_ops *ops;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     ops = thread_get_real_sched_ops(thread);
 
@@ -541,7 +550,7 @@ thread_runq_get_next(struct thread_runq *runq)
     unsigned int i;
 
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     for (i = 0; i < ARRAY_SIZE(thread_sched_ops); i++) {
         thread = thread_sched_ops[i].get_next(runq);
@@ -574,7 +583,7 @@ static void
 thread_runq_wakeup(struct thread_runq *runq, struct thread *thread)
 {
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
     assert(thread->state == THREAD_RUNNING);
 
     thread_runq_add(runq, thread);
@@ -626,7 +635,7 @@ thread_runq_schedule(struct thread_runq *runq)
            && (__builtin_frame_address(0) < (prev->stack + TCB_STACK_SIZE)));
     assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     llsync_report_context_switch();
 
@@ -673,7 +682,7 @@ thread_runq_schedule(struct thread_runq *runq)
 
     assert(prev->preempt == THREAD_SUSPEND_PREEMPT_LEVEL);
     assert(!cpu_intr_enabled());
-    assert(spinlock_locked(&runq->lock));
+    spinlock_assert_locked(&runq->lock);
 
     return runq;
 }
@@ -778,8 +787,7 @@ thread_sched_rt_get_next(struct thread_runq *runq)
 }
 
 static void
-thread_sched_rt_reset_priority(struct thread *thread,
-                               __unused unsigned short priority)
+thread_sched_rt_reset_priority(struct thread *thread, unsigned short priority)
 {
     assert(priority <= THREAD_SCHED_RT_PRIO_MAX);
     thread->rt_data.time_slice = THREAD_DEFAULT_RR_TIME_SLICE;
@@ -1909,13 +1917,13 @@ thread_unlock_runq(struct thread_runq *runq, unsigned long flags)
 static void *
 thread_alloc_stack(void)
 {
-    __unused struct vm_page *first_page, *last_page;
+    struct vm_page *first_page, *last_page;
     phys_addr_t first_pa, last_pa;
     struct pmap *kernel_pmap;
     size_t stack_size;
     uintptr_t va;
     void *mem;
-    __unused int error;
+    int error;
 
     kernel_pmap = pmap_get_kernel_pmap();
 
@@ -2577,7 +2585,7 @@ void
 thread_sleep(struct spinlock *interlock, const void *wchan_addr,
              const char *wchan_desc)
 {
-    __unused int error;
+    int error;
 
     error = thread_sleep_common(interlock, wchan_addr, wchan_desc, false, 0);
     assert(!error);
@@ -2605,6 +2613,26 @@ thread_delay(uint64_t ticks, bool absolute)
     thread_preempt_enable();
 }
 
+static void __init
+thread_boot_barrier(void)
+{
+    unsigned int nr_cpus;
+
+    assert(!cpu_intr_enabled());
+
+    atomic_add(&thread_nr_boot_cpus, 1, ATOMIC_RELAXED);
+
+    for (;;) {
+        nr_cpus = atomic_load(&thread_nr_boot_cpus, ATOMIC_SEQ_CST);
+
+        if (nr_cpus == cpu_count()) {
+            break;
+        }
+
+        cpu_pause();
+    }
+}
+
 void __init
 thread_run_scheduler(void)
 {
@@ -2613,6 +2641,8 @@ thread_run_scheduler(void)
 
     assert(!cpu_intr_enabled());
 
+    thread_boot_barrier();
+
     runq = thread_runq_local();
     thread = thread_self();
     assert(thread == runq->current);
@@ -2814,14 +2844,14 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
                        unsigned short priority)
 {
     const struct thread_sched_ops *ops;
-    __unused struct turnstile_td *td;
     struct thread_runq *runq;
+    struct turnstile_td *td;
     unsigned int global_priority;
     unsigned long flags;
     bool requeue, current;
 
     td = thread_turnstile_td(thread);
-    assert(turnstile_td_locked(td));
+    turnstile_td_assert_lock(td);
 
     ops = thread_get_sched_ops(thread_policy_to_class(policy));
     global_priority = ops->get_global_priority(priority);
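For readers unfamiliar with the pattern, here is a minimal standalone sketch of the boot-time barrier this merge introduces. It uses C11 atomics rather than the kernel's atomic_add()/atomic_load() wrappers, and smp_cpu_count() and cpu_relax() are hypothetical stand-ins for the kernel's cpu_count() and cpu_pause(); this is an illustration of the technique, not the kernel's code.

#include <stdatomic.h>

/* Hypothetical stand-ins for the kernel's cpu_count() and cpu_pause(). */
extern unsigned int smp_cpu_count(void);
static inline void cpu_relax(void) { /* e.g. the x86 PAUSE instruction */ }

static atomic_uint boot_barrier_count;

/*
 * Called once per processor before it starts scheduling. No caller
 * returns until every processor has checked in, so a cross-CPU request
 * sent right after the barrier cannot be lost by a processor that is
 * still initializing.
 */
static void
boot_barrier_wait(void)
{
    /* Check in; a relaxed increment is enough for a pure counter. */
    atomic_fetch_add_explicit(&boot_barrier_count, 1, memory_order_relaxed);

    /* Spin until all processors have checked in. */
    while (atomic_load_explicit(&boot_barrier_count, memory_order_seq_cst)
           != smp_cpu_count()) {
        cpu_relax();
    }
}

As in the patch, the increment can presumably be relaxed because only the final counter value matters, while the stronger ordering on the load keeps the spin loop from being reordered with the scheduler startup that follows the barrier.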