Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c | 45
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index aaff9676..1a078fea 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -89,7 +89,6 @@
 #include <kern/condition.h>
 #include <kern/cpumap.h>
 #include <kern/error.h>
-#include <kern/evcnt.h>
 #include <kern/init.h>
 #include <kern/kmem.h>
 #include <kern/list.h>
@@ -103,6 +102,7 @@
 #include <kern/spinlock.h>
 #include <kern/sprintf.h>
 #include <kern/sref.h>
+#include <kern/syscnt.h>
 #include <kern/task.h>
 #include <kern/thread.h>
 #include <kern/turnstile.h>
@@ -175,7 +175,7 @@
  * Run queue properties for real-time threads.
  */
 struct thread_rt_runq {
-    unsigned long long bitmap;
+    unsigned int bitmap;
     struct list threads[THREAD_SCHED_RT_PRIO_MAX + 1];
 };
 
@@ -254,8 +254,9 @@ struct thread_runq {
     /* Ticks before the next balancing attempt when a run queue is idle */
     unsigned int idle_balance_ticks;
 
-    struct evcnt ev_schedule_intr;
-    struct evcnt ev_tick_intr;
+    struct syscnt sc_schedule_intrs;
+    struct syscnt sc_tick_intrs;
+    struct syscnt sc_boosts;
 } __aligned(CPU_L1_SIZE);
 
 /*
@@ -280,7 +281,7 @@ static struct thread_runq thread_runq __percpu;
  * Statically allocated fake threads that provide thread context to processors
  * during bootstrap.
  */
-static struct thread thread_booters[MAX_CPUS] __initdata;
+static struct thread thread_booters[X15_MAX_CPUS] __initdata;
 
 static struct kmem_cache thread_cache;
 
@@ -442,7 +443,7 @@ static void __init
 thread_runq_init(struct thread_runq *runq, unsigned int cpu,
                  struct thread *booter)
 {
-    char name[EVCNT_NAME_SIZE];
+    char name[SYSCNT_NAME_SIZE];
 
     spinlock_init(&runq->lock);
     runq->cpu = cpu;
@@ -453,10 +454,12 @@ thread_runq_init(struct thread_runq *runq, unsigned int cpu,
     runq->balancer = NULL;
     runq->idler = NULL;
     runq->idle_balance_ticks = (unsigned int)-1;
-    snprintf(name, sizeof(name), "thread_schedule_intr/%u", cpu);
-    evcnt_register(&runq->ev_schedule_intr, name);
-    snprintf(name, sizeof(name), "thread_tick_intr/%u", cpu);
-    evcnt_register(&runq->ev_tick_intr, name);
+    snprintf(name, sizeof(name), "thread_schedule_intrs/%u", cpu);
+    syscnt_register(&runq->sc_schedule_intrs, name);
+    snprintf(name, sizeof(name), "thread_tick_intrs/%u", cpu);
+    syscnt_register(&runq->sc_tick_intrs, name);
+    snprintf(name, sizeof(name), "thread_boosts/%u", cpu);
+    syscnt_register(&runq->sc_boosts, name);
 }
 
 static inline struct thread_runq *
@@ -760,7 +763,7 @@ thread_sched_rt_get_next(struct thread_runq *runq)
         return NULL;
     }
 
-    priority = THREAD_SCHED_RT_PRIO_MAX - __builtin_clzll(rt_runq->bitmap);
+    priority = THREAD_SCHED_RT_PRIO_MAX - __builtin_clz(rt_runq->bitmap);
     threads = &rt_runq->threads[priority];
     assert(!list_empty(threads));
     thread = list_first_entry(threads, struct thread, rt_data.node);
@@ -1705,6 +1708,8 @@ thread_bootstrap_common(unsigned int cpu)
     thread_reset_real_priority(booter);
     memset(booter->tsd, 0, sizeof(booter->tsd));
     booter->task = kernel_task;
+    snprintf(booter->name, sizeof(booter->name),
+             THREAD_KERNEL_PREFIX "thread_boot/%u", cpu);
     thread_runq_init(percpu_ptr(thread_runq, cpu), cpu, booter);
 }
 
@@ -2402,16 +2407,6 @@ thread_wakeup(struct thread *thread)
         thread_clear_wchan(thread);
         thread->state = THREAD_RUNNING;
     } else {
-        /*
-         * If another wake-up was attempted right before this one, the thread
-         * may currently be pushed on a remote run queue, and the run queue
-         * being locked here is actually the previous one. The run queue
-         * pointer may be modified concurrently, now being protected by the
-         * target run queue. This isn't a problem since the thread state has
-         * already been updated, making this attempt stop early. In addition,
-         * locking semantics guarantee that, if the thread as seen by this
-         * attempt isn't running, its run queue is up to date.
-         */
         runq = thread_lock_runq(thread, &flags);
 
         if (thread->state == THREAD_RUNNING) {
@@ -2494,7 +2489,7 @@ thread_schedule_intr(void)
     assert(!thread_preempt_enabled());
 
     runq = thread_runq_local();
-    evcnt_inc(&runq->ev_schedule_intr);
+    syscnt_inc(&runq->sc_schedule_intrs);
 }
 
 void
@@ -2508,7 +2503,7 @@ thread_tick_intr(void)
     assert(!thread_preempt_enabled());
 
     runq = thread_runq_local();
-    evcnt_inc(&runq->ev_tick_intr);
+    syscnt_inc(&runq->sc_tick_intrs);
     llsync_report_periodic_event();
     sref_report_periodic_event();
     work_report_periodic_event();
@@ -2529,7 +2524,6 @@
     spinlock_unlock(&runq->lock);
 }
 
-/* TODO Move outside */
 char
 thread_state_to_chr(const struct thread *thread)
 {
@@ -2545,7 +2539,6 @@
     }
 }
 
-/* TODO Move outside */
 const char *
 thread_sched_class_to_str(unsigned char sched_class)
 {
@@ -2659,6 +2652,8 @@ thread_pi_setscheduler(struct thread *thread, unsigned char policy,
         goto out;
     }
 
+    syscnt_inc(&runq->sc_boosts);
+
     requeue = thread->in_runq;
 
     if (!requeue) {
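
Two of the hunks above deserve a note. Narrowing the bitmap in struct thread_rt_runq from unsigned long long to unsigned int works because the bitmap only needs one bit per real-time priority level, and pairing it with __builtin_clz() keeps the highest-priority lookup O(1). The standalone sketch below illustrates the technique; it assumes THREAD_SCHED_RT_PRIO_MAX is 31, the value that makes the expression in the hunk line up with a 32-bit unsigned int, and everything else in it is illustrative rather than taken from kern/thread.c.

/*
 * Sketch: O(1) highest-priority lookup over a per-priority bitmap,
 * in the style of thread_sched_rt_get_next(). Assumes a 32-bit
 * unsigned int and THREAD_SCHED_RT_PRIO_MAX == 31; both are
 * assumptions made for this example, not facts from the patch.
 */
#include <assert.h>
#include <stdio.h>

#define THREAD_SCHED_RT_PRIO_MAX 31

int
main(void)
{
    unsigned int bitmap;
    int priority;

    /* Priorities 3 and 17 each have at least one runnable thread. */
    bitmap = (1U << 3) | (1U << 17);

    /*
     * __builtin_clz() counts leading zero bits of an unsigned int
     * and is undefined for 0, which is why the real code only
     * reaches this point when the run queue isn't empty (the assert
     * stands in for that check). The highest set bit yields the
     * highest non-empty priority without scanning the list array.
     */
    assert(bitmap != 0);
    priority = THREAD_SCHED_RT_PRIO_MAX - __builtin_clz(bitmap);
    printf("next priority: %d\n", priority); /* prints 17 */
    return 0;
}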
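
The evcnt to syscnt conversion keeps the same per-CPU registration pattern: each run queue builds an "event/cpu" identifier with snprintf() and registers one counter per CPU under that name. Since only the register and increment calls are visible in this diff, the sketch below mocks a minimal counter type to show the pattern in isolation; struct counter and its helpers are stand-ins invented for the example, not the actual syscnt API.

/*
 * Sketch of the per-CPU counter naming pattern from
 * thread_runq_init(). The counter type and helpers are mocks; only
 * the snprintf("<event>/<cpu>") naming convention and the
 * register-then-increment usage are taken from the patch.
 */
#include <stdio.h>
#include <string.h>

#define COUNTER_NAME_SIZE 32   /* stand-in for SYSCNT_NAME_SIZE */
#define NR_CPUS 4              /* stand-in for X15_MAX_CPUS */

struct counter {
    unsigned long long value;
    char name[COUNTER_NAME_SIZE];
};

static struct counter boost_counters[NR_CPUS];

static void
counter_register(struct counter *counter, const char *name)
{
    counter->value = 0;
    snprintf(counter->name, sizeof(counter->name), "%s", name);
}

static void
counter_inc(struct counter *counter)
{
    counter->value++;
}

int
main(void)
{
    char name[COUNTER_NAME_SIZE];
    unsigned int cpu;

    /* One counter per CPU, named after its event and CPU id. */
    for (cpu = 0; cpu < NR_CPUS; cpu++) {
        snprintf(name, sizeof(name), "thread_boosts/%u", cpu);
        counter_register(&boost_counters[cpu], name);
    }

    counter_inc(&boost_counters[1]);
    printf("%s: %llu\n", boost_counters[1].name,
           boost_counters[1].value);
    return 0;
}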