path: root/kern/thread.c
Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c | 204
1 file changed, 160 insertions(+), 44 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 7ce22fb7..8e5b2b52 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -54,7 +54,7 @@
*
* A few terms are used by both papers with slightly different meanings. Here
* are the definitions used in this implementation:
- * - The time unit is the system timer period (1 / HZ)
+ * - The time unit is the system timer period (1 / tick frequency)
* - Work is the amount of execution time units consumed
* - Weight is the amount of execution time units allocated
* - A round is the shortest period during which all threads in a run queue
@@ -81,13 +81,15 @@
* weights in a smoother way than a raw scaling).
*/
+#include <assert.h>
+#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
+#include <stdnoreturn.h>
#include <string.h>
-#include <kern/assert.h>
#include <kern/atomic.h>
#include <kern/condition.h>
#include <kern/cpumap.h>
@@ -99,8 +101,8 @@
#include <kern/macros.h>
#include <kern/mutex.h>
#include <kern/panic.h>
-#include <kern/param.h>
#include <kern/percpu.h>
+#include <kern/shell.h>
#include <kern/sleepq.h>
#include <kern/spinlock.h>
#include <kern/sref.h>
@@ -110,6 +112,7 @@
#include <kern/turnstile.h>
#include <kern/work.h>
#include <machine/cpu.h>
+#include <machine/page.h>
#include <machine/pmap.h>
#include <machine/tcb.h>
#include <vm/vm_map.h>
@@ -158,7 +161,7 @@
/*
* Default time slice for real-time round-robin scheduling.
*/
-#define THREAD_DEFAULT_RR_TIME_SLICE (HZ / 10)
+#define THREAD_DEFAULT_RR_TIME_SLICE (THREAD_TICK_FREQ / 10)
/*
* Maximum number of threads which can be pulled from a remote run queue
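
As a rough illustration of the tick-based arithmetic introduced by this hunk (the real THREAD_TICK_FREQ is configuration-dependent; 1000 Hz is assumed here purely for the sketch):

    #include <stdio.h>

    /* Sketch only: 1000 Hz is an assumed tick frequency, not the
     * kernel's actual configuration value. */
    #define THREAD_TICK_FREQ             1000
    #define THREAD_DEFAULT_RR_TIME_SLICE (THREAD_TICK_FREQ / 10)

    int
    main(void)
    {
        /* 1000 / 10 = 100 ticks, i.e. 100 ms at a 1 ms tick period. */
        printf("%d ticks\n", THREAD_DEFAULT_RR_TIME_SLICE);
        return 0;
    }
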
@@ -169,7 +172,7 @@
/*
* Delay (in ticks) between two balance attempts when a run queue is idle.
*/
-#define THREAD_IDLE_BALANCE_TICKS (HZ / 2)
+#define THREAD_IDLE_BALANCE_TICKS (THREAD_TICK_FREQ / 2)
/*
* Run queue properties for real-time threads.
@@ -189,7 +192,7 @@ struct thread_rt_runq {
/*
* Round slice base unit for fair-scheduling threads.
*/
-#define THREAD_FS_ROUND_SLICE_BASE (HZ / 10)
+#define THREAD_FS_ROUND_SLICE_BASE (THREAD_TICK_FREQ / 10)
/*
* Group of threads sharing the same weight.
@@ -226,7 +229,7 @@ struct thread_fs_runq {
* return path) may violate the locking order.
*/
struct thread_runq {
- struct spinlock lock;
+ alignas(CPU_L1_SIZE) struct spinlock lock;
unsigned int cpu;
unsigned int nr_threads;
struct thread *current;
@@ -257,7 +260,7 @@ struct thread_runq {
struct syscnt sc_schedule_intrs;
struct syscnt sc_tick_intrs;
struct syscnt sc_boosts;
-} __aligned(CPU_L1_SIZE);
+};
/*
* Operations of a scheduling class.
@@ -321,7 +324,7 @@ static struct cpumap thread_idle_runqs;
* There can be moderate bouncing on this word so give it its own cache line.
*/
static struct {
- volatile unsigned long value __aligned(CPU_L1_SIZE);
+ alignas(CPU_L1_SIZE) volatile unsigned long value;
} thread_fs_highest_round_struct;
#define thread_fs_highest_round (thread_fs_highest_round_struct.value)
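
The two hunks above replace the GCC-specific __aligned attribute with standard C11 alignas. A minimal, self-contained sketch of the same false-sharing countermeasure, where CACHE_LINE_SIZE is an assumed stand-in for the kernel's CPU_L1_SIZE:

    #include <stdalign.h>

    #define CACHE_LINE_SIZE 64  /* assumed; CPU_L1_SIZE in the kernel */

    struct counter {
        /* Aligning the hot word to a cache line boundary keeps it from
         * sharing a line with unrelated data (false sharing). */
        alignas(CACHE_LINE_SIZE) volatile unsigned long value;
    };
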
@@ -1514,7 +1517,7 @@ thread_sched_idle_select_runq(struct thread *thread)
panic("thread: idler threads cannot be awaken");
}
-static void __noreturn
+static noreturn void
thread_sched_idle_panic(void)
{
panic("thread: only idle threads are allowed in the idle class");
@@ -1684,12 +1687,10 @@ thread_reset_real_priority(struct thread *thread)
}
static void __init
-thread_bootstrap_common(unsigned int cpu)
+thread_init_booter(unsigned int cpu)
{
struct thread *booter;
- cpumap_set(&thread_active_runqs, cpu);
-
/* Initialize only what's needed during bootstrap */
booter = &thread_booters[cpu];
booter->nr_refs = 0; /* Make sure booters aren't destroyed */
@@ -1705,10 +1706,20 @@ thread_bootstrap_common(unsigned int cpu)
booter->task = kernel_task;
snprintf(booter->name, sizeof(booter->name),
THREAD_KERNEL_PREFIX "thread_boot/%u", cpu);
- thread_runq_init(percpu_ptr(thread_runq, cpu), cpu, booter);
}
-void __init
+static int __init
+thread_setup_booter(void)
+{
+ tcb_set_current(&thread_booters[0].tcb);
+ thread_init_booter(0);
+ return 0;
+}
+
+INIT_OP_DEFINE(thread_setup_booter,
+ INIT_OP_DEP(tcb_setup, true));
+
+static int __init
thread_bootstrap(void)
{
cpumap_zero(&thread_active_runqs);
@@ -1716,18 +1727,17 @@ thread_bootstrap(void)
thread_fs_highest_round = THREAD_FS_INITIAL_ROUND;
- tcb_set_current(&thread_booters[0].tcb);
- thread_bootstrap_common(0);
+ cpumap_set(&thread_active_runqs, 0);
+ thread_runq_init(cpu_local_ptr(thread_runq), 0, &thread_booters[0]);
+ return 0;
}
-void __init
-thread_ap_bootstrap(void)
-{
- tcb_set_current(&thread_booters[cpu_id()].tcb);
-}
+INIT_OP_DEFINE(thread_bootstrap,
+ INIT_OP_DEP(syscnt_setup, true),
+ INIT_OP_DEP(thread_setup_booter, true));
-static void
-thread_main(void)
+void
+thread_main(void (*fn)(void *), void *arg)
{
struct thread *thread;
@@ -1741,7 +1751,7 @@ thread_main(void)
cpu_intr_enable();
thread_preempt_enable();
- thread->fn(thread->arg);
+ fn(arg);
thread_exit();
}
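
With this change the entry point and its argument no longer live in struct thread; the TCB carries them, and the initial context switch passes them to thread_main() directly. A self-contained, user-space sketch of the trampoline pattern (all names below are illustrative only, not the kernel's):

    #include <stdio.h>

    /* The trampoline receives the thread body and its argument directly
     * instead of loading them from a thread structure. */
    static void
    trampoline(void (*fn)(void *), void *arg)
    {
        /* ... per-thread setup would happen here ... */
        fn(arg);
        /* ... thread_exit() would follow in the kernel ... */
    }

    static void
    body(void *arg)
    {
        printf("%s\n", (const char *)arg);
    }

    int
    main(void)
    {
        trampoline(body, "hello");
        return 0;
    }
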
@@ -1828,14 +1838,12 @@ thread_init(struct thread *thread, void *stack,
thread->task = task;
thread->stack = stack;
strlcpy(thread->name, attr->name, sizeof(thread->name));
- thread->fn = fn;
- thread->arg = arg;
if (attr->flags & THREAD_ATTR_DETACHED) {
thread->flags |= THREAD_DETACHED;
}
- error = tcb_init(&thread->tcb, stack, thread_main);
+ error = tcb_init(&thread->tcb, stack, fn, arg);
if (error) {
goto error_tcb;
@@ -1894,7 +1902,7 @@ thread_alloc_stack(void)
void *mem;
int error;
- stack_size = vm_page_round(STACK_SIZE);
+ stack_size = vm_page_round(TCB_STACK_SIZE);
mem = vm_kmem_alloc((PAGE_SIZE * 2) + stack_size);
if (mem == NULL) {
@@ -1923,24 +1931,18 @@ thread_alloc_stack(void)
pmap_remove(kernel_pmap, va + PAGE_SIZE + stack_size, cpumap_all());
pmap_update(kernel_pmap);
- vm_page_free(first_page, 0);
- vm_page_free(last_page, 0);
-
- return (char *)va + PAGE_SIZE;
+ return (void *)va + PAGE_SIZE;
}
static void
thread_free_stack(void *stack)
{
size_t stack_size;
- char *va;
-
- stack_size = vm_page_round(STACK_SIZE);
- va = (char *)stack - PAGE_SIZE;
+ void *va;
- vm_kmem_free_va(va, PAGE_SIZE);
- vm_kmem_free(va + PAGE_SIZE, stack_size);
- vm_kmem_free_va(va + PAGE_SIZE + stack_size, PAGE_SIZE);
+ stack_size = vm_page_round(TCB_STACK_SIZE);
+ va = (void *)stack - PAGE_SIZE;
+ vm_kmem_free(va, (PAGE_SIZE * 2) + stack_size);
}
#else /* X15_THREAD_STACK_GUARD */
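
The guard-page variant now keeps the guard pages mapped-but-removed and releases the whole region with a single vm_kmem_free() call. A user-space analogue of the same layout, using POSIX mmap/mprotect in place of the kernel's vm_kmem/pmap interfaces (sketch only):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Allocate a stack with one inaccessible guard page on each side.
     * Returns a pointer to the usable area, or NULL on failure. */
    static void *
    stack_alloc(size_t stack_size, size_t page_size)
    {
        char *mem;

        mem = mmap(NULL, (page_size * 2) + stack_size,
                   PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                   -1, 0);

        if (mem == MAP_FAILED) {
            return NULL;
        }

        /* Revoke access to the head and tail guard pages. */
        mprotect(mem, page_size, PROT_NONE);
        mprotect(mem + page_size + stack_size, page_size, PROT_NONE);
        return mem + page_size;
    }

    static void
    stack_free(void *stack, size_t stack_size, size_t page_size)
    {
        /* A single call releases guards and stack together, mirroring
         * the single vm_kmem_free() in the patch. */
        munmap((char *)stack - page_size, (page_size * 2) + stack_size);
    }
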
@@ -2228,20 +2230,107 @@ thread_setup_runq(struct thread_runq *runq)
thread_setup_idler(runq);
}
-void __init
+#ifdef X15_SHELL
+
+/*
+ * This function is meant for debugging only. As a result, it uses a weak
+ * locking policy which allows tracing threads whose state may mutate during
+ * tracing.
+ */
+static void
+thread_shell_trace(int argc, char *argv[])
+{
+ const char *task_name, *thread_name;
+ struct thread_runq *runq;
+ struct thread *thread;
+ unsigned long flags;
+ struct task *task;
+ int error;
+
+ if (argc != 3) {
+ error = ERROR_INVAL;
+ goto error;
+ }
+
+ task_name = argv[1];
+ thread_name = argv[2];
+
+ task = task_lookup(task_name);
+
+ if (task == NULL) {
+ error = ERROR_SRCH;
+ goto error;
+ }
+
+ thread = task_lookup_thread(task, thread_name);
+ task_unref(task);
+
+ if (thread == NULL) {
+ error = ERROR_SRCH;
+ goto error;
+ }
+
+ runq = thread_lock_runq(thread, &flags);
+
+ if (thread == runq->current) {
+ printf("thread: trace: thread is running\n");
+ } else {
+ tcb_trace(&thread->tcb);
+ }
+
+ thread_unlock_runq(runq, flags);
+
+ thread_unref(thread);
+ return;
+
+error:
+ printf("thread: trace: %s\n", error_str(error));
+}
+
+static struct shell_cmd thread_shell_cmds[] = {
+ SHELL_CMD_INITIALIZER("thread_trace", thread_shell_trace,
+ "thread_trace <task_name> <thread_name>",
+ "display the stack trace of a given thread"),
+};
+
+static int __init
+thread_setup_shell(void)
+{
+ SHELL_REGISTER_CMDS(thread_shell_cmds);
+ return 0;
+}
+
+INIT_OP_DEFINE(thread_setup_shell,
+ INIT_OP_DEP(printf_setup, true),
+ INIT_OP_DEP(shell_setup, true),
+ INIT_OP_DEP(task_setup, true),
+ INIT_OP_DEP(thread_setup, true));
+
+#endif /* X15_SHELL */
+
+static void __init
+thread_setup_common(unsigned int cpu)
+{
+ assert(cpu != 0);
+ cpumap_set(&thread_active_runqs, cpu);
+ thread_init_booter(cpu);
+ thread_runq_init(percpu_ptr(thread_runq, cpu), cpu, &thread_booters[cpu]);
+}
+
+static int __init
thread_setup(void)
{
int cpu;
for (cpu = 1; (unsigned int)cpu < cpu_count(); cpu++) {
- thread_bootstrap_common(cpu);
+ thread_setup_common(cpu);
}
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread),
CPU_L1_SIZE, NULL, 0);
#ifndef X15_THREAD_STACK_GUARD
- kmem_cache_init(&thread_stack_cache, "thread_stack", STACK_SIZE,
- DATA_ALIGN, NULL, 0);
+ kmem_cache_init(&thread_stack_cache, "thread_stack", TCB_STACK_SIZE,
+ CPU_DATA_ALIGN, NULL, 0);
#endif /* X15_THREAD_STACK_GUARD */
thread_setup_reaper();
@@ -2249,6 +2338,29 @@ thread_setup(void)
cpumap_for_each(&thread_active_runqs, cpu) {
thread_setup_runq(percpu_ptr(thread_runq, cpu));
}
+
+ return 0;
+}
+
+INIT_OP_DEFINE(thread_setup,
+ INIT_OP_DEP(cpumap_setup, true),
+ INIT_OP_DEP(kmem_setup, true),
+ INIT_OP_DEP(pmap_setup, true),
+ INIT_OP_DEP(sleepq_setup, true),
+ INIT_OP_DEP(task_setup, true),
+ INIT_OP_DEP(thread_bootstrap, true),
+ INIT_OP_DEP(turnstile_setup, true),
+#ifdef X15_THREAD_STACK_GUARD
+ INIT_OP_DEP(vm_kmem_setup, true),
+ INIT_OP_DEP(vm_map_setup, true),
+ INIT_OP_DEP(vm_page_setup, true),
+#endif
+ );
+
+void __init
+thread_ap_setup(void)
+{
+ tcb_set_current(&thread_booters[cpu_id()].tcb);
}
int
@@ -2395,6 +2507,10 @@ thread_wakeup(struct thread *thread)
struct thread_runq *runq;
unsigned long flags;
+ if ((thread == NULL) || (thread == thread_self())) {
+ return;
+ }
+
/*
* There is at most one reference on threads that were never dispatched,
* in which case there is no need to lock anything.
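
The recurring INIT_OP_DEFINE/INIT_OP_DEP pattern throughout this diff is the heart of the commit: each setup step becomes a named operation with explicit dependencies, and the init system orders them instead of relying on hand-written bootstrap call sequences. A toy, self-contained model of that idea (the kernel's actual INIT_OP machinery is macro- and table-driven and considerably more elaborate):

    #include <stdio.h>

    /* Toy model: an init operation is a function plus the operations it
     * depends on; running one runs its dependencies first, exactly once. */
    struct init_op {
        const char *name;
        int (*fn)(void);
        struct init_op **deps;  /* NULL-terminated dependency list */
        int done;
    };

    static int
    init_op_run(struct init_op *op)
    {
        if (op->done) {
            return 0;
        }

        /* Run dependencies first; error handling is elided in this toy. */
        for (struct init_op **dep = op->deps; *dep != NULL; dep++) {
            init_op_run(*dep);
        }

        op->done = 1;
        printf("running %s\n", op->name);
        return op->fn();
    }

    static int setup_a_fn(void) { return 0; }
    static int setup_b_fn(void) { return 0; }

    static struct init_op *a_deps[] = { NULL };
    static struct init_op setup_a = { "setup_a", setup_a_fn, a_deps, 0 };

    static struct init_op *b_deps[] = { &setup_a, NULL };
    static struct init_op setup_b = { "setup_b", setup_b_fn, b_deps, 0 };

    int
    main(void)
    {
        return init_op_run(&setup_b);   /* runs setup_a, then setup_b */
    }
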