author    Richard Braun <rbraun@sceen.net>  2012-12-28 19:42:22 +0100
committer Richard Braun <rbraun@sceen.net>  2012-12-28 19:45:37 +0100
commit    3fa27e6bbd937c36ba845ed29b1a9369a8d97c9a
tree      68fd407f2e74129990c6df7b51d5d740c9de8ef6
parent    b841232941239387ca81adaaa64f2887077dabd0
kern/thread: make thread_current migration-safe
This change removes the current thread member from the run queues and moves the responsibility of maintaining it to the architecture-specific tcb module. For the x86 architecture, the TCB functions use a per-CPU pointer that can be read and set in a single instruction, making the accesses interrupt-safe and thus migration-safe.
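An illustration of the migration hazard this avoids (a sketch, not part of the commit; percpu_tcbs is a made-up name used only here): an accessor built from cpu_id() has a window between reading the processor ID and using it, during which an interrupt may reschedule the thread onto another processor:

    /* Hypothetical, broken accessor: between (1) and (2), an interrupt
     * may migrate the thread, so the array is indexed with a stale ID. */
    static inline struct tcb *
    unsafe_tcb(void)
    {
        unsigned int id;

        id = cpu_id();              /* (1) read the processor ID */
        return percpu_tcbs[id];     /* (2) use a possibly stale ID */
    }

With the pointer kept in the per-CPU area reached through the %fs segment, the whole read is one instruction, which an interrupt cannot split:

    mov %fs:OFFSET_OF_TCB, %rax    /* OFFSET_OF_TCB: illustrative name */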
-rw-r--r--  arch/x86/machine/boot.c       1
-rw-r--r--  arch/x86/machine/cpu.h       28
-rw-r--r--  arch/x86/machine/tcb.c       10
-rw-r--r--  arch/x86/machine/tcb.h       40
-rw-r--r--  arch/x86/machine/tcb_asm.S    8
-rw-r--r--  kern/thread.c                45
-rw-r--r--  kern/thread.h                24
7 files changed, 105 insertions(+), 51 deletions(-)
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index 5cc52242..31a8c2e2 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -291,6 +291,7 @@ void __init
boot_ap_main(void)
{
cpu_ap_setup();
+ thread_ap_bootstrap();
pmap_ap_bootstrap();
kernel_ap_main();
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 14ed73cb..4413c61f 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -204,6 +204,11 @@ struct cpu_tss {
} __packed;
/*
+ * Forward declaration.
+ */
+struct tcb;
+
+/*
* CPU states.
*/
#define CPU_STATE_OFF 0
@@ -217,6 +222,7 @@ struct cpu_tss {
struct cpu {
struct cpu *self;
+ struct tcb *tcb;
unsigned int id;
unsigned int apic_id;
char vendor_id[CPU_VENDOR_ID_SIZE];
@@ -419,6 +425,28 @@ cpu_current(void)
return cpu;
}
+/*
+ * The current TCB must be obtained and updated in a migration-safe way.
+ */
+static __always_inline struct tcb *
+cpu_tcb(void)
+{
+ struct tcb *tcb;
+
+ asm volatile("mov %%fs:%1, %0"
+ : "=r" (tcb)
+ : "m" (*(char *)offsetof(struct cpu, tcb)));
+
+ return tcb;
+}
+
+static __always_inline void
+cpu_set_tcb(struct tcb *tcb)
+{
+ asm volatile("mov %0, %%fs:%1"
+ : : "r" (tcb), "m" (*(char *)offsetof(struct cpu, tcb)));
+}
+
static __always_inline unsigned int
cpu_id(void)
{
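A note on the accessors above (illustrative, assuming each processor's %fs base points at its own struct cpu, as cpu_current() already relies on): offsetof() only supplies the displacement of the tcb member, so the functions compute the same values as the sketches below. The difference is that the real versions do it in one %fs-relative instruction, while these two-step equivalents obtain the struct cpu pointer first and dereference it second, leaving a migration window between the steps:

    /* Conceptual (unsafe) equivalents of cpu_tcb() and cpu_set_tcb(). */
    static inline struct tcb *
    cpu_tcb_conceptual(void)
    {
        return cpu_current()->tcb;  /* load self, then load tcb: two steps */
    }

    static inline void
    cpu_set_tcb_conceptual(struct tcb *tcb)
    {
        cpu_current()->tcb = tcb;
    }

Unlike these sketches, the inline assembly versions never materialize the struct cpu pointer, so no stale pointer can survive a migration.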
diff --git a/arch/x86/machine/tcb.c b/arch/x86/machine/tcb.c
index a15c6709..bd16c638 100644
--- a/arch/x86/machine/tcb.c
+++ b/arch/x86/machine/tcb.c
@@ -18,19 +18,9 @@
#include <kern/param.h>
#include <machine/tcb.h>
-/*
- * Low level context switch function.
- */
-void tcb_context_switch(struct tcb *prev, struct tcb *next);
-
void
tcb_init(struct tcb *tcb, void *stack, void (*fn)(void))
{
tcb->sp = (unsigned long)stack + STACK_SIZE;
tcb->ip = (unsigned long)fn;
}
-
-void tcb_switch(struct tcb *prev, struct tcb *next)
-{
- tcb_context_switch(prev, next);
-}
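With the wrapper gone, tcb.c is reduced to tcb_init(), which points sp at the high end of the stack because x86 stacks grow downward. A worked example with hypothetical values (STACK_SIZE comes from kern/param.h; 4096 is used here only for the sketch):

    char stack[4096];
    struct tcb tcb;

    tcb_init(&tcb, stack, thread_main);
    /* tcb.sp == (unsigned long)stack + 4096   (one past the top)
     * tcb.ip == (unsigned long)thread_main */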
diff --git a/arch/x86/machine/tcb.h b/arch/x86/machine/tcb.h
index e18da15b..b6e774cc 100644
--- a/arch/x86/machine/tcb.h
+++ b/arch/x86/machine/tcb.h
@@ -21,7 +21,9 @@
#ifndef _X86_TCB_H
#define _X86_TCB_H
+#include <kern/assert.h>
#include <kern/macros.h>
+#include <machine/cpu.h>
#include <machine/trap.h>
/*
@@ -41,17 +43,49 @@ struct tcb {
void tcb_init(struct tcb *tcb, void *stack, void (*fn)(void));
/*
+ * Low level context load/switch functions.
+ */
+void __noreturn tcb_context_load(struct tcb *tcb);
+void tcb_context_switch(struct tcb *prev, struct tcb *next);
+
+static inline struct tcb *
+tcb_current(void)
+{
+ return cpu_tcb();
+}
+
+static inline void
+tcb_set_current(struct tcb *tcb)
+{
+ cpu_set_tcb(tcb);
+}
+
+/*
* Load a TCB.
*
- * The caller context is lost.
+ * Called with interrupts disabled. The caller context is lost.
*/
-void __noreturn tcb_load(struct tcb *tcb);
+static inline void __noreturn
+tcb_load(struct tcb *tcb)
+{
+ assert(!cpu_intr_enabled());
+
+ tcb_set_current(tcb);
+ tcb_context_load(tcb);
+}
/*
* Context switch.
*
* Called with interrupts disabled.
*/
-void tcb_switch(struct tcb *prev, struct tcb *next);
+static inline void
+tcb_switch(struct tcb *prev, struct tcb *next)
+{
+ assert(!cpu_intr_enabled());
+
+ tcb_set_current(next);
+ tcb_context_switch(prev, next);
+}
#endif /* _X86_TCB_H */
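The ordering inside tcb_switch() is deliberate: the current TCB is updated before the low level switch, with interrupts disabled across both steps, so the first interrupt taken after the switch already observes the correct current TCB. A sketch of the intended call pattern (the caller is hypothetical, and cpu_intr_restore() is assumed to be the usual counterpart of cpu_intr_save()):

    unsigned long flags;

    flags = cpu_intr_save();            /* tcb_switch() asserts this */
    tcb_switch(&prev->tcb, &next->tcb);
    /* Execution resumes here once some processor switches back to prev. */
    cpu_intr_restore(flags);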
diff --git a/arch/x86/machine/tcb_asm.S b/arch/x86/machine/tcb_asm.S
index 0be93809..872eb82c 100644
--- a/arch/x86/machine/tcb_asm.S
+++ b/arch/x86/machine/tcb_asm.S
@@ -22,14 +22,14 @@
#ifdef __LP64__
-ASM_ENTRY(tcb_load)
+ASM_ENTRY(tcb_context_load)
movq 8(%rdi), %rax
movq (%rdi), %rsp
pushq %rax
pushq $CPU_EFL_ONE
popfq
ret
-ASM_END(tcb_load)
+ASM_END(tcb_context_load)
ASM_ENTRY(tcb_context_switch)
pushfq
@@ -58,7 +58,7 @@ ASM_END(tcb_context_switch)
#else /* __LP64__ */
-ASM_ENTRY(tcb_load)
+ASM_ENTRY(tcb_context_load)
movl 4(%esp), %eax
movl 4(%eax), %ecx
movl (%eax), %esp
@@ -66,7 +66,7 @@ ASM_ENTRY(tcb_load)
pushl $CPU_EFL_ONE
popfl
ret
-ASM_END(tcb_load)
+ASM_END(tcb_context_load)
ASM_ENTRY(tcb_context_switch)
movl 4(%esp), %eax
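For reference, a commented reading of the 64-bit tcb_context_load() above (the annotations are an interpretation; register usage follows the System V AMD64 ABI, and CPU_EFL_ONE is presumably the always-one EFLAGS bit, leaving IF clear):

    movq 8(%rdi), %rax      /* %rdi = tcb; load tcb->ip */
    movq (%rdi), %rsp       /* switch to tcb->sp */
    pushq %rax              /* push the entry point */
    pushq $CPU_EFL_ONE      /* push a minimal EFLAGS image... */
    popfq                   /* ...and load it: interrupts stay disabled */
    ret                     /* pop and jump to tcb->ip */

The new thread therefore starts with interrupts off, which matches the assert in thread_main() before it calls cpu_intr_enable().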
diff --git a/kern/thread.c b/kern/thread.c
index 4d5d74ae..fa86a2ab 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -21,6 +21,7 @@
#include <kern/kmem.h>
#include <kern/list.h>
#include <kern/macros.h>
+#include <kern/panic.h>
#include <kern/param.h>
#include <kern/sprintf.h>
#include <kern/stddef.h>
@@ -30,7 +31,15 @@
#include <machine/cpu.h>
#include <machine/tcb.h>
-struct thread_runq thread_runqs[MAX_CPUS];
+/*
+ * Per processor run queue.
+ */
+struct thread_runq {
+ struct thread *idle;
+ struct list threads;
+} __aligned(CPU_L1_SIZE);
+
+static struct thread_runq thread_runqs[MAX_CPUS];
/*
* Statically allocating the idle thread structures enables their use as
@@ -52,7 +61,6 @@ thread_runq_init(struct thread_runq *runq, struct thread *idle)
idle->flags = 0;
idle->pinned = 1;
idle->preempt = 1;
- runq->current = idle;
runq->idle = idle;
list_init(&runq->threads);
}
@@ -81,6 +89,12 @@ thread_runq_dequeue(struct thread_runq *runq)
return thread;
}
+static inline struct thread_runq *
+thread_runq_local(void)
+{
+ return &thread_runqs[cpu_id()];
+}
+
void __init
thread_bootstrap(void)
{
@@ -88,6 +102,14 @@ thread_bootstrap(void)
for (i = 0; i < ARRAY_SIZE(thread_runqs); i++)
thread_runq_init(&thread_runqs[i], &thread_idles[i]);
+
+ tcb_set_current(&thread_idles[0].tcb);
+}
+
+void __init
+thread_ap_bootstrap(void)
+{
+ tcb_set_current(&thread_idles[cpu_id()].tcb);
}
void __init
@@ -102,13 +124,11 @@ thread_setup(void)
static void
thread_main(void)
{
- struct thread_runq *runq;
struct thread *thread;
assert(!cpu_intr_enabled());
- runq = thread_runq_local();
- thread = runq->current;
+ thread = thread_current();
cpu_intr_enable();
thread->fn(thread->arg);
@@ -218,7 +238,6 @@ thread_run(void)
if (thread == NULL)
thread = runq->idle;
- runq->current = thread;
tcb_load(&thread->tcb);
}
@@ -236,7 +255,7 @@ thread_schedule(void)
flags = cpu_intr_save();
runq = thread_runq_local();
- prev = runq->current;
+ prev = thread_current();
assert(prev != NULL);
if (prev != runq->idle)
@@ -259,13 +278,11 @@ thread_schedule(void)
void
thread_intr_schedule(void)
{
- struct thread_runq *runq;
struct thread *thread;
assert(!cpu_intr_enabled());
- runq = thread_runq_local();
- thread = runq->current;
+ thread = thread_current();
assert(thread != NULL);
if ((thread->preempt == 0) && (thread->flags & THREAD_RESCHEDULE))
@@ -275,11 +292,9 @@ thread_intr_schedule(void)
void
thread_preempt_schedule(void)
{
- struct thread_runq *runq;
struct thread *thread;
- runq = thread_runq_local();
- thread = runq->current;
+ thread = thread_current();
assert(thread != NULL);
if ((thread->preempt == 0))
@@ -289,13 +304,11 @@ thread_preempt_schedule(void)
void
thread_tick(void)
{
- struct thread_runq *runq;
struct thread *thread;
assert(!cpu_intr_enabled());
- runq = thread_runq_local();
- thread = runq->current;
+ thread = thread_current();
assert(thread != NULL);
thread->flags |= THREAD_RESCHEDULE;
}
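Moving struct thread_runq into thread.c makes it a private implementation detail; note its __aligned(CPU_L1_SIZE) attribute, which places each per-CPU run queue on its own cache line so that one processor's updates do not invalidate lines used by another (false sharing). A generic, self-contained sketch of the same idiom (all names here are made up; X15 gets __aligned and CPU_L1_SIZE from its own headers):

    #define NR_CPUS     8
    #define CACHE_LINE  64  /* typical x86 L1 line size */

    /* One counter per processor, each on its own cache line. */
    struct percpu_counter {
        unsigned long value;
    } __attribute__((aligned(CACHE_LINE)));

    static struct percpu_counter counters[NR_CPUS];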
diff --git a/kern/thread.h b/kern/thread.h
index c5f2d07d..8e1f961f 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -58,17 +58,6 @@ struct thread {
} __aligned(CPU_L1_SIZE);
/*
- * Per processor run queue.
- */
-struct thread_runq {
- struct thread *current;
- struct thread *idle;
- struct list threads;
-} __aligned(CPU_L1_SIZE);
-
-extern struct thread_runq thread_runqs[MAX_CPUS];
-
-/*
* Early initialization of the thread module.
*
* This function makes it possible to use migration and preemption control
@@ -77,6 +66,11 @@ extern struct thread_runq thread_runqs[MAX_CPUS];
void thread_bootstrap(void);
/*
+ * Early initialization of the TCB on APs.
+ */
+void thread_ap_bootstrap(void);
+
+/*
* Initialize the thread module.
*/
void thread_setup(void);
@@ -119,16 +113,10 @@ void thread_preempt_schedule(void);
*/
void thread_tick(void);
-static inline struct thread_runq *
-thread_runq_local(void)
-{
- return &thread_runqs[cpu_id()];
-}
-
static inline struct thread *
thread_current(void)
{
- return thread_runq_local()->current;
+ return structof(tcb_current(), struct thread, tcb);
}
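thread_current() recovers the enclosing thread from the per-CPU TCB pointer with structof(), X15's counterpart of the classic container_of() idiom. A minimal, self-contained sketch of such a macro (an assumption about its shape; the real definition lives in kern/macros.h):

    #include <stddef.h>

    /* Map a pointer to a member back to its enclosing structure. */
    #define structof(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* As used above:
     * struct thread *t = structof(tcb_current(), struct thread, tcb); */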