summary refs log tree commit diff
diff options
context:
space:
mode:
author	Richard Braun <rbraun@sceen.net>	2012-12-29 17:18:11 +0100
committer	Richard Braun <rbraun@sceen.net>	2012-12-29 17:18:11 +0100
commit	9b00cdf3846b84c3aba24df28c0bdd10d5baf0b5 (patch)
tree	ce483228035c3a304c757dbd108cd39100cec662
parent	90dd685f2cbe1aa50ff8684e3b07f27b582771e2 (diff)
kern/thread: handle address space on context switch
-rw-r--r--	arch/x86/machine/cpu.h	5
-rw-r--r--	arch/x86/machine/pmap.c	5
-rw-r--r--	arch/x86/machine/pmap.h	7
-rw-r--r--	kern/thread.c	16
4 files changed, 31 insertions, 2 deletions
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 854acf9d..774f76da 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -204,9 +204,10 @@ struct cpu_tss {
} __packed;
/*
- * Forward declaration.
+ * Forward declarations.
*/
struct tcb;
+struct pmap;
/*
* CPU states.
@@ -223,6 +224,7 @@ struct tcb;
struct cpu {
struct cpu *self;
struct tcb *tcb;
+ struct pmap *pmap;
unsigned int id;
unsigned int apic_id;
char vendor_id[CPU_VENDOR_ID_SIZE];
@@ -445,6 +447,7 @@ cpu_percpu_set_ ## member(type *ptr) \
CPU_DECL_PERCPU(struct cpu, self)
CPU_DECL_PERCPU(struct tcb, tcb)
+CPU_DECL_PERCPU(struct pmap, pmap)
static __always_inline struct cpu *
cpu_current(void)
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 2d55d10d..0fa5b3fd 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -351,6 +351,7 @@ pmap_bootstrap(void)
unsigned int i;
spinlock_init(&kernel_pmap->lock);
+ cpu_percpu_set_pmap(kernel_pmap);
pmap_boot_heap = (unsigned long)&_end;
pmap_boot_heap_end = pmap_boot_heap + (PMAP_RESERVED_PAGES * PAGE_SIZE);
@@ -392,6 +393,8 @@ pmap_bootstrap(void)
void __init
pmap_ap_bootstrap(void)
{
+ cpu_percpu_set_pmap(kernel_pmap);
+
if (cpu_has_global_pages())
cpu_enable_global_pages();
}
@@ -765,6 +768,8 @@ error_pmap:
void
pmap_load(struct pmap *pmap)
{
+ cpu_percpu_set_pmap(pmap);
+
#ifdef X86_PAE
cpu_set_cr3(pmap->pdpt_pa);
#else /* X86_PAE */
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index da18c78c..8f174a69 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -93,6 +93,7 @@
#include <kern/spinlock.h>
#include <kern/stdint.h>
#include <kern/types.h>
+#include <machine/cpu.h>
#include <machine/trap.h>
#ifdef X86_PAE
@@ -209,6 +210,12 @@ int pmap_create(struct pmap **pmapp);
*/
void pmap_load(struct pmap *pmap);
+static inline struct pmap *
+pmap_current(void)
+{
+ return cpu_percpu_get_pmap();
+}
+
#endif /* __ASSEMBLER__ */
#endif /* _X86_PMAP_H */
diff --git a/kern/thread.c b/kern/thread.c
index e3298261..4c48ef0d 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -29,7 +29,9 @@
#include <kern/task.h>
#include <kern/thread.h>
#include <machine/cpu.h>
+#include <machine/pmap.h>
#include <machine/tcb.h>
+#include <vm/vm_map.h>
/*
* Per processor run queue.
@@ -241,9 +243,21 @@ thread_run(void)
if (thread == NULL)
thread = runq->idle;
+ if (thread->task != kernel_task)
+ pmap_load(thread->task->map->pmap);
+
tcb_load(&thread->tcb);
}
+static inline void
+thread_switch(struct thread *prev, struct thread *next)
+{
+ if ((prev->task != next->task) && (next->task != kernel_task))
+ pmap_load(next->task->map->pmap);
+
+ tcb_switch(&prev->tcb, &next->tcb);
+}
+
void
thread_schedule(void)
{
@@ -271,7 +285,7 @@ thread_schedule(void)
next = runq->idle;
if (prev != next)
- tcb_switch(&prev->tcb, &next->tcb);
+ thread_switch(prev, next);
cpu_intr_restore(flags);
thread_preempt_enable_no_resched();