author    Richard Braun <rbraun@sceen.net>  2013-07-01 21:09:40 +0200
committer Richard Braun <rbraun@sceen.net>  2013-07-01 21:09:40 +0200
commit    ca3910f342605ab91011e7d72e065b6758ce9df0 (patch)
tree      d0abddf4f6ac1069f570b378c912fb5cd2abd517 /kern/thread.c
parent    75d1643bc6478538b0c8e13447fddd1f3b5b7f13 (diff)
x86/pmap: improve TLB invalidation
Add a per-physical-map processor bitmap that tracks the processors on which a pmap is loaded, so that only those processors receive update IPIs. In addition, implement lazy TLB invalidation by not loading page tables when switching to a kernel thread. Finally, the thread module now calls pmap_load unconditionally, without making assumptions about pmap optimizations.
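To illustrate the first point, here is a minimal, self-contained sketch of how a per-pmap processor bitmap can restrict update IPIs to the processors that actually have the pmap loaded. This is not the x15 implementation; NR_CPUS, ipi_send_update and pmap_update_request are hypothetical stand-ins, and the real code deals with locking, per-CPU state and the actual IPI machinery.

#include <stdio.h>

#define NR_CPUS 8 /* arbitrary value for the sketch */

/* Hypothetical pmap carrying a bitmap of processors it is loaded on. */
struct pmap {
    unsigned long cpumap;
    /* ... page table root, locks, etc. ... */
};

/* Stand-in for the real IPI machinery. */
static void
ipi_send_update(unsigned int cpu)
{
    printf("sending TLB update IPI to cpu%u\n", cpu);
}

/* Request a TLB update only on processors that have the pmap loaded. */
static void
pmap_update_request(struct pmap *pmap)
{
    for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
        if (pmap->cpumap & (1UL << cpu))
            ipi_send_update(cpu);
}

int
main(void)
{
    struct pmap pmap = { .cpumap = (1UL << 1) | (1UL << 3) };

    pmap_update_request(&pmap); /* only cpu1 and cpu3 are interrupted */
    return 0;
}

Processors not present in the bitmap never loaded the pmap, so their TLBs cannot hold stale entries for it and there is nothing to invalidate there.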
Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index f19ec853..3ee8da93 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -473,8 +473,7 @@ thread_runq_schedule(struct thread_runq *runq, struct thread *prev)
     assert((next != runq->idler) || (runq->nr_threads == 0));
 
     if (prev != next) {
-        if ((prev->task != next->task) && (next->task != kernel_task))
-            pmap_load(next->task->map->pmap);
+        pmap_load(next->task->map->pmap);
 
         /*
          * That's where the true context switch occurs. The next thread must
@@ -1893,9 +1892,7 @@ thread_run(void)
     spinlock_lock(&runq->lock);
 
     thread = thread_runq_get_next(thread_runq_local());
 
-    if (thread->task != kernel_task)
-        pmap_load(thread->task->map->pmap);
-
+    pmap_load(thread->task->map->pmap);
     tcb_load(&thread->tcb);
 }
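With the conditional removed from the thread module, the optimization has to live inside pmap_load itself. The following is a hedged, runnable sketch of the kind of check pmap_load can now perform internally, under the behavior described in the commit message; kernel_pmap, current_pmap, root_ptp_pa and cpu_load_page_tables are hypothetical names, not the actual x15 pmap API.

#include <stdio.h>

/* Hypothetical stand-ins; the real x15 types and helpers differ. */
struct pmap {
    unsigned long root_ptp_pa;  /* physical address of the root page table */
};

static struct pmap kernel_pmap_store = { 0x1000 };
static struct pmap *kernel_pmap = &kernel_pmap_store;
static struct pmap *current_pmap = &kernel_pmap_store;

/* Stand-in for reloading CR3 on x86. */
static void
cpu_load_page_tables(struct pmap *pmap)
{
    printf("loading page tables at %#lx\n", pmap->root_ptp_pa);
    current_pmap = pmap;
}

/*
 * Sketch: pmap_load absorbs the optimization the thread module used to
 * perform. Switching to the kernel pmap, or to the pmap already loaded on
 * this processor, skips the page table reload (lazy TLB invalidation),
 * since kernel mappings are present in every pmap.
 */
static void
pmap_load(struct pmap *pmap)
{
    if ((pmap == kernel_pmap) || (pmap == current_pmap))
        return;

    cpu_load_page_tables(pmap);
}

int
main(void)
{
    struct pmap user_pmap = { 0x2000 };

    pmap_load(kernel_pmap);  /* kernel thread: no reload needed */
    pmap_load(&user_pmap);   /* user task: page tables loaded */
    pmap_load(&user_pmap);   /* same task again: no reload */
    return 0;
}

Keeping the check behind pmap_load means the scheduler stays free of address-space details, and the pmap module can refine its lazy-invalidation policy without touching thread.c again.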