summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2013-06-28 20:44:33 +0200
committerRichard Braun <rbraun@sceen.net>2013-06-28 20:44:33 +0200
commit3788dee2d2bab33aef04b4377392371b93dee1f7 (patch)
tree914fce524b8bcc2f4d996ac5f8ac60662e501105
parent722366c46be7a75dd13429cb7efb1144ac913f76 (diff)
x86/pmap: improve TLB range flushes
Use a threshold to determine whether TLB entries should be invalidated individually or through a global TLB flush.
-rw-r--r--arch/x86/machine/pmap.c19
1 file changed, 16 insertions, 3 deletions
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 60c1a6e8..43ca984c 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -146,6 +146,12 @@ static struct {
} pmap_pt_vas[MAX_CPUS];
/*
+ * Maximum number of mappings for which individual TLB invalidations can be
+ * performed. Global TLB flushes are done beyond this value.
+ */
+#define PMAP_UPDATE_MAX_MAPPINGS 64
+
+/*
* TLB invalidation data.
*
* TODO Implement generic inter-processor calls with low overhead and use them.
@@ -645,9 +651,16 @@ pmap_update_local(struct pmap *pmap, unsigned long start, unsigned long end)
if ((pmap != pmap_current()) && (pmap != kernel_pmap))
return;
- while (start < end) {
- cpu_tlb_flush_va(start);
- start += PAGE_SIZE;
+ if (vm_page_atop(end - start) > PMAP_UPDATE_MAX_MAPPINGS) {
+ if (pmap == kernel_pmap)
+ cpu_tlb_flush_all();
+ else
+ cpu_tlb_flush();
+ } else {
+ while (start < end) {
+ cpu_tlb_flush_va(start);
+ start += PAGE_SIZE;
+ }
}
}