author     Richard Braun <rbraun@sceen.net>   2013-06-28 20:44:33 +0200
committer  Richard Braun <rbraun@sceen.net>   2013-06-28 20:44:33 +0200
commit     757bcadf8732ca7c1a0acaac9115dc788c4f6d28 (patch)
tree       eb24e1427a45eedfebc5a1b44676e3776e40f6b2
parent     0239be76d4601a2a1bf448965e0ffc7c17735613 (diff)
x86/pmap: replace pmap_kupdate with pmap_update
Similar to pmap_protect and pmap_extract, pmap_update is meant to handle both kernel and regular pmap updates.
-rw-r--r--  arch/x86/machine/cga.c   |  2
-rw-r--r--  arch/x86/machine/pmap.c  | 63
-rw-r--r--  arch/x86/machine/pmap.h  | 18
-rw-r--r--  vm/vm_kmem.c             |  8
-rw-r--r--  vm/vm_map.c              |  6
5 files changed, 59 insertions, 38 deletions
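As the commit message notes, call sites now pass the target pmap explicitly. A minimal sketch of the migration at a typical call site follows; the helper below is hypothetical and only mirrors the pattern used in cga_setup, vm_kmem_alloc and the other call sites touched by this commit, and the header path is assumed:

#include <machine/pmap.h>   /* pmap_kenter, pmap_update, kernel_pmap (assumed path) */

/* Hypothetical helper, not part of this commit: map one kernel page,
 * then flush the TLBs with the new interface. PAGE_SIZE is assumed to
 * come from the machine headers, as in the call sites below. */
static void
example_map_page(unsigned long va, phys_addr_t pa)
{
    pmap_kenter(va, pa);
    /* Previously: pmap_kupdate(va, va + PAGE_SIZE); */
    pmap_update(kernel_pmap, va, va + PAGE_SIZE);
}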
diff --git a/arch/x86/machine/cga.c b/arch/x86/machine/cga.c
index d2d0005d..0cc857f0 100644
--- a/arch/x86/machine/cga.c
+++ b/arch/x86/machine/cga.c
@@ -108,7 +108,7 @@ cga_setup(void)
va = pmap_bootalloc(1);
pmap_kenter(va, CGA_MEMORY);
- pmap_kupdate(va, va + PAGE_SIZE);
+ pmap_update(kernel_pmap, va, va + PAGE_SIZE);
cga_memory = (uint8_t *)va;
/*
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index d35c5b19..60c1a6e8 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -146,18 +146,19 @@ static struct {
} pmap_pt_vas[MAX_CPUS];
/*
- * Shared variables used by the inter-processor update functions.
- */
-static unsigned long pmap_update_start;
-static unsigned long pmap_update_end;
-static struct spinlock pmap_update_lock;
-
-/*
- * There is strong bouncing on this counter so give it its own cache line.
+ * TLB invalidation data.
+ *
+ * TODO Implement generic inter-processor calls with low overhead and use them.
*/
static struct {
- volatile unsigned long count __aligned(CPU_L1_SIZE);
-} pmap_nr_updates;
+ struct spinlock lock;
+ struct pmap *pmap;
+ unsigned long start;
+ unsigned long end;
+
+ /* There may be strong bouncing on this counter so give it a cache line */
+ volatile unsigned long nr_pending_updates __aligned(CPU_L1_SIZE);
+} pmap_update_data;
/*
* Global list of physical maps.
@@ -376,7 +377,7 @@ pmap_bootstrap(void)
pmap_pt_vas[i].va = pmap_bootalloc(PMAP_NR_RPTPS);
}
- spinlock_init(&pmap_update_lock);
+ spinlock_init(&pmap_update_data.lock);
mutex_init(&pmap_list_lock);
list_init(&pmap_list);
@@ -553,7 +554,7 @@ pmap_kgrow(unsigned long end)
lower_index = PMAP_PTEMAP_INDEX(va, pt_lower_level->shift);
lower_pt = &pt_lower_level->ptes[lower_index];
lower_pt_va = (unsigned long)lower_pt;
- pmap_kupdate(lower_pt_va, lower_pt_va + PAGE_SIZE);
+ pmap_update(kernel_pmap, lower_pt_va, lower_pt_va + PAGE_SIZE);
}
}
}
@@ -639,8 +640,11 @@ pmap_extract(struct pmap *pmap, unsigned long va)
}
static void
-pmap_kupdate_local(unsigned long start, unsigned long end)
+pmap_update_local(struct pmap *pmap, unsigned long start, unsigned long end)
{
+ if ((pmap != pmap_current()) && (pmap != kernel_pmap))
+ return;
+
while (start < end) {
cpu_tlb_flush_va(start);
start += PAGE_SIZE;
@@ -648,35 +652,38 @@ pmap_kupdate_local(unsigned long start, unsigned long end)
}
void
-pmap_kupdate(unsigned long start, unsigned long end)
+pmap_update(struct pmap *pmap, unsigned long start, unsigned long end)
{
unsigned int nr_cpus;
nr_cpus = cpu_count();
+ assert(cpu_intr_enabled() || (nr_cpus == 1));
+
if (nr_cpus == 1) {
- pmap_kupdate_local(start, end);
+ pmap_update_local(pmap, start, end);
return;
}
- spinlock_lock(&pmap_update_lock);
+ spinlock_lock(&pmap_update_data.lock);
- pmap_update_start = start;
- pmap_update_end = end;
- pmap_nr_updates.count = nr_cpus - 1;
- barrier();
+ pmap_update_data.pmap = pmap;
+ pmap_update_data.start = start;
+ pmap_update_data.end = end;
+ pmap_update_data.nr_pending_updates = nr_cpus - 1;
+ mb_store();
lapic_ipi_broadcast(TRAP_PMAP_UPDATE);
/*
* Perform the local update now so that some time is given to the other
* processors, which slightly reduces contention on the update counter.
*/
- pmap_kupdate_local(start, end);
+ pmap_update_local(pmap, start, end);
- while (pmap_nr_updates.count != 0)
+ while (pmap_update_data.nr_pending_updates != 0)
cpu_pause();
- spinlock_unlock(&pmap_update_lock);
+ spinlock_unlock(&pmap_update_data.lock);
}
void
@@ -687,8 +694,9 @@ pmap_update_intr(struct trap_frame *frame)
lapic_eoi();
/* Interrupts are serializing events, no memory barrier required */
- pmap_kupdate_local(pmap_update_start, pmap_update_end);
- atomic_add(&pmap_nr_updates.count, -1);
+ pmap_update_local(pmap_update_data.pmap, pmap_update_data.start,
+ pmap_update_data.end);
+ atomic_add(&pmap_update_data.nr_pending_updates, -1);
}
#ifdef X86_PAE
@@ -712,7 +720,7 @@ pmap_pdpt_alloc(size_t slab_size)
pmap_kenter(start, vm_page_to_pa(page));
}
- pmap_kupdate(va, end);
+ pmap_update(kernel_pmap, va, end);
return va;
error_page:
@@ -823,6 +831,9 @@ error_pmap:
void
pmap_load(struct pmap *pmap)
{
+ assert(!cpu_intr_enabled());
+ assert(!thread_preempt_enabled());
+
cpu_percpu_set_pmap(pmap);
#ifdef X86_PAE
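The new assertions make the pmap_load calling convention explicit. A minimal sketch of a conforming caller, assuming thread_preempt_disable/thread_preempt_enable and cpu_intr_save/cpu_intr_restore primitives comparable to the predicates used in the assertions (these names are assumptions, not part of this commit):

#include <machine/pmap.h>

/* Hypothetical caller: load a pmap on the current processor with both
 * preemption and interrupts disabled, as the assertions require. */
static void
example_switch_pmap(struct pmap *pmap)
{
    unsigned long flags;

    thread_preempt_disable();   /* assumed primitive */
    cpu_intr_save(&flags);      /* assumed primitive */
    pmap_load(pmap);
    cpu_intr_restore(flags);
    thread_preempt_enable();
}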
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index fc6a9322..c2ea6348 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -174,8 +174,7 @@ void pmap_kgrow(unsigned long end);
*
* These functions assume the caller owns the addresses and don't grab any
* lock. Page tables for the new mappings must be preallocated with
- * pmap_kgrow(). The TLB isn't flushed, the caller must use pmap_kupdate()
- * to explicitely request TLB flushing.
+ * pmap_kgrow().
*/
void pmap_kenter(unsigned long va, phys_addr_t pa);
void pmap_kremove(unsigned long start, unsigned long end);
@@ -196,9 +195,18 @@ void pmap_protect(struct pmap *pmap, unsigned long start, unsigned long end,
phys_addr_t pmap_extract(struct pmap *pmap, unsigned long va);
/*
- * Flush the TLB for the given range of kernel addresses.
+ * Perform the required TLB invalidations so that a physical map is up to
+ * date on all processors using it.
+ *
+ * Functions that require updating are :
+ * - pmap_kenter
+ * - pmap_kremove
+ * - pmap_protect
+ *
+ * If the kernel has reached a state where IPIs may be used to update remote
+ * processor TLBs, interrupts must be enabled when calling this function.
*/
-void pmap_kupdate(unsigned long start, unsigned long end);
+void pmap_update(struct pmap *pmap, unsigned long start, unsigned long end);
/*
* Interrupt handler for inter-processor update requests.
@@ -220,6 +228,8 @@ int pmap_create(struct pmap **pmapp);
/*
* Load the given pmap on the current processor.
+ *
+ * This function must be called with interrupts and preemption disabled.
*/
void pmap_load(struct pmap *pmap);
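Per the updated comment, the caller is responsible for the TLB invalidation after pmap_kenter, pmap_kremove and pmap_protect. A minimal sketch of the expected pairing for pmap_protect; the protection parameter and the VM_PROT_READ constant are assumptions, not taken from this diff:

#include <machine/pmap.h>

/* Hypothetical illustration: downgrade a range to read-only, then perform
 * the TLB invalidation the interface leaves to the caller. */
static void
example_make_readonly(struct pmap *pmap, unsigned long start, unsigned long end)
{
    pmap_protect(pmap, start, end, VM_PROT_READ);   /* VM_PROT_READ assumed */
    pmap_update(pmap, start, end);
}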
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index 77238934..5bbf7d67 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -73,7 +73,7 @@ vm_kmem_bootalloc(size_t size)
pmap_kenter(va, pa);
}
- pmap_kupdate(start, vm_kmem_boot_start);
+ pmap_update(kernel_pmap, start, vm_kmem_boot_start);
return start;
}
@@ -145,7 +145,7 @@ vm_kmem_free_va(unsigned long addr, size_t size)
end = addr + vm_page_round(size);
pmap_kremove(addr, end);
- pmap_kupdate(addr, end);
+ pmap_update(kernel_pmap, addr, end);
vm_map_remove(kernel_map, addr, end);
}
@@ -169,7 +169,7 @@ vm_kmem_alloc(size_t size)
pmap_kenter(start, vm_page_to_pa(page));
}
- pmap_kupdate(va, end);
+ pmap_update(kernel_pmap, va, end);
return va;
error_page:
@@ -219,7 +219,7 @@ vm_kmem_map_pa(phys_addr_t addr, size_t size, unsigned long *map_addrp,
for (offset = 0; offset < map_size; offset += PAGE_SIZE)
pmap_kenter(map_addr + offset, start + offset);
- pmap_kupdate(map_addr, map_addr + map_size);
+ pmap_update(kernel_pmap, map_addr, map_addr + map_size);
if (map_addrp != NULL)
*map_addrp = map_addr;
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 16d440bb..a558a4b7 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -210,7 +210,7 @@ vm_map_kentry_alloc(size_t slab_size)
pmap_kenter(va + i, vm_page_to_pa(page));
}
- pmap_kupdate(va, va + slab_size);
+ pmap_update(kernel_pmap, va, va + slab_size);
return va;
}
@@ -235,7 +235,7 @@ vm_map_kentry_free(unsigned long va, size_t slab_size)
}
pmap_kremove(va, va + slab_size);
- pmap_kupdate(va, va + slab_size);
+ pmap_update(kernel_pmap, va, va + slab_size);
vm_map_kentry_free_va(va, slab_size);
}
@@ -287,7 +287,7 @@ vm_map_kentry_setup(void)
pmap_kenter(table_va + (i * PAGE_SIZE), vm_page_to_pa(page));
}
- pmap_kupdate(table_va, table_va + (nr_pages * PAGE_SIZE));
+ pmap_update(kernel_pmap, table_va, table_va + (nr_pages * PAGE_SIZE));
mutex_init(&vm_map_kentry_free_slabs_lock);
slabs = (struct vm_map_kentry_slab *)table_va;