author     Richard Braun <rbraun@sceen.net>    2013-07-03 21:31:47 +0200
committer  Richard Braun <rbraun@sceen.net>    2013-07-03 21:31:47 +0200
commit     9ec78141b572c7db135a28358c06cf22ad6feba4
tree       43d6d48c77d9c4fc76f05c7f998cf8e5bcd9011f
parent     d7b1110aaeac2eb0af3ffcbcca9a9a366b1617a8
x86/pmap: new pmap_enter function
-rw-r--r--  arch/x86/machine/pmap.c | 58
-rw-r--r--  arch/x86/machine/pmap.h | 14
2 files changed, 72 insertions, 0 deletions
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 5e572bec..513acc91 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -908,6 +908,64 @@ error_pmap:
     return error;
 }
 
+static int
+pmap_enter_ptemap(struct pmap *pmap, unsigned long va, phys_addr_t pa, int prot)
+{
+    const struct pmap_pt_level *pt_level;
+    struct vm_page *page;
+    unsigned int level;
+    pmap_pte_t *pte, pte_bits;
+    phys_addr_t pt_pa;
+
+    pte_bits = PMAP_PTE_RW | PMAP_PTE_P;
+
+    /*
+     * Page tables describing user mappings are protected from user access by
+     * not setting the U/S bit when inserting the root page table into itself.
+     */
+    if (pmap != kernel_pmap)
+        pte_bits |= PMAP_PTE_US;
+
+    for (level = PMAP_NR_LEVELS; level > 1; level--) {
+        pt_level = &pmap_pt_levels[level - 1];
+        pte = &pt_level->ptes[PMAP_PTEMAP_INDEX(va, pt_level->shift)];
+
+        if (*pte & PMAP_PTE_P)
+            continue;
+
+        page = vm_phys_alloc(0);
+
+        /* Note that other pages allocated on the way are not released */
+        if (page == NULL)
+            return ERROR_NOMEM;
+
+        pt_pa = vm_page_to_pa(page);
+        pmap_zero_page(pt_pa);
+        *pte = (pt_pa | pte_bits) & pt_level->mask;
+    }
+
+    pte_bits = pmap_prot_table[prot & VM_PROT_ALL] | PMAP_PTE_P;
+
+    if (pmap == kernel_pmap)
+        pte_bits |= PMAP_PTE_G;
+    else
+        pte_bits |= PMAP_PTE_US;
+
+    pte = PMAP_PTEMAP_BASE + PMAP_PTEMAP_INDEX(va, PMAP_L1_SHIFT);
+    *pte = ((pa & PMAP_PA_MASK) | pte_bits) & pmap_pt_levels[0].mask;
+    return 0;
+}
+
+int
+pmap_enter(struct pmap *pmap, unsigned long va, phys_addr_t pa, int prot)
+{
+    if ((pmap == kernel_pmap) || (pmap == pmap_current()))
+        return pmap_enter_ptemap(pmap, va, pa, prot);
+
+    /* TODO Complete pmap_enter() */
+    panic("pmap: pmap_enter not completely implemented yet");
+}
+
 void
 pmap_load(struct pmap *pmap)
 {
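
The hunk above relies on the recursive (self-referencing) page table mapping: PMAP_PTEMAP_BASE exposes the level-1 PTEs as a linear array indexed by PMAP_PTEMAP_INDEX(). A minimal standalone sketch of that idea follows, assuming a 32-bit non-PAE layout and a self-map slot number chosen purely for illustration; none of these constants are taken from this commit.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12                        /* 4 KiB pages */
#define PDE_SHIFT    22                        /* one PDE covers 4 MiB */
#define SELF_SLOT    1023UL                    /* hypothetical self-map PDE slot */
#define PTEMAP_BASE  (SELF_SLOT << PDE_SHIFT)  /* linear array of level-1 PTEs */

/* Virtual address of the level-1 PTE describing va, reached via the self-map. */
static uintptr_t
ptemap_pte_addr(uintptr_t va)
{
    return PTEMAP_BASE + (va >> PAGE_SHIFT) * sizeof(uint32_t);
}

int
main(void)
{
    uintptr_t va = 0x00400000;

    printf("PTE for va %#lx is at %#lx\n",
           (unsigned long)va, (unsigned long)ptemap_pte_addr(va));
    return 0;
}

With a self-map in slot 1023, the PTE for va 0x00400000 would sit at 0xffc01000; pmap_enter_ptemap() performs the same kind of lookup through PMAP_PTEMAP_BASE, generalized to every level of the page table tree.
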
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index 62a9e21f..b67772a9 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -180,6 +180,8 @@ void pmap_kgrow(unsigned long end);
  * If protection is VM_PROT_NONE, this function behaves as if it were
  * VM_PROT_READ. Page tables for the new mapping must be preallocated with
  * pmap_kgrow().
+ *
+ * This function is an optimized version of pmap_enter() for the kernel pmap.
  */
 void pmap_kenter(unsigned long va, phys_addr_t pa, int prot);
@@ -236,6 +238,18 @@ void pmap_setup(void);
 int pmap_create(struct pmap **pmapp);
 
 /*
+ * Create a mapping on a physical map.
+ *
+ * If protection is VM_PROT_NONE, this function behaves as if it were
+ * VM_PROT_READ.
+ *
+ * This function may be used on the kernel pmap, but not for the purpose of
+ * mapping kernel memory, i.e. data structures used by the kernel memory
+ * allocator should be mapped with pmap_kenter().
+ */
+int pmap_enter(struct pmap *pmap, unsigned long va, phys_addr_t pa, int prot);
+
+/*
  * Load the given pmap on the current processor.
  *
  * This function must be called with interrupts and preemption disabled.
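
Based only on the declarations and comments above, a caller might use the two entry points along these lines. This is a usage sketch, not code from the commit: the include paths, the VM_PROT_WRITE flag, PAGE_SIZE and the helper names are assumptions made for illustration.

#include <machine/pmap.h>   /* assumed include path for the declarations above */
#include <vm/vm_prot.h>     /* assumed location of the VM_PROT_* flags */

/*
 * Hypothetical helper: map one page read-write at va in the currently
 * loaded pmap through the generic entry point added by this commit.
 */
static int
map_user_page(unsigned long va, phys_addr_t pa)
{
    return pmap_enter(pmap_current(), va, pa, VM_PROT_READ | VM_PROT_WRITE);
}

/*
 * Hypothetical helper: kernel allocator metadata keeps going through the
 * optimized kernel-only path, with page tables preallocated by pmap_kgrow().
 */
static void
map_kernel_page(unsigned long va, phys_addr_t pa)
{
    pmap_kgrow(va + PAGE_SIZE);     /* PAGE_SIZE assumed defined elsewhere */
    pmap_kenter(va, pa, VM_PROT_READ | VM_PROT_WRITE);
}
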