author    Richard Braun <rbraun@sceen.net>    2013-07-03 21:16:51 +0200
committer Richard Braun <rbraun@sceen.net>    2013-07-03 21:16:51 +0200
commit    92bc488b0ff8aeb4a2239c0f8dc291252aad5215
tree      dfe6ad57beea22f9cf5e34430edf4a93beb6f160
parent    f87947128216c91f9aa32ba16b7c1b70ea23ddd1
x86/pmap: change pmap_kenter to pass protection
 arch/x86/machine/cga.c  |  3
 arch/x86/machine/pmap.c | 11
 arch/x86/machine/pmap.h | 12
 vm/vm_kmem.c            |  7
 vm/vm_map.c             |  5
 5 files changed, 23 insertions(+), 15 deletions(-)
diff --git a/arch/x86/machine/cga.c b/arch/x86/machine/cga.c
index 0cc857f0..0baf0a30 100644
--- a/arch/x86/machine/cga.c
+++ b/arch/x86/machine/cga.c
@@ -23,6 +23,7 @@
#include <machine/io.h>
#include <machine/pmap.h>
#include <machine/cga.h>
+#include <vm/vm_prot.h>
/*
* Screen dimensions.
@@ -107,7 +108,7 @@ cga_setup(void)
unsigned long va;
va = pmap_bootalloc(1);
- pmap_kenter(va, CGA_MEMORY);
+ pmap_kenter(va, CGA_MEMORY, VM_PROT_READ | VM_PROT_WRITE);
pmap_update(kernel_pmap, va, va + PAGE_SIZE);
cga_memory = (uint8_t *)va;
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 014af520..5e572bec 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -476,7 +476,7 @@ pmap_zero_page(phys_addr_t pa)
thread_pin();
mutex_lock(&pmap_zero_va_lock);
- pmap_kenter(pmap_zero_va, pa);
+ pmap_kenter(pmap_zero_va, pa, VM_PROT_WRITE);
cpu_tlb_flush_va(pmap_zero_va);
memset((void *)pmap_zero_va, 0, PAGE_SIZE);
mutex_unlock(&pmap_zero_va_lock);
@@ -497,7 +497,7 @@ pmap_map_pt(phys_addr_t pa)
for (i = 0; i < PMAP_NR_RPTPS; i++) {
offset = i * PAGE_SIZE;
va = base + offset;
- pmap_kenter(va, pa + offset);
+ pmap_kenter(va, pa + offset, VM_PROT_READ | VM_PROT_WRITE);
cpu_tlb_flush_va(va);
}
@@ -596,12 +596,13 @@ pmap_kgrow(unsigned long end)
}
void
-pmap_kenter(unsigned long va, phys_addr_t pa)
+pmap_kenter(unsigned long va, phys_addr_t pa, int prot)
{
pmap_pte_t *pte;
pte = PMAP_PTEMAP_BASE + PMAP_PTEMAP_INDEX(va, PMAP_L1_SHIFT);
- *pte = ((pa & PMAP_PA_MASK) | PMAP_PTE_G | PMAP_PTE_RW | PMAP_PTE_P)
+ *pte = ((pa & PMAP_PA_MASK) | PMAP_PTE_G | PMAP_PTE_P
+ | pmap_prot_table[prot & VM_PROT_ALL])
& pmap_pt_levels[0].mask;
}
@@ -795,7 +796,7 @@ pmap_pdpt_alloc(size_t slab_size)
if (page == NULL)
goto error_page;
- pmap_kenter(start, vm_page_to_pa(page));
+ pmap_kenter(start, vm_page_to_pa(page), VM_PROT_READ | VM_PROT_WRITE);
}
pmap_update(kernel_pmap, va, end);
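
[Editor's note] The rewritten pmap_kenter() indexes pmap_prot_table, which is defined elsewhere in pmap.c and not shown in this diff. Below is a minimal sketch of a plausible definition, assuming x86 PTE semantics where the present bit (PMAP_PTE_P) alone grants read access and PMAP_PTE_RW is the only additional protection bit available; the initializer name is hypothetical and not part of the commit.

#include <machine/pmap.h>   /* pmap_pte_t, PMAP_PTE_RW */
#include <vm/vm_prot.h>     /* VM_PROT_* */

static pmap_pte_t pmap_prot_table[VM_PROT_ALL + 1];

/*
 * Hypothetical initializer: any protection combination that includes
 * VM_PROT_WRITE maps to PMAP_PTE_RW, everything else to 0. Since a
 * present PTE is always readable, this is also why VM_PROT_NONE ends
 * up behaving like VM_PROT_READ, as the new header comment states.
 */
static void
pmap_prot_table_setup(void)
{
    int prot;

    for (prot = 0; prot <= VM_PROT_ALL; prot++)
        pmap_prot_table[prot] = (prot & VM_PROT_WRITE) ? PMAP_PTE_RW : 0;
}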
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index 32a56c05..62a9e21f 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -175,13 +175,17 @@ unsigned long pmap_klimit(void);
void pmap_kgrow(unsigned long end);
/*
- * Kernel pmap functions.
+ * Create a mapping on the kernel physical map.
*
- * These functions assume the caller owns the addresses and don't grab any
- * lock. Page tables for the new mappings must be preallocated with
+ * If protection is VM_PROT_NONE, this function behaves as if it were
+ * VM_PROT_READ. Page tables for the new mapping must be preallocated with
* pmap_kgrow().
*/
-void pmap_kenter(unsigned long va, phys_addr_t pa);
+void pmap_kenter(unsigned long va, phys_addr_t pa, int prot);
+
+/*
+ * Remove mappings from the kernel physical map.
+ */
void pmap_kremove(unsigned long start, unsigned long end);
/*
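
[Editor's note] For reference, the calling convention the new prototype implies, mirroring the cga_setup() hunk above: install the PTE with pmap_kenter(), then commit with pmap_update() before dereferencing the address, with page tables preallocated via pmap_kgrow(). This is a sketch, not part of the commit; the helper name and the <machine/param.h> include are assumptions.

#include <stdint.h>
#include <machine/param.h>  /* assumed home of PAGE_SIZE */
#include <machine/pmap.h>
#include <vm/vm_prot.h>

/* Hypothetical helper: map one physical page read/write into the kernel map. */
static uint8_t *
map_device_page(phys_addr_t pa)
{
    unsigned long va;

    va = pmap_bootalloc(1);                             /* reserve one virtual page */
    pmap_kenter(va, pa, VM_PROT_READ | VM_PROT_WRITE);  /* install the PTE, no locking */
    pmap_update(kernel_pmap, va, va + PAGE_SIZE);       /* flush before first access */
    return (uint8_t *)va;
}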
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index 5bbf7d67..73d7c3c5 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -70,7 +70,7 @@ vm_kmem_bootalloc(size_t size)
for (va = start; va < vm_kmem_boot_start; va += PAGE_SIZE) {
pa = vm_phys_bootalloc();
- pmap_kenter(va, pa);
+ pmap_kenter(va, pa, VM_PROT_READ | VM_PROT_WRITE);
}
pmap_update(kernel_pmap, start, vm_kmem_boot_start);
@@ -166,7 +166,7 @@ vm_kmem_alloc(size_t size)
if (page == NULL)
goto error_page;
- pmap_kenter(start, vm_page_to_pa(page));
+ pmap_kenter(start, vm_page_to_pa(page), VM_PROT_READ | VM_PROT_WRITE);
}
pmap_update(kernel_pmap, va, end);
@@ -217,7 +217,8 @@ vm_kmem_map_pa(phys_addr_t addr, size_t size, unsigned long *map_addrp,
return NULL;
for (offset = 0; offset < map_size; offset += PAGE_SIZE)
- pmap_kenter(map_addr + offset, start + offset);
+ pmap_kenter(map_addr + offset, start + offset,
+ VM_PROT_READ | VM_PROT_WRITE);
pmap_update(kernel_pmap, map_addr, map_addr + map_size);
diff --git a/vm/vm_map.c b/vm/vm_map.c
index a558a4b7..d6000e19 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -207,7 +207,7 @@ vm_map_kentry_alloc(size_t slab_size)
if (page == NULL)
panic("vm_map: no physical page for kentry cache");
- pmap_kenter(va + i, vm_page_to_pa(page));
+ pmap_kenter(va + i, vm_page_to_pa(page), VM_PROT_READ | VM_PROT_WRITE);
}
pmap_update(kernel_pmap, va, va + slab_size);
@@ -284,7 +284,8 @@ vm_map_kentry_setup(void)
if (page == NULL)
panic("vm_map: unable to allocate page for kentry table");
- pmap_kenter(table_va + (i * PAGE_SIZE), vm_page_to_pa(page));
+ pmap_kenter(table_va + (i * PAGE_SIZE), vm_page_to_pa(page),
+ VM_PROT_READ | VM_PROT_WRITE);
}
pmap_update(kernel_pmap, table_va, table_va + (nr_pages * PAGE_SIZE));