author     Richard Braun <rbraun@sceen.net>    2013-07-03 21:16:51 +0200
committer  Richard Braun <rbraun@sceen.net>    2013-07-03 21:16:51 +0200
commit     92bc488b0ff8aeb4a2239c0f8dc291252aad5215 (patch)
tree       dfe6ad57beea22f9cf5e34430edf4a93beb6f160 /vm
parent     f87947128216c91f9aa32ba16b7c1b70ea23ddd1 (diff)
x86/pmap: change pmap_kenter to pass protection
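pmap_kenter now takes the mapping protection as an explicit third argument
instead of always installing read/write kernel mappings. A minimal sketch of
the updated interface as the call sites below use it; the parameter types
(unsigned long va, phys_addr_t pa, int prot) are assumptions inferred from
the callers, not copied from the x86 pmap header:

    /* Assumed prototype; the authoritative declaration lives in the x86
     * pmap header, which is not part of this diff. */
    void pmap_kenter(unsigned long va, phys_addr_t pa, int prot);

    /* Callers now state the protection explicitly, e.g. a read/write
     * kernel mapping of a single physical page, followed by the usual
     * pmap_update() call to commit the change. */
    pmap_kenter(va, pa, VM_PROT_READ | VM_PROT_WRITE);
    pmap_update(kernel_pmap, va, va + PAGE_SIZE);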
Diffstat (limited to 'vm')
-rw-r--r--  vm/vm_kmem.c  7
-rw-r--r--  vm/vm_map.c   5
2 files changed, 7 insertions, 5 deletions
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index 5bbf7d6..73d7c3c 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -70,7 +70,7 @@ vm_kmem_bootalloc(size_t size)
     for (va = start; va < vm_kmem_boot_start; va += PAGE_SIZE) {
         pa = vm_phys_bootalloc();
-        pmap_kenter(va, pa);
+        pmap_kenter(va, pa, VM_PROT_READ | VM_PROT_WRITE);
     }
     pmap_update(kernel_pmap, start, vm_kmem_boot_start);
@@ -166,7 +166,7 @@ vm_kmem_alloc(size_t size)
         if (page == NULL)
             goto error_page;
-        pmap_kenter(start, vm_page_to_pa(page));
+        pmap_kenter(start, vm_page_to_pa(page), VM_PROT_READ | VM_PROT_WRITE);
     }
     pmap_update(kernel_pmap, va, end);
@@ -217,7 +217,8 @@ vm_kmem_map_pa(phys_addr_t addr, size_t size, unsigned long *map_addrp,
         return NULL;
     for (offset = 0; offset < map_size; offset += PAGE_SIZE)
-        pmap_kenter(map_addr + offset, start + offset);
+        pmap_kenter(map_addr + offset, start + offset,
+                    VM_PROT_READ | VM_PROT_WRITE);
     pmap_update(kernel_pmap, map_addr, map_addr + map_size);
diff --git a/vm/vm_map.c b/vm/vm_map.c
index a558a4b..d6000e1 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -207,7 +207,7 @@ vm_map_kentry_alloc(size_t slab_size)
         if (page == NULL)
             panic("vm_map: no physical page for kentry cache");
-        pmap_kenter(va + i, vm_page_to_pa(page));
+        pmap_kenter(va + i, vm_page_to_pa(page), VM_PROT_READ | VM_PROT_WRITE);
     }
     pmap_update(kernel_pmap, va, va + slab_size);
@@ -284,7 +284,8 @@ vm_map_kentry_setup(void)
         if (page == NULL)
             panic("vm_map: unable to allocate page for kentry table");
-        pmap_kenter(table_va + (i * PAGE_SIZE), vm_page_to_pa(page));
+        pmap_kenter(table_va + (i * PAGE_SIZE), vm_page_to_pa(page),
+                    VM_PROT_READ | VM_PROT_WRITE);
     }
     pmap_update(kernel_pmap, table_va, table_va + (nr_pages * PAGE_SIZE));