-rw-r--r--  arch/x86/machine/pmap.c  19
-rw-r--r--  arch/x86/machine/pmap.h  13
-rw-r--r--  vm/vm_kmem.c              2
-rw-r--r--  vm/vm_map.c               2
4 files changed, 23 insertions(+), 13 deletions(-)
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 703d842a..c571d0a3 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -75,8 +75,6 @@ struct pmap_pt_level {
static struct pmap kernel_pmap_store;
struct pmap *kernel_pmap = &kernel_pmap_store;
-unsigned long pmap_klimit;
-
#ifdef X86_PAE
/*
* "Hidden" root page table for PAE mode.
@@ -91,6 +89,11 @@ static pmap_pte_t pmap_pdpt[PMAP_NR_RPTPS] __aligned(32);
static pmap_pte_t *pmap_boot_root_pt __initdata;
/*
+ * Maximum mappable kernel address.
+ */
+static unsigned long pmap_kernel_limit;
+
+/*
* Table of page translation properties.
*
* This table is only used before paging is enabled.
@@ -339,7 +342,7 @@ pmap_bootstrap(void)
cpu_tlb_flush();
- pmap_klimit = VM_MIN_KERNEL_ADDRESS;
+ pmap_kernel_limit = VM_MIN_KERNEL_ADDRESS;
}
void __init
@@ -367,6 +370,12 @@ pmap_bootalloc(unsigned int nr_pages)
return page;
}
+unsigned long
+pmap_klimit(void)
+{
+ return pmap_kernel_limit;
+}
+
void
pmap_growkernel(unsigned long va)
{
@@ -377,7 +386,7 @@ pmap_growkernel(unsigned long va)
pmap_pte_t *pte;
phys_addr_t pa;
- start = pmap_klimit;
+ start = pmap_kernel_limit;
va = P2END(va, 1 << PMAP_L2_SHIFT) - 1;
assert(start < va);
@@ -408,7 +417,7 @@ pmap_growkernel(unsigned long va)
}
}
- pmap_klimit = va + 1;
+ pmap_kernel_limit = va + 1;
}
void
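
The pmap.c hunks above replace the exported pmap_klimit variable with a file-local pmap_kernel_limit and a pmap_klimit() accessor, so only pmap_bootstrap() and pmap_growkernel() can update the limit. A minimal standalone sketch of that pattern follows; the example_* names and constants are hypothetical stand-ins, not the actual kernel code:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants. */
#define EXAMPLE_MIN_KERNEL_ADDRESS 0xc0000000UL
#define EXAMPLE_GROW_STEP          0x00400000UL /* one L2 entry worth of VA */

/* Private storage: only this file can modify the limit. */
static unsigned long example_kernel_limit = EXAMPLE_MIN_KERNEL_ADDRESS;

/* Read-only accessor, mirroring the new pmap_klimit() interface. */
static unsigned long
example_klimit(void)
{
    return example_kernel_limit;
}

/* Raise the limit so that addresses up to va become mappable, rounding
 * up to the grow step much like pmap_growkernel() rounds up to an L2
 * page table boundary. */
static void
example_growkernel(unsigned long va)
{
    unsigned long start;

    start = example_kernel_limit;
    va = ((va + EXAMPLE_GROW_STEP) & ~(EXAMPLE_GROW_STEP - 1)) - 1;
    assert(start < va);

    /* Page table pages would be allocated here in the real code. */

    example_kernel_limit = va + 1;
}

int
main(void)
{
    printf("limit: %#lx\n", example_klimit());
    example_growkernel(example_klimit() + 1);
    printf("limit: %#lx\n", example_klimit());
    return 0;
}
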
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index 81cc998c..fece5163 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -122,12 +122,6 @@ struct pmap {
extern struct pmap *kernel_pmap;
/*
- * Address below which using the low level kernel pmap functions is safe.
- * Its value is adjusted by calling pmap_growkernel().
- */
-extern unsigned long pmap_klimit;
-
-/*
* Early initialization of the MMU.
*
* This function is called before paging is enabled by the boot module. It
@@ -161,6 +155,13 @@ void pmap_ap_bootstrap(void);
unsigned long pmap_bootalloc(unsigned int nr_pages);
/*
+ * Return the address below which using the kernel pmap functions is safe.
+ *
+ * Its value is adjusted by calling pmap_growkernel().
+ */
+unsigned long pmap_klimit(void);
+
+/*
* Preallocate resources so that addresses up to va can be mapped safely in
* the kernel pmap.
*/
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index a90fb058..d51b2ced 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -62,7 +62,7 @@ vm_kmem_bootalloc(size_t size)
start = vm_kmem_boot_start;
vm_kmem_boot_start += size;
- if (pmap_klimit < vm_kmem_boot_start)
+ if (pmap_klimit() < vm_kmem_boot_start)
pmap_growkernel(vm_kmem_boot_start);
for (va = start; va < vm_kmem_boot_start; va += PAGE_SIZE) {
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 8343472f..2ca46582 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -435,7 +435,7 @@ vm_map_insert(struct vm_map *map, struct vm_map_entry *entry,
vm_map_link(map, entry, NULL, request->next);
map->size += request->size;
- if ((map == kernel_map) && (pmap_klimit < entry->end))
+ if ((map == kernel_map) && (pmap_klimit() < entry->end))
pmap_growkernel(entry->end);
return 0;
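
Both call sites above follow the same check-then-grow idiom: read the current limit through pmap_klimit() and call pmap_growkernel() only when the new end address lies beyond it. A hedged caller sketch, reusing the hypothetical example_* helpers from the earlier sketch:

/* Illustrative caller, mirroring the vm_kmem_bootalloc() and
 * vm_map_insert() call sites; the example_* names are hypothetical. */
static void
example_reserve(unsigned long end)
{
    if (example_klimit() < end)
        example_growkernel(end);
}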