Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/machine/biosmem.c  41
-rw-r--r--  arch/x86/machine/boot.c      3
-rw-r--r--  arch/x86/machine/param.h    26
-rw-r--r--  arch/x86/machine/pmap.c     17
4 files changed, 42 insertions, 45 deletions
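
Note: this commit renames the physical page allocator interface from vm_phys_* to vm_page_*, folding it into <vm/vm_page.h>; the hunks below update the x86 callers and the segment/limit macros in param.h, leaving the argument lists untouched. The sketch below shows how a typical caller changes; it is grounded only in the calls visible in this diff, and the error path is an assumption added for illustration.

    #include <vm/vm_page.h>          /* replaces <vm/vm_phys.h> */

    /* before the rename: page = vm_phys_alloc(0); ... vm_phys_free(page, 0); */

    struct vm_page *page;

    page = vm_page_alloc(0);         /* allocate a single physical page (order 0) */

    if (page == NULL)
        panic("example: no page available");   /* hypothetical error handling */

    vm_page_free(page, 0);           /* return the page to the allocator */
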
diff --git a/arch/x86/machine/biosmem.c b/arch/x86/machine/biosmem.c
index 0e942c0e..d999454f 100644
--- a/arch/x86/machine/biosmem.c
+++ b/arch/x86/machine/biosmem.c
@@ -32,7 +32,6 @@
#include <machine/multiboot.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
/*
* Maximum number of entries in the BIOS memory map.
@@ -527,10 +526,10 @@ biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
if (entry->type != BIOSMEM_TYPE_AVAILABLE)
continue;
-#ifndef VM_PHYS_HIGHMEM_LIMIT
- if (entry->base_addr >= VM_PHYS_NORMAL_LIMIT)
+#ifndef VM_PAGE_HIGHMEM_LIMIT
+ if (entry->base_addr >= VM_PAGE_NORMAL_LIMIT)
break;
-#endif /* VM_PHYS_HIGHMEM_LIMIT */
+#endif /* VM_PAGE_HIGHMEM_LIMIT */
start = vm_page_round(entry->base_addr);
@@ -539,10 +538,10 @@ biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
entry_end = entry->base_addr + entry->length;
-#ifndef VM_PHYS_HIGHMEM_LIMIT
- if (entry_end > VM_PHYS_NORMAL_LIMIT)
- entry_end = VM_PHYS_NORMAL_LIMIT;
-#endif /* VM_PHYS_HIGHMEM_LIMIT */
+#ifndef VM_PAGE_HIGHMEM_LIMIT
+ if (entry_end > VM_PAGE_NORMAL_LIMIT)
+ entry_end = VM_PAGE_NORMAL_LIMIT;
+#endif /* VM_PAGE_HIGHMEM_LIMIT */
end = vm_page_trunc(entry_end);
@@ -591,7 +590,7 @@ biosmem_load_segment(const char *name, unsigned long long max_phys_end,
if ((avail_end < phys_start) || (avail_end > phys_end))
avail_end = phys_end;
- vm_phys_load(name, phys_start, phys_end, avail_start, avail_end,
+ vm_page_load(name, phys_start, phys_end, avail_start, avail_end,
seg_index, seglist_prio);
}
@@ -612,24 +611,24 @@ biosmem_setup(void)
: 1ULL << cpu->phys_addr_width;
phys_start = BIOSMEM_BASE;
- phys_end = VM_PHYS_NORMAL_LIMIT;
+ phys_end = VM_PAGE_NORMAL_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
if (!error)
biosmem_load_segment("normal", max_phys_end, phys_start, phys_end,
biosmem_heap_free, biosmem_heap_end,
- VM_PHYS_SEG_NORMAL, VM_PHYS_SEGLIST_NORMAL);
+ VM_PAGE_SEG_NORMAL, VM_PAGE_SEGLIST_NORMAL);
-#ifdef VM_PHYS_HIGHMEM_LIMIT
- phys_start = VM_PHYS_NORMAL_LIMIT;
- phys_end = VM_PHYS_HIGHMEM_LIMIT;
+#ifdef VM_PAGE_HIGHMEM_LIMIT
+ phys_start = VM_PAGE_NORMAL_LIMIT;
+ phys_end = VM_PAGE_HIGHMEM_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
if (!error)
biosmem_load_segment("highmem", max_phys_end, phys_start, phys_end,
phys_start, phys_end,
- VM_PHYS_SEG_HIGHMEM, VM_PHYS_SEGLIST_HIGHMEM);
-#endif /* VM_PHYS_HIGHMEM_LIMIT */
+ VM_PAGE_SEG_HIGHMEM, VM_PAGE_SEGLIST_HIGHMEM);
+#endif /* VM_PAGE_HIGHMEM_LIMIT */
}
static void __init
@@ -668,9 +667,9 @@ biosmem_free_usable_range(phys_addr_t start, phys_addr_t end)
struct vm_page *page;
while (start < end) {
- page = vm_phys_lookup_page(start);
+ page = vm_page_lookup(start);
assert(page != NULL);
- vm_phys_manage(page);
+ vm_page_manage(page);
start += PAGE_SIZE;
}
}
@@ -710,13 +709,13 @@ biosmem_free_usable(void)
continue;
/* High memory is always loaded during setup */
- if (entry->base_addr >= VM_PHYS_NORMAL_LIMIT)
+ if (entry->base_addr >= VM_PAGE_NORMAL_LIMIT)
break;
entry_end = entry->base_addr + entry->length;
- if (entry_end > VM_PHYS_NORMAL_LIMIT)
- entry_end = VM_PHYS_NORMAL_LIMIT;
+ if (entry_end > VM_PAGE_NORMAL_LIMIT)
+ entry_end = VM_PAGE_NORMAL_LIMIT;
start = vm_page_round(entry->base_addr);
end = vm_page_trunc(entry_end);
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index 9f6f4b05..692db0ad 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -65,7 +65,6 @@
#include <machine/trap.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
#include <vm/vm_setup.h>
/*
@@ -278,7 +277,7 @@ boot_main(void)
vm_setup();
boot_save_data();
biosmem_free_usable();
- vm_phys_info();
+ vm_page_info();
pic_setup();
pit_setup();
cpu_mp_setup();
diff --git a/arch/x86/machine/param.h b/arch/x86/machine/param.h
index e7612441..5ed5d461 100644
--- a/arch/x86/machine/param.h
+++ b/arch/x86/machine/param.h
@@ -132,35 +132,35 @@
*/
#ifdef __LP64__
-#define VM_MAX_PHYS_SEG 2
-#define VM_PHYS_NORMAL_LIMIT DECL_CONST(0x100000000, UL)
-#define VM_PHYS_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#define VM_PAGE_MAX_SEGS 2
+#define VM_PAGE_NORMAL_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
#else /* __LP64__ */
#ifdef X86_PAE
-#define VM_MAX_PHYS_SEG 2
-#define VM_PHYS_NORMAL_LIMIT DECL_CONST(0x100000000, ULL)
-#define VM_PHYS_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#define VM_PAGE_MAX_SEGS 2
+#define VM_PAGE_NORMAL_LIMIT DECL_CONST(0x100000000, ULL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
#else /* X86_PAE */
-#define VM_MAX_PHYS_SEG 1
-#define VM_PHYS_NORMAL_LIMIT DECL_CONST(0xfffff000, UL)
+#define VM_PAGE_MAX_SEGS 1
+#define VM_PAGE_NORMAL_LIMIT DECL_CONST(0xfffff000, UL)
#endif /* X86_PAE */
#endif /* __LP64__ */
/*
* Physical segment indexes.
*/
-#define VM_PHYS_SEG_NORMAL 0
-#define VM_PHYS_SEG_HIGHMEM 1
+#define VM_PAGE_SEG_NORMAL 0
+#define VM_PAGE_SEG_HIGHMEM 1
/*
* Number of physical segment lists.
*/
-#define VM_NR_PHYS_SEGLIST VM_MAX_PHYS_SEG
+#define VM_PAGE_NR_SEGLISTS VM_PAGE_MAX_SEGS
/*
* Segment list priorities.
*/
-#define VM_PHYS_SEGLIST_NORMAL 0
-#define VM_PHYS_SEGLIST_HIGHMEM 1
+#define VM_PAGE_SEGLIST_NORMAL 0
+#define VM_PAGE_SEGLIST_HIGHMEM 1
#endif /* _X86_PARAM_H */
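
Note: a hedged reading of the renamed limits above (the values come from the hunks; the size conversions are mine):

    /*
     * VM_PAGE_NORMAL_LIMIT  = 0x100000000      (2^32, 4 GiB)  -- upper bound of the "normal" segment
     * VM_PAGE_HIGHMEM_LIMIT = 0x10000000000000 (2^52, 4 PiB)  -- upper bound of the "highmem" segment
     *
     * On i386 without PAE there is a single segment (VM_PAGE_MAX_SEGS == 1)
     * capped at 0xfffff000, one 4 KiB page short of 4 GiB, and no highmem
     * limit is defined, which is why biosmem.c guards its highmem code with
     * #ifdef VM_PAGE_HIGHMEM_LIMIT.
     */
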
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index b0bc4a83..713afacb 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -39,7 +39,6 @@
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
#include <vm/vm_prot.h>
-#include <vm/vm_phys.h>
#define PMAP_PTEMAP_INDEX(va, shift) (((va) & PMAP_VA_MASK) >> (shift))
@@ -578,10 +577,10 @@ pmap_kgrow(unsigned long end)
pte = &pt_level->ptes[index];
if (!(*pte & PMAP_PTE_P)) {
- if (!vm_phys_ready)
- pa = vm_phys_bootalloc();
+ if (!vm_page_ready)
+ pa = vm_page_bootalloc();
else {
- page = vm_phys_alloc(0);
+ page = vm_page_alloc(0);
if (page == NULL)
panic("pmap: no page available to grow kernel space");
@@ -799,7 +798,7 @@ pmap_pdpt_alloc(size_t slab_size)
return 0;
for (start = va, end = va + slab_size; start < end; start += PAGE_SIZE) {
- page = vm_phys_alloc_seg(0, VM_PHYS_SEG_NORMAL);
+ page = vm_page_alloc_seg(0, VM_PAGE_SEG_NORMAL);
if (page == NULL)
goto error_page;
@@ -849,7 +848,7 @@ pmap_create(struct pmap **pmapp)
goto error_pmap;
}
- root_pages = vm_phys_alloc(PMAP_RPTP_ORDER);
+ root_pages = vm_page_alloc(PMAP_RPTP_ORDER);
if (root_pages == NULL) {
error = ERROR_NOMEM;
@@ -873,7 +872,7 @@ pmap_create(struct pmap **pmapp)
pmap->pdpt[i] = (pmap->root_pt + (i * PAGE_SIZE)) | PMAP_PTE_P;
pa = pmap_extract_ptemap(va) + (va & PAGE_MASK);
- assert(pa < VM_PHYS_NORMAL_LIMIT);
+ assert(pa < VM_PAGE_NORMAL_LIMIT);
pmap->pdpt_pa = (unsigned long)pa;
#endif /* X86_PAE */
@@ -910,7 +909,7 @@ pmap_create(struct pmap **pmapp)
#ifdef X86_PAE
error_pdpt:
- vm_phys_free(root_pages, PMAP_RPTP_ORDER);
+ vm_page_free(root_pages, PMAP_RPTP_ORDER);
#endif /* X86_PAE */
error_pages:
kmem_cache_free(&pmap_cache, pmap);
@@ -943,7 +942,7 @@ pmap_enter_ptemap(struct pmap *pmap, unsigned long va, phys_addr_t pa, int prot)
if (*pte & PMAP_PTE_P)
continue;
- page = vm_phys_alloc(0);
+ page = vm_page_alloc(0);
/* Note that other pages allocated on the way are not released */
if (page == NULL)