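Rename the vm_phys module to vm_page: vm/vm_phys.c becomes vm/vm_page.c, vm/vm_phys.h is folded into vm/vm_page.h, and all VM_PHYS_*/vm_phys_* identifiers become VM_PAGE_*/vm_page_*.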
-rw-r--r--  Makefrag.am                                   3
-rw-r--r--  arch/x86/machine/biosmem.c                   41
-rw-r--r--  arch/x86/machine/boot.c                       3
-rw-r--r--  arch/x86/machine/param.h                     26
-rw-r--r--  arch/x86/machine/pmap.c                      17
-rw-r--r--  vm/vm_kmem.c                                 11
-rw-r--r--  vm/vm_map.c                                   9
-rw-r--r--  vm/vm_page.c (renamed from vm/vm_phys.c)    285
-rw-r--r--  vm/vm_page.h                                 72
-rw-r--r--  vm/vm_phys.h                                 94
-rw-r--r--  vm/vm_setup.c                                 4
11 files changed, 267 insertions, 298 deletions
diff --git a/Makefrag.am b/Makefrag.am
index d32c02a3..86cac8bd 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -60,9 +60,8 @@ x15_SOURCES += \
vm/vm_kmem.h \
vm/vm_map.c \
vm/vm_map.h \
+ vm/vm_page.c \
vm/vm_page.h \
- vm/vm_phys.c \
- vm/vm_phys.h \
vm/vm_prot.h \
vm/vm_setup.c \
vm/vm_setup.h
diff --git a/arch/x86/machine/biosmem.c b/arch/x86/machine/biosmem.c
index 0e942c0e..d999454f 100644
--- a/arch/x86/machine/biosmem.c
+++ b/arch/x86/machine/biosmem.c
@@ -32,7 +32,6 @@
#include <machine/multiboot.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
/*
* Maximum number of entries in the BIOS memory map.
@@ -527,10 +526,10 @@ biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
if (entry->type != BIOSMEM_TYPE_AVAILABLE)
continue;
-#ifndef VM_PHYS_HIGHMEM_LIMIT
- if (entry->base_addr >= VM_PHYS_NORMAL_LIMIT)
+#ifndef VM_PAGE_HIGHMEM_LIMIT
+ if (entry->base_addr >= VM_PAGE_NORMAL_LIMIT)
break;
-#endif /* VM_PHYS_HIGHMEM_LIMIT */
+#endif /* VM_PAGE_HIGHMEM_LIMIT */
start = vm_page_round(entry->base_addr);
@@ -539,10 +538,10 @@ biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
entry_end = entry->base_addr + entry->length;
-#ifndef VM_PHYS_HIGHMEM_LIMIT
- if (entry_end > VM_PHYS_NORMAL_LIMIT)
- entry_end = VM_PHYS_NORMAL_LIMIT;
-#endif /* VM_PHYS_HIGHMEM_LIMIT */
+#ifndef VM_PAGE_HIGHMEM_LIMIT
+ if (entry_end > VM_PAGE_NORMAL_LIMIT)
+ entry_end = VM_PAGE_NORMAL_LIMIT;
+#endif /* VM_PAGE_HIGHMEM_LIMIT */
end = vm_page_trunc(entry_end);
@@ -591,7 +590,7 @@ biosmem_load_segment(const char *name, unsigned long long max_phys_end,
if ((avail_end < phys_start) || (avail_end > phys_end))
avail_end = phys_end;
- vm_phys_load(name, phys_start, phys_end, avail_start, avail_end,
+ vm_page_load(name, phys_start, phys_end, avail_start, avail_end,
seg_index, seglist_prio);
}
@@ -612,24 +611,24 @@ biosmem_setup(void)
: 1ULL << cpu->phys_addr_width;
phys_start = BIOSMEM_BASE;
- phys_end = VM_PHYS_NORMAL_LIMIT;
+ phys_end = VM_PAGE_NORMAL_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
if (!error)
biosmem_load_segment("normal", max_phys_end, phys_start, phys_end,
biosmem_heap_free, biosmem_heap_end,
- VM_PHYS_SEG_NORMAL, VM_PHYS_SEGLIST_NORMAL);
+ VM_PAGE_SEG_NORMAL, VM_PAGE_SEGLIST_NORMAL);
-#ifdef VM_PHYS_HIGHMEM_LIMIT
- phys_start = VM_PHYS_NORMAL_LIMIT;
- phys_end = VM_PHYS_HIGHMEM_LIMIT;
+#ifdef VM_PAGE_HIGHMEM_LIMIT
+ phys_start = VM_PAGE_NORMAL_LIMIT;
+ phys_end = VM_PAGE_HIGHMEM_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
if (!error)
biosmem_load_segment("highmem", max_phys_end, phys_start, phys_end,
phys_start, phys_end,
- VM_PHYS_SEG_HIGHMEM, VM_PHYS_SEGLIST_HIGHMEM);
-#endif /* VM_PHYS_HIGHMEM_LIMIT */
+ VM_PAGE_SEG_HIGHMEM, VM_PAGE_SEGLIST_HIGHMEM);
+#endif /* VM_PAGE_HIGHMEM_LIMIT */
}
static void __init
@@ -668,9 +667,9 @@ biosmem_free_usable_range(phys_addr_t start, phys_addr_t end)
struct vm_page *page;
while (start < end) {
- page = vm_phys_lookup_page(start);
+ page = vm_page_lookup(start);
assert(page != NULL);
- vm_phys_manage(page);
+ vm_page_manage(page);
start += PAGE_SIZE;
}
}
@@ -710,13 +709,13 @@ biosmem_free_usable(void)
continue;
/* High memory is always loaded during setup */
- if (entry->base_addr >= VM_PHYS_NORMAL_LIMIT)
+ if (entry->base_addr >= VM_PAGE_NORMAL_LIMIT)
break;
entry_end = entry->base_addr + entry->length;
- if (entry_end > VM_PHYS_NORMAL_LIMIT)
- entry_end = VM_PHYS_NORMAL_LIMIT;
+ if (entry_end > VM_PAGE_NORMAL_LIMIT)
+ entry_end = VM_PAGE_NORMAL_LIMIT;
start = vm_page_round(entry->base_addr);
end = vm_page_trunc(entry_end);
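Note: vm_page_round() and vm_page_trunc() are used above but defined elsewhere in vm_page.h. A minimal sketch of the page-boundary clipping, assuming 4 KiB pages and the usual power-of-two helpers (the macro bodies and the BIOS map entry values are assumptions, not from this diff):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL /* assumed: x86 base pages */

typedef uint64_t phys_addr_t;

/* Assumed equivalents of the P2ROUND/P2ALIGN-based macros. */
#define vm_page_round(pa) (((pa) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define vm_page_trunc(pa) ((pa) & ~(PAGE_SIZE - 1))

int
main(void)
{
    /* Invented BIOS map entry: [0x9f400, 0x100000). */
    phys_addr_t base = 0x9f400, entry_end = 0x100000;

    /* Only whole pages inside the entry are kept: the partial
     * leading page is rounded up, the end is truncated down. */
    assert(vm_page_round(base) == 0xa0000);
    assert(vm_page_trunc(entry_end) == 0x100000);
    return 0;
}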
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index 9f6f4b05..692db0ad 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -65,7 +65,6 @@
#include <machine/trap.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
#include <vm/vm_setup.h>
/*
@@ -278,7 +277,7 @@ boot_main(void)
vm_setup();
boot_save_data();
biosmem_free_usable();
- vm_phys_info();
+ vm_page_info();
pic_setup();
pit_setup();
cpu_mp_setup();
diff --git a/arch/x86/machine/param.h b/arch/x86/machine/param.h
index e7612441..5ed5d461 100644
--- a/arch/x86/machine/param.h
+++ b/arch/x86/machine/param.h
@@ -132,35 +132,35 @@
*/
#ifdef __LP64__
-#define VM_MAX_PHYS_SEG 2
-#define VM_PHYS_NORMAL_LIMIT DECL_CONST(0x100000000, UL)
-#define VM_PHYS_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#define VM_PAGE_MAX_SEGS 2
+#define VM_PAGE_NORMAL_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
#else /* __LP64__ */
#ifdef X86_PAE
-#define VM_MAX_PHYS_SEG 2
-#define VM_PHYS_NORMAL_LIMIT DECL_CONST(0x100000000, ULL)
-#define VM_PHYS_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#define VM_PAGE_MAX_SEGS 2
+#define VM_PAGE_NORMAL_LIMIT DECL_CONST(0x100000000, ULL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
#else /* X86_PAE */
-#define VM_MAX_PHYS_SEG 1
-#define VM_PHYS_NORMAL_LIMIT DECL_CONST(0xfffff000, UL)
+#define VM_PAGE_MAX_SEGS 1
+#define VM_PAGE_NORMAL_LIMIT DECL_CONST(0xfffff000, UL)
#endif /* X86_PAE */
#endif /* __LP64__ */
/*
* Physical segment indexes.
*/
-#define VM_PHYS_SEG_NORMAL 0
-#define VM_PHYS_SEG_HIGHMEM 1
+#define VM_PAGE_SEG_NORMAL 0
+#define VM_PAGE_SEG_HIGHMEM 1
/*
* Number of physical segment lists.
*/
-#define VM_NR_PHYS_SEGLIST VM_MAX_PHYS_SEG
+#define VM_PAGE_NR_SEGLISTS VM_PAGE_MAX_SEGS
/*
* Segment list priorities.
*/
-#define VM_PHYS_SEGLIST_NORMAL 0
-#define VM_PHYS_SEGLIST_HIGHMEM 1
+#define VM_PAGE_SEGLIST_NORMAL 0
+#define VM_PAGE_SEGLIST_HIGHMEM 1
#endif /* _X86_PARAM_H */
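Taken together, these constants split physical memory at 4 GiB into a normal and a highmem segment on __LP64__ and PAE configurations, while plain 32-bit x86 keeps a single segment capped just under 4 GiB. A hedged sketch of the resulting classification; the constants are copied from the __LP64__ branch above, but vm_page_seg_index() is a hypothetical helper, not part of the tree:

#include <stdint.h>

typedef uint64_t phys_addr_t;

#define VM_PAGE_NORMAL_LIMIT    0x100000000UL           /* 4 GiB */
#define VM_PAGE_HIGHMEM_LIMIT   0x10000000000000UL      /* 2^52 */

#define VM_PAGE_SEG_NORMAL      0
#define VM_PAGE_SEG_HIGHMEM     1

/* Hypothetical helper: classify a physical address by segment. */
static inline unsigned int
vm_page_seg_index(phys_addr_t pa)
{
    return (pa < VM_PAGE_NORMAL_LIMIT)
           ? VM_PAGE_SEG_NORMAL
           : VM_PAGE_SEG_HIGHMEM;
}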
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index b0bc4a83..713afacb 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -39,7 +39,6 @@
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
#include <vm/vm_prot.h>
-#include <vm/vm_phys.h>
#define PMAP_PTEMAP_INDEX(va, shift) (((va) & PMAP_VA_MASK) >> (shift))
@@ -578,10 +577,10 @@ pmap_kgrow(unsigned long end)
pte = &pt_level->ptes[index];
if (!(*pte & PMAP_PTE_P)) {
- if (!vm_phys_ready)
- pa = vm_phys_bootalloc();
+ if (!vm_page_ready)
+ pa = vm_page_bootalloc();
else {
- page = vm_phys_alloc(0);
+ page = vm_page_alloc(0);
if (page == NULL)
panic("pmap: no page available to grow kernel space");
@@ -799,7 +798,7 @@ pmap_pdpt_alloc(size_t slab_size)
return 0;
for (start = va, end = va + slab_size; start < end; start += PAGE_SIZE) {
- page = vm_phys_alloc_seg(0, VM_PHYS_SEG_NORMAL);
+ page = vm_page_alloc_seg(0, VM_PAGE_SEG_NORMAL);
if (page == NULL)
goto error_page;
@@ -849,7 +848,7 @@ pmap_create(struct pmap **pmapp)
goto error_pmap;
}
- root_pages = vm_phys_alloc(PMAP_RPTP_ORDER);
+ root_pages = vm_page_alloc(PMAP_RPTP_ORDER);
if (root_pages == NULL) {
error = ERROR_NOMEM;
@@ -873,7 +872,7 @@ pmap_create(struct pmap **pmapp)
pmap->pdpt[i] = (pmap->root_pt + (i * PAGE_SIZE)) | PMAP_PTE_P;
pa = pmap_extract_ptemap(va) + (va & PAGE_MASK);
- assert(pa < VM_PHYS_NORMAL_LIMIT);
+ assert(pa < VM_PAGE_NORMAL_LIMIT);
pmap->pdpt_pa = (unsigned long)pa;
#endif /* X86_PAE */
@@ -910,7 +909,7 @@ pmap_create(struct pmap **pmapp)
#ifdef X86_PAE
error_pdpt:
- vm_phys_free(root_pages, PMAP_RPTP_ORDER);
+ vm_page_free(root_pages, PMAP_RPTP_ORDER);
#endif /* X86_PAE */
error_pages:
kmem_cache_free(&pmap_cache, pmap);
@@ -943,7 +942,7 @@ pmap_enter_ptemap(struct pmap *pmap, unsigned long va, phys_addr_t pa, int prot)
if (*pte & PMAP_PTE_P)
continue;
- page = vm_phys_alloc(0);
+ page = vm_page_alloc(0);
/* Note that other pages allocated on the way are not released */
if (page == NULL)
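The pmap_kgrow() hunk above illustrates the allocator's two-phase life: until vm_page_setup() has run, only the bootstrap heap is usable. A condensed sketch of that pattern; pmap_alloc_pt_page() is a hypothetical name, and the vm_page_to_pa() call is taken from vm_page.h further down in this diff:

/* Hypothetical helper condensing the pattern in pmap_kgrow(). */
static phys_addr_t
pmap_alloc_pt_page(void)
{
    struct vm_page *page;

    if (!vm_page_ready)
        return vm_page_bootalloc();     /* bootstrap heap, pre-setup */

    page = vm_page_alloc(0);            /* CPU pool / buddy allocator */

    if (page == NULL)
        panic("pmap: no page available to grow kernel space");

    return vm_page_to_pa(page);
}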
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index 73d7c3c5..31ea397b 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -27,7 +27,6 @@
#include <vm/vm_kmem.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
#include <vm/vm_prot.h>
/*
@@ -69,7 +68,7 @@ vm_kmem_bootalloc(size_t size)
pmap_kgrow(vm_kmem_boot_start);
for (va = start; va < vm_kmem_boot_start; va += PAGE_SIZE) {
- pa = vm_phys_bootalloc();
+ pa = vm_page_bootalloc();
pmap_kenter(va, pa, VM_PROT_READ | VM_PROT_WRITE);
}
@@ -94,7 +93,7 @@ vm_kmem_lookup_page(unsigned long va)
if (pa == 0)
return NULL;
- return vm_phys_lookup_page(pa);
+ return vm_page_lookup(pa);
}
static int
@@ -161,7 +160,7 @@ vm_kmem_alloc(size_t size)
return 0;
for (start = va, end = va + size; start < end; start += PAGE_SIZE) {
- page = vm_phys_alloc(0);
+ page = vm_page_alloc(0);
if (page == NULL)
goto error_page;
@@ -193,9 +192,9 @@ vm_kmem_free(unsigned long addr, size_t size)
if (pa == 0)
continue;
- page = vm_phys_lookup_page(pa);
+ page = vm_page_lookup(pa);
assert(page != NULL);
- vm_phys_free(page, 0);
+ vm_page_free(page, 0);
}
vm_kmem_free_va(addr, size);
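vm_kmem_free() shows why vm_page_lookup() matters: the kernel map keeps no record of page descriptors, so they are recovered from the pmap on release. The same loop restated with editorial comments (the loop header is reconstructed, since the hunk elides it; identifiers as in vm_kmem_free()):

for (va = addr, end = addr + size; va < end; va += PAGE_SIZE) {
    pa = pmap_extract(kernel_pmap, va);

    if (pa == 0)
        continue;                   /* hole: this page was never backed */

    page = vm_page_lookup(pa);      /* physical address -> descriptor */
    assert(page != NULL);
    vm_page_free(page, 0);          /* order 0: a single page */
}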
diff --git a/vm/vm_map.c b/vm/vm_map.c
index d6000e19..da47b189 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -49,7 +49,6 @@
#include <vm/vm_map.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
#include <vm/vm_prot.h>
/*
@@ -202,7 +201,7 @@ vm_map_kentry_alloc(size_t slab_size)
+ VM_MAP_KENTRY_SIZE));
for (i = 0; i < slab_size; i += PAGE_SIZE) {
- page = vm_phys_alloc(0);
+ page = vm_page_alloc(0);
if (page == NULL)
panic("vm_map: no physical page for kentry cache");
@@ -229,9 +228,9 @@ vm_map_kentry_free(unsigned long va, size_t slab_size)
for (i = 0; i < slab_size; i += PAGE_SIZE) {
pa = pmap_extract(kernel_pmap, va + i);
assert(pa != 0);
- page = vm_phys_lookup_page(pa);
+ page = vm_page_lookup(pa);
assert(page != NULL);
- vm_phys_free(page, 0);
+ vm_page_free(page, 0);
}
pmap_kremove(va, va + slab_size);
@@ -279,7 +278,7 @@ vm_map_kentry_setup(void)
table_va = vm_map_kentry_entry.start + VM_MAP_KENTRY_SIZE;
for (i = 0; i < nr_pages; i++) {
- page = vm_phys_alloc(0);
+ page = vm_page_alloc(0);
if (page == NULL)
panic("vm_map: unable to allocate page for kentry table");
diff --git a/vm/vm_phys.c b/vm/vm_page.c
index 1cfc2290..ec3cca5f 100644
--- a/vm/vm_phys.c
+++ b/vm/vm_page.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2011, 2012 Richard Braun.
+ * Copyright (c) 2010, 2011, 2012, 2013 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -44,34 +44,33 @@
#include <machine/cpu.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
-#include <vm/vm_phys.h>
/*
* Number of free block lists per segment.
*/
-#define VM_PHYS_NR_FREE_LISTS 11
+#define VM_PAGE_NR_FREE_LISTS 11
/*
* The size of a CPU pool is computed by dividing the number of pages in its
* containing segment by this value.
*/
-#define VM_PHYS_CPU_POOL_RATIO 1024
+#define VM_PAGE_CPU_POOL_RATIO 1024
/*
* Maximum number of pages in a CPU pool.
*/
-#define VM_PHYS_CPU_POOL_MAX_SIZE 128
+#define VM_PAGE_CPU_POOL_MAX_SIZE 128
/*
* The transfer size of a CPU pool is computed by dividing the pool size by
* this value.
*/
-#define VM_PHYS_CPU_POOL_TRANSFER_RATIO 2
+#define VM_PAGE_CPU_POOL_TRANSFER_RATIO 2
/*
* Per-processor cache of pages.
*/
-struct vm_phys_cpu_pool {
+struct vm_page_cpu_pool {
struct mutex lock;
int size;
int transfer_size;
@@ -84,12 +83,12 @@ struct vm_phys_cpu_pool {
*
* When a page is free, its order is the index of its free list.
*/
-#define VM_PHYS_ORDER_ALLOCATED VM_PHYS_NR_FREE_LISTS
+#define VM_PAGE_ORDER_ALLOCATED VM_PAGE_NR_FREE_LISTS
/*
* Doubly-linked list of free blocks.
*/
-struct vm_phys_free_list {
+struct vm_page_free_list {
unsigned long size;
struct list blocks;
};
@@ -97,13 +96,13 @@ struct vm_phys_free_list {
/*
* Segment name buffer size.
*/
-#define VM_PHYS_NAME_SIZE 16
+#define VM_PAGE_NAME_SIZE 16
/*
* Segment of contiguous memory.
*/
-struct vm_phys_seg {
- struct vm_phys_cpu_pool cpu_pools[MAX_CPUS];
+struct vm_page_seg {
+ struct vm_page_cpu_pool cpu_pools[MAX_CPUS];
struct list node;
phys_addr_t start;
@@ -111,46 +110,46 @@ struct vm_phys_seg {
struct vm_page *pages;
struct vm_page *pages_end;
struct mutex lock;
- struct vm_phys_free_list free_lists[VM_PHYS_NR_FREE_LISTS];
+ struct vm_page_free_list free_lists[VM_PAGE_NR_FREE_LISTS];
unsigned long nr_free_pages;
- char name[VM_PHYS_NAME_SIZE];
+ char name[VM_PAGE_NAME_SIZE];
};
/*
* Bootstrap information about a segment.
*/
-struct vm_phys_boot_seg {
+struct vm_page_boot_seg {
phys_addr_t avail_start;
phys_addr_t avail_end;
};
-int vm_phys_ready;
+int vm_page_ready;
/*
* Segment lists, ordered by priority.
*/
-static struct list vm_phys_seg_lists[VM_NR_PHYS_SEGLIST];
+static struct list vm_page_seg_lists[VM_PAGE_NR_SEGLISTS];
/*
* Segment table.
*/
-static struct vm_phys_seg vm_phys_segs[VM_MAX_PHYS_SEG];
+static struct vm_page_seg vm_page_segs[VM_PAGE_MAX_SEGS];
/*
* Bootstrap segment table.
*/
-static struct vm_phys_boot_seg vm_phys_boot_segs[VM_MAX_PHYS_SEG] __initdata;
+static struct vm_page_boot_seg vm_page_boot_segs[VM_PAGE_MAX_SEGS] __initdata;
/*
* Number of loaded segments.
*/
-static unsigned int vm_phys_segs_size;
+static unsigned int vm_page_segs_size;
-static int vm_phys_load_initialized __initdata = 0;
+static int vm_page_load_initialized __initdata = 0;
static void __init
-vm_phys_init_page(struct vm_page *page, unsigned short seg_index,
- unsigned short order, phys_addr_t pa)
+vm_page_init(struct vm_page *page, unsigned short seg_index,
+ unsigned short order, phys_addr_t pa)
{
page->seg_index = seg_index;
page->order = order;
@@ -159,61 +158,61 @@ vm_phys_init_page(struct vm_page *page, unsigned short seg_index,
}
static void __init
-vm_phys_free_list_init(struct vm_phys_free_list *free_list)
+vm_page_free_list_init(struct vm_page_free_list *free_list)
{
free_list->size = 0;
list_init(&free_list->blocks);
}
static inline void
-vm_phys_free_list_insert(struct vm_phys_free_list *free_list,
+vm_page_free_list_insert(struct vm_page_free_list *free_list,
struct vm_page *page)
{
- assert(page->order == VM_PHYS_ORDER_ALLOCATED);
+ assert(page->order == VM_PAGE_ORDER_ALLOCATED);
free_list->size++;
list_insert_head(&free_list->blocks, &page->node);
}
static inline void
-vm_phys_free_list_remove(struct vm_phys_free_list *free_list,
+vm_page_free_list_remove(struct vm_page_free_list *free_list,
struct vm_page *page)
{
assert(free_list->size != 0);
assert(!list_empty(&free_list->blocks));
- assert(page->order < VM_PHYS_NR_FREE_LISTS);
+ assert(page->order < VM_PAGE_NR_FREE_LISTS);
free_list->size--;
list_remove(&page->node);
}
static struct vm_page *
-vm_phys_seg_alloc_from_buddy(struct vm_phys_seg *seg, unsigned int order)
+vm_page_seg_alloc_from_buddy(struct vm_page_seg *seg, unsigned int order)
{
- struct vm_phys_free_list *free_list;
+ struct vm_page_free_list *free_list;
struct vm_page *page, *buddy;
unsigned int i;
- assert(order < VM_PHYS_NR_FREE_LISTS);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
- for (i = order; i < VM_PHYS_NR_FREE_LISTS; i++) {
+ for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
free_list = &seg->free_lists[i];
if (free_list->size != 0)
break;
}
- if (i == VM_PHYS_NR_FREE_LISTS)
+ if (i == VM_PAGE_NR_FREE_LISTS)
return NULL;
page = list_first_entry(&free_list->blocks, struct vm_page, node);
- vm_phys_free_list_remove(free_list, page);
- page->order = VM_PHYS_ORDER_ALLOCATED;
+ vm_page_free_list_remove(free_list, page);
+ page->order = VM_PAGE_ORDER_ALLOCATED;
while (i > order) {
i--;
buddy = &page[1 << i];
- vm_phys_free_list_insert(&seg->free_lists[i], buddy);
+ vm_page_free_list_insert(&seg->free_lists[i], buddy);
buddy->order = i;
}
@@ -222,7 +221,7 @@ vm_phys_seg_alloc_from_buddy(struct vm_phys_seg *seg, unsigned int order)
}
static void
-vm_phys_seg_free_to_buddy(struct vm_phys_seg *seg, struct vm_page *page,
+vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
unsigned int order)
{
struct vm_page *buddy;
@@ -231,13 +230,13 @@ vm_phys_seg_free_to_buddy(struct vm_phys_seg *seg, struct vm_page *page,
assert(page >= seg->pages);
assert(page < seg->pages_end);
- assert(page->order == VM_PHYS_ORDER_ALLOCATED);
- assert(order < VM_PHYS_NR_FREE_LISTS);
+ assert(page->order == VM_PAGE_ORDER_ALLOCATED);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
nr_pages = (1 << order);
pa = page->phys_addr;
- while (order < (VM_PHYS_NR_FREE_LISTS - 1)) {
+ while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
buddy_pa = pa ^ vm_page_ptoa(1 << order);
if ((buddy_pa < seg->start) || (buddy_pa >= seg->end))
@@ -248,37 +247,37 @@ vm_phys_seg_free_to_buddy(struct vm_phys_seg *seg, struct vm_page *page,
if (buddy->order != order)
break;
- vm_phys_free_list_remove(&seg->free_lists[order], buddy);
- buddy->order = VM_PHYS_ORDER_ALLOCATED;
+ vm_page_free_list_remove(&seg->free_lists[order], buddy);
+ buddy->order = VM_PAGE_ORDER_ALLOCATED;
order++;
pa &= -vm_page_ptoa(1 << order);
page = &seg->pages[vm_page_atop(pa - seg->start)];
}
- vm_phys_free_list_insert(&seg->free_lists[order], page);
+ vm_page_free_list_insert(&seg->free_lists[order], page);
page->order = order;
seg->nr_free_pages += nr_pages;
}
static void __init
-vm_phys_cpu_pool_init(struct vm_phys_cpu_pool *cpu_pool, int size)
+vm_page_cpu_pool_init(struct vm_page_cpu_pool *cpu_pool, int size)
{
mutex_init(&cpu_pool->lock);
cpu_pool->size = size;
- cpu_pool->transfer_size = (size + VM_PHYS_CPU_POOL_TRANSFER_RATIO - 1)
- / VM_PHYS_CPU_POOL_TRANSFER_RATIO;
+ cpu_pool->transfer_size = (size + VM_PAGE_CPU_POOL_TRANSFER_RATIO - 1)
+ / VM_PAGE_CPU_POOL_TRANSFER_RATIO;
cpu_pool->nr_pages = 0;
list_init(&cpu_pool->pages);
}
-static inline struct vm_phys_cpu_pool *
-vm_phys_cpu_pool_get(struct vm_phys_seg *seg)
+static inline struct vm_page_cpu_pool *
+vm_page_cpu_pool_get(struct vm_page_seg *seg)
{
return &seg->cpu_pools[cpu_id()];
}
static inline struct vm_page *
-vm_phys_cpu_pool_pop(struct vm_phys_cpu_pool *cpu_pool)
+vm_page_cpu_pool_pop(struct vm_page_cpu_pool *cpu_pool)
{
struct vm_page *page;
@@ -290,7 +289,7 @@ vm_phys_cpu_pool_pop(struct vm_phys_cpu_pool *cpu_pool)
}
static inline void
-vm_phys_cpu_pool_push(struct vm_phys_cpu_pool *cpu_pool, struct vm_page *page)
+vm_page_cpu_pool_push(struct vm_page_cpu_pool *cpu_pool, struct vm_page *page)
{
assert(cpu_pool->nr_pages < cpu_pool->size);
cpu_pool->nr_pages++;
@@ -298,8 +297,8 @@ vm_phys_cpu_pool_push(struct vm_phys_cpu_pool *cpu_pool, struct vm_page *page)
}
static int
-vm_phys_cpu_pool_fill(struct vm_phys_cpu_pool *cpu_pool,
- struct vm_phys_seg *seg)
+vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
{
struct vm_page *page;
int i;
@@ -309,12 +308,12 @@ vm_phys_cpu_pool_fill(struct vm_phys_cpu_pool *cpu_pool,
mutex_lock(&seg->lock);
for (i = 0; i < cpu_pool->transfer_size; i++) {
- page = vm_phys_seg_alloc_from_buddy(seg, 0);
+ page = vm_page_seg_alloc_from_buddy(seg, 0);
if (page == NULL)
break;
- vm_phys_cpu_pool_push(cpu_pool, page);
+ vm_page_cpu_pool_push(cpu_pool, page);
}
mutex_unlock(&seg->lock);
@@ -323,8 +322,8 @@ vm_phys_cpu_pool_fill(struct vm_phys_cpu_pool *cpu_pool,
}
static void
-vm_phys_cpu_pool_drain(struct vm_phys_cpu_pool *cpu_pool,
- struct vm_phys_seg *seg)
+vm_page_cpu_pool_drain(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
{
struct vm_page *page;
int i;
@@ -334,77 +333,77 @@ vm_phys_cpu_pool_drain(struct vm_phys_cpu_pool *cpu_pool,
mutex_lock(&seg->lock);
for (i = cpu_pool->transfer_size; i > 0; i--) {
- page = vm_phys_cpu_pool_pop(cpu_pool);
- vm_phys_seg_free_to_buddy(seg, page, 0);
+ page = vm_page_cpu_pool_pop(cpu_pool);
+ vm_page_seg_free_to_buddy(seg, page, 0);
}
mutex_unlock(&seg->lock);
}
static inline phys_addr_t __init
-vm_phys_seg_size(struct vm_phys_seg *seg)
+vm_page_seg_size(struct vm_page_seg *seg)
{
return seg->end - seg->start;
}
static int __init
-vm_phys_seg_compute_pool_size(struct vm_phys_seg *seg)
+vm_page_seg_compute_pool_size(struct vm_page_seg *seg)
{
phys_addr_t size;
- size = vm_page_atop(vm_phys_seg_size(seg)) / VM_PHYS_CPU_POOL_RATIO;
+ size = vm_page_atop(vm_page_seg_size(seg)) / VM_PAGE_CPU_POOL_RATIO;
if (size == 0)
size = 1;
- else if (size > VM_PHYS_CPU_POOL_MAX_SIZE)
- size = VM_PHYS_CPU_POOL_MAX_SIZE;
+ else if (size > VM_PAGE_CPU_POOL_MAX_SIZE)
+ size = VM_PAGE_CPU_POOL_MAX_SIZE;
return size;
}
static void __init
-vm_phys_seg_init(struct vm_phys_seg *seg, struct vm_page *pages)
+vm_page_seg_init(struct vm_page_seg *seg, struct vm_page *pages)
{
phys_addr_t pa;
int pool_size;
unsigned int i;
- pool_size = vm_phys_seg_compute_pool_size(seg);
+ pool_size = vm_page_seg_compute_pool_size(seg);
for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++)
- vm_phys_cpu_pool_init(&seg->cpu_pools[i], pool_size);
+ vm_page_cpu_pool_init(&seg->cpu_pools[i], pool_size);
seg->pages = pages;
- seg->pages_end = pages + vm_page_atop(vm_phys_seg_size(seg));
+ seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
mutex_init(&seg->lock);
for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
- vm_phys_free_list_init(&seg->free_lists[i]);
+ vm_page_free_list_init(&seg->free_lists[i]);
seg->nr_free_pages = 0;
- i = seg - vm_phys_segs;
+ i = seg - vm_page_segs;
for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
- vm_phys_init_page(&pages[vm_page_atop(pa - seg->start)], i,
- VM_PHYS_ORDER_ALLOCATED, pa);
+ vm_page_init(&pages[vm_page_atop(pa - seg->start)], i,
+ VM_PAGE_ORDER_ALLOCATED, pa);
}
static struct vm_page *
-vm_phys_seg_alloc(struct vm_phys_seg *seg, unsigned int order)
+vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order)
{
- struct vm_phys_cpu_pool *cpu_pool;
+ struct vm_page_cpu_pool *cpu_pool;
struct vm_page *page;
int filled;
- assert(order < VM_PHYS_NR_FREE_LISTS);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
if (order == 0) {
- cpu_pool = vm_phys_cpu_pool_get(seg);
+ cpu_pool = vm_page_cpu_pool_get(seg);
mutex_lock(&cpu_pool->lock);
if (cpu_pool->nr_pages == 0) {
- filled = vm_phys_cpu_pool_fill(cpu_pool, seg);
+ filled = vm_page_cpu_pool_fill(cpu_pool, seg);
if (!filled) {
mutex_unlock(&cpu_pool->lock);
@@ -412,11 +411,11 @@ vm_phys_seg_alloc(struct vm_phys_seg *seg, unsigned int order)
}
}
- page = vm_phys_cpu_pool_pop(cpu_pool);
+ page = vm_page_cpu_pool_pop(cpu_pool);
mutex_unlock(&cpu_pool->lock);
} else {
mutex_lock(&seg->lock);
- page = vm_phys_seg_alloc_from_buddy(seg, order);
+ page = vm_page_seg_alloc_from_buddy(seg, order);
mutex_unlock(&seg->lock);
}
@@ -424,58 +423,58 @@ vm_phys_seg_alloc(struct vm_phys_seg *seg, unsigned int order)
}
static void
-vm_phys_seg_free(struct vm_phys_seg *seg, struct vm_page *page,
+vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
unsigned int order)
{
- struct vm_phys_cpu_pool *cpu_pool;
+ struct vm_page_cpu_pool *cpu_pool;
- assert(order < VM_PHYS_NR_FREE_LISTS);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
if (order == 0) {
- cpu_pool = vm_phys_cpu_pool_get(seg);
+ cpu_pool = vm_page_cpu_pool_get(seg);
mutex_lock(&cpu_pool->lock);
if (cpu_pool->nr_pages == cpu_pool->size)
- vm_phys_cpu_pool_drain(cpu_pool, seg);
+ vm_page_cpu_pool_drain(cpu_pool, seg);
- vm_phys_cpu_pool_push(cpu_pool, page);
+ vm_page_cpu_pool_push(cpu_pool, page);
mutex_unlock(&cpu_pool->lock);
} else {
mutex_lock(&seg->lock);
- vm_phys_seg_free_to_buddy(seg, page, order);
+ vm_page_seg_free_to_buddy(seg, page, order);
mutex_unlock(&seg->lock);
}
}
void __init
-vm_phys_load(const char *name, phys_addr_t start, phys_addr_t end,
+vm_page_load(const char *name, phys_addr_t start, phys_addr_t end,
phys_addr_t avail_start, phys_addr_t avail_end,
unsigned int seg_index, unsigned int seglist_prio)
{
- struct vm_phys_boot_seg *boot_seg;
- struct vm_phys_seg *seg;
+ struct vm_page_boot_seg *boot_seg;
+ struct vm_page_seg *seg;
struct list *seg_list;
unsigned int i;
assert(name != NULL);
assert(start < end);
- assert(seg_index < ARRAY_SIZE(vm_phys_segs));
- assert(seglist_prio < ARRAY_SIZE(vm_phys_seg_lists));
+ assert(seg_index < ARRAY_SIZE(vm_page_segs));
+ assert(seglist_prio < ARRAY_SIZE(vm_page_seg_lists));
- if (!vm_phys_load_initialized) {
- for (i = 0; i < ARRAY_SIZE(vm_phys_seg_lists); i++)
- list_init(&vm_phys_seg_lists[i]);
+ if (!vm_page_load_initialized) {
+ for (i = 0; i < ARRAY_SIZE(vm_page_seg_lists); i++)
+ list_init(&vm_page_seg_lists[i]);
- vm_phys_segs_size = 0;
- vm_phys_load_initialized = 1;
+ vm_page_segs_size = 0;
+ vm_page_load_initialized = 1;
}
- assert(vm_phys_segs_size < ARRAY_SIZE(vm_phys_segs));
+ assert(vm_page_segs_size < ARRAY_SIZE(vm_page_segs));
- boot_seg = &vm_phys_boot_segs[seg_index];
- seg = &vm_phys_segs[seg_index];
- seg_list = &vm_phys_seg_lists[seglist_prio];
+ boot_seg = &vm_page_boot_segs[seg_index];
+ seg = &vm_page_segs[seg_index];
+ seg_list = &vm_page_seg_lists[seglist_prio];
list_insert_tail(seg_list, &seg->node);
seg->start = start;
@@ -484,22 +483,22 @@ vm_phys_load(const char *name, phys_addr_t start, phys_addr_t end,
boot_seg->avail_start = avail_start;
boot_seg->avail_end = avail_end;
- vm_phys_segs_size++;
+ vm_page_segs_size++;
}
phys_addr_t __init
-vm_phys_bootalloc(void)
+vm_page_bootalloc(void)
{
- struct vm_phys_boot_seg *boot_seg;
- struct vm_phys_seg *seg;
+ struct vm_page_boot_seg *boot_seg;
+ struct vm_page_seg *seg;
struct list *seg_list;
phys_addr_t pa;
- for (seg_list = &vm_phys_seg_lists[ARRAY_SIZE(vm_phys_seg_lists) - 1];
- seg_list >= vm_phys_seg_lists;
+ for (seg_list = &vm_page_seg_lists[ARRAY_SIZE(vm_page_seg_lists) - 1];
+ seg_list >= vm_page_seg_lists;
seg_list--)
list_for_each_entry(seg_list, seg, node) {
- boot_seg = &vm_phys_boot_segs[seg - vm_phys_segs];
+ boot_seg = &vm_page_boot_segs[seg - vm_page_segs];
if ((boot_seg->avail_end - boot_seg->avail_start) > 1) {
pa = boot_seg->avail_start;
@@ -508,14 +507,14 @@ vm_phys_bootalloc(void)
}
}
- panic("vm_phys: no physical memory available");
+ panic("vm_page: no physical memory available");
}
void __init
-vm_phys_setup(void)
+vm_page_setup(void)
{
- struct vm_phys_boot_seg *boot_seg;
- struct vm_phys_seg *seg;
+ struct vm_page_boot_seg *boot_seg;
+ struct vm_page_seg *seg;
struct vm_page *map, *start, *end;
size_t pages, map_size;
unsigned int i;
@@ -525,11 +524,11 @@ vm_phys_setup(void)
*/
pages = 0;
- for (i = 0; i < vm_phys_segs_size; i++)
- pages += vm_page_atop(vm_phys_seg_size(&vm_phys_segs[i]));
+ for (i = 0; i < vm_page_segs_size; i++)
+ pages += vm_page_atop(vm_page_seg_size(&vm_page_segs[i]));
map_size = P2ROUND(pages * sizeof(struct vm_page), PAGE_SIZE);
- printk("vm_phys: page table size: %zu entries (%zuk)\n", pages,
+ printk("vm_page: page table size: %zu entries (%zuk)\n", pages,
map_size >> 10);
map = (struct vm_page *)vm_kmem_bootalloc(map_size);
@@ -539,41 +538,41 @@ vm_phys_setup(void)
* with a block size of one (order 0). They are then released, which
* populates the free lists.
*/
- for (i = 0; i < vm_phys_segs_size; i++) {
- seg = &vm_phys_segs[i];
- boot_seg = &vm_phys_boot_segs[i];
- vm_phys_seg_init(seg, map);
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+ boot_seg = &vm_page_boot_segs[i];
+ vm_page_seg_init(seg, map);
start = seg->pages + vm_page_atop(boot_seg->avail_start - seg->start);
end = seg->pages + vm_page_atop(boot_seg->avail_end - seg->start);
while (start < end) {
- vm_phys_seg_free_to_buddy(seg, start, 0);
+ vm_page_seg_free_to_buddy(seg, start, 0);
start++;
}
- map += vm_page_atop(vm_phys_seg_size(seg));
+ map += vm_page_atop(vm_page_seg_size(seg));
}
- vm_phys_ready = 1;
+ vm_page_ready = 1;
}
void __init
-vm_phys_manage(struct vm_page *page)
+vm_page_manage(struct vm_page *page)
{
- assert(page->seg_index < ARRAY_SIZE(vm_phys_segs));
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
- vm_phys_seg_free_to_buddy(&vm_phys_segs[page->seg_index], page, 0);
+ vm_page_seg_free_to_buddy(&vm_page_segs[page->seg_index], page, 0);
}
struct vm_page *
-vm_phys_lookup_page(phys_addr_t pa)
+vm_page_lookup(phys_addr_t pa)
{
- struct vm_phys_seg *seg;
+ struct vm_page_seg *seg;
unsigned int i;
- for (i = 0; i < vm_phys_segs_size; i++) {
- seg = &vm_phys_segs[i];
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
if ((pa >= seg->start) && (pa < seg->end))
return &seg->pages[vm_page_atop(pa - seg->start)];
@@ -583,17 +582,17 @@ vm_phys_lookup_page(phys_addr_t pa)
}
struct vm_page *
-vm_phys_alloc(unsigned int order)
+vm_page_alloc(unsigned int order)
{
- struct vm_phys_seg *seg;
+ struct vm_page_seg *seg;
struct list *seg_list;
struct vm_page *page;
- for (seg_list = &vm_phys_seg_lists[ARRAY_SIZE(vm_phys_seg_lists) - 1];
- seg_list >= vm_phys_seg_lists;
+ for (seg_list = &vm_page_seg_lists[ARRAY_SIZE(vm_page_seg_lists) - 1];
+ seg_list >= vm_page_seg_lists;
seg_list--)
list_for_each_entry(seg_list, seg, node) {
- page = vm_phys_seg_alloc(seg, order);
+ page = vm_page_seg_alloc(seg, order);
if (page != NULL)
return page;
@@ -603,32 +602,32 @@ vm_phys_alloc(unsigned int order)
}
struct vm_page *
-vm_phys_alloc_seg(unsigned int order, unsigned int seg_index)
+vm_page_alloc_seg(unsigned int order, unsigned int seg_index)
{
- assert(seg_index < vm_phys_segs_size);
+ assert(seg_index < vm_page_segs_size);
- return vm_phys_seg_alloc(&vm_phys_segs[seg_index], order);
+ return vm_page_seg_alloc(&vm_page_segs[seg_index], order);
}
void
-vm_phys_free(struct vm_page *page, unsigned int order)
+vm_page_free(struct vm_page *page, unsigned int order)
{
- assert(page->seg_index < ARRAY_SIZE(vm_phys_segs));
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
- vm_phys_seg_free(&vm_phys_segs[page->seg_index], page, order);
+ vm_page_seg_free(&vm_page_segs[page->seg_index], page, order);
}
void
-vm_phys_info(void)
+vm_page_info(void)
{
- struct vm_phys_seg *seg;
+ struct vm_page_seg *seg;
unsigned long pages;
unsigned int i;
- for (i = 0; i < vm_phys_segs_size; i++) {
- seg = &vm_phys_segs[i];
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
pages = (unsigned long)(seg->pages_end - seg->pages);
- printk("vm_phys: %s: pages: %lu (%luM), free: %lu (%luM)\n", seg->name,
+ printk("vm_page: %s: pages: %lu (%luM), free: %lu (%luM)\n", seg->name,
pages, pages >> (20 - PAGE_SHIFT), seg->nr_free_pages,
seg->nr_free_pages >> (20 - PAGE_SHIFT));
}
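Two numeric notes on the allocator above. First, the buddy of a 2^order block is found by flipping one address bit (buddy_pa = pa ^ vm_page_ptoa(1 << order)), and merging proceeds upward until the buddy is absent or still split; the sketch below checks that arithmetic, assuming 4 KiB pages. Second, the CPU pool sizing follows the constants at the top of the file: a 512 MiB segment holds 131072 pages, giving a pool of 131072 / 1024 = 128 pages (exactly the VM_PAGE_CPU_POOL_MAX_SIZE clamp) and a transfer size of (128 + 2 - 1) / 2 = 64 pages per fill or drain.

#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

#define PAGE_SHIFT 12 /* assumed: 4 KiB pages */
#define vm_page_ptoa(x) ((phys_addr_t)(x) << PAGE_SHIFT)

int
main(void)
{
    phys_addr_t pa = 0x3000; /* invented block address, order 0 */

    /* buddy_pa = pa ^ vm_page_ptoa(1 << order): flip the bit that
     * selects between the two halves of the next larger block. */
    assert((pa ^ vm_page_ptoa(1 << 0)) == 0x2000);

    /* After a merge, pa &= -vm_page_ptoa(1 << order) keeps the lower
     * of the two addresses as the start of the combined block. */
    pa &= -vm_page_ptoa(1 << 1);
    assert(pa == 0x2000);

    /* The order-1 block at 0x2000 has its buddy at 0x0000. */
    assert((pa ^ vm_page_ptoa(1 << 1)) == 0x0000);
    return 0;
}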
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 205f3d2f..d8d022b4 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2011 Richard Braun.
+ * Copyright (c) 2010, 2011, 2013 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,6 +13,9 @@
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Physical page management.
*/
#ifndef _VM_VM_PAGE_H
@@ -35,6 +38,12 @@
#define vm_page_aligned(addr) P2ALIGNED(addr, PAGE_SIZE)
/*
+ * True if the vm_page module is completely initialized, false otherwise
+ * (in which case only vm_page_bootalloc() can be used for allocations).
+ */
+extern int vm_page_ready;
+
+/*
* Physical page descriptor.
*/
struct vm_page {
@@ -51,4 +60,65 @@ vm_page_to_pa(const struct vm_page *page)
return page->phys_addr;
}
+/*
+ * Load physical memory into the vm_page module at boot time.
+ *
+ * The avail_start and avail_end parameters are used to maintain a simple
+ * heap for bootstrap allocations.
+ */
+void vm_page_load(const char *name, phys_addr_t start, phys_addr_t end,
+ phys_addr_t avail_start, phys_addr_t avail_end,
+ unsigned int seg_index, unsigned int seglist_prio);
+
+/*
+ * Allocate one physical page.
+ *
+ * This function is used to allocate physical memory at boot time, before the
+ * vm_page module is ready, but after the physical memory has been loaded.
+ */
+phys_addr_t vm_page_bootalloc(void);
+
+/*
+ * Set up the vm_page module.
+ *
+ * Once this function returns, the vm_page module is ready, and normal
+ * allocation functions can be used.
+ */
+void vm_page_setup(void);
+
+/*
+ * Make the given page managed by the vm_page module.
+ *
+ * If additional memory can be made usable after the VM system is initialized,
+ * it should be reported through this function.
+ */
+void vm_page_manage(struct vm_page *page);
+
+/*
+ * Return the page descriptor for the given physical address.
+ */
+struct vm_page * vm_page_lookup(phys_addr_t pa);
+
+/*
+ * Allocate a block of 2^order physical pages.
+ */
+struct vm_page * vm_page_alloc(unsigned int order);
+
+/*
+ * Allocate physical pages from a specific segment.
+ *
+ * This function should only be called by architecture specific functions.
+ */
+struct vm_page * vm_page_alloc_seg(unsigned int order, unsigned int seg_index);
+
+/*
+ * Release a block of 2^order physical pages.
+ */
+void vm_page_free(struct vm_page *page, unsigned int order);
+
+/*
+ * Display internal information about the module.
+ */
+void vm_page_info(void);
+
#endif /* _VM_VM_PAGE_H */
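Read as a whole, the new header defines a three-phase lifecycle: load segments, bootstrap-allocate, then switch to the buddy allocator once vm_page_setup() returns. A hedged usage sketch; the segment bounds and the heap_free/heap_end variables are invented, and on x86 this sequencing is actually driven by biosmem_setup() and vm_setup(), shown elsewhere in this diff:

/* 1. Boot: describe a physical segment (cf. biosmem_load_segment()). */
vm_page_load("normal", 0x100000, 0x40000000,    /* invented bounds */
             heap_free, heap_end,               /* bootstrap heap */
             VM_PAGE_SEG_NORMAL, VM_PAGE_SEGLIST_NORMAL);

/* 2. Before vm_page_setup(): single pages from the bootstrap heap. */
phys_addr_t pa = vm_page_bootalloc();

/* 3. After vm_page_setup(): normal buddy allocations. */
struct vm_page *page = vm_page_alloc(0);

if (page != NULL)
    vm_page_free(page, 0);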
diff --git a/vm/vm_phys.h b/vm/vm_phys.h
deleted file mode 100644
index 8b02c4b9..00000000
--- a/vm/vm_phys.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2010, 2011, 2012 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * Physical page allocator.
- */
-
-#ifndef _VM_VM_PHYS_H
-#define _VM_VM_PHYS_H
-
-#include <kern/types.h>
-#include <vm/vm_page.h>
-
-/*
- * True if the vm_phys module is completely initialized, false otherwise
- * (in which case only vm_phys_bootalloc() can be used for allocations).
- */
-extern int vm_phys_ready;
-
-/*
- * Load physical memory into the vm_phys module at boot time.
- *
- * The avail_start and avail_end parameters are used to maintain a simple
- * heap for bootstrap allocations.
- */
-void vm_phys_load(const char *name, phys_addr_t start, phys_addr_t end,
- phys_addr_t avail_start, phys_addr_t avail_end,
- unsigned int seg_index, unsigned int seglist_prio);
-
-/*
- * Allocate one physical page.
- *
- * This function is used to allocate physical memory at boot time, before the
- * vm_phys module is ready, but after the physical memory has been loaded.
- */
-phys_addr_t vm_phys_bootalloc(void);
-
-/*
- * Set up the vm_phys module.
- *
- * Once this function returns, the vm_phys module is ready, and normal
- * allocation functions can be used.
- */
-void vm_phys_setup(void);
-
-/*
- * Make the given page managed by the vm_phys module.
- *
- * If additional memory can be made usable after the VM system is initialized,
- * it should be reported through this function.
- */
-void vm_phys_manage(struct vm_page *page);
-
-/*
- * Return the page descriptor for the given physical address.
- */
-struct vm_page * vm_phys_lookup_page(phys_addr_t pa);
-
-/*
- * Allocate a block of 2^order physical pages.
- */
-struct vm_page * vm_phys_alloc(unsigned int order);
-
-/*
- * Allocate physical pages from a specific segment.
- *
- * This function shouldn't only be called by architecture specific functions.
- */
-struct vm_page * vm_phys_alloc_seg(unsigned int order, unsigned int seg_index);
-
-/*
- * Release a block of 2^order physical pages.
- */
-void vm_phys_free(struct vm_page *page, unsigned int order);
-
-/*
- * Display internal information about the module.
- */
-void vm_phys_info(void);
-
-#endif /* _VM_VM_PHYS_H */
diff --git a/vm/vm_setup.c b/vm/vm_setup.c
index 6d21597c..0407e048 100644
--- a/vm/vm_setup.c
+++ b/vm/vm_setup.c
@@ -20,14 +20,14 @@
#include <machine/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kmem.h>
-#include <vm/vm_phys.h>
+#include <vm/vm_page.h>
#include <vm/vm_setup.h>
void __init
vm_setup(void)
{
vm_kmem_setup();
- vm_phys_setup();
+ vm_page_setup();
kmem_setup();
vm_map_setup();
pmap_setup();
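One ordering note on vm_setup(): vm_page_setup() obtains the memory for its page descriptor table through vm_kmem_bootalloc() (see the vm_page.c hunks above), so the kernel-memory bootstrap must already be in place. The same function restated with editorial comments, which are inferred rather than taken from the tree:

void __init
vm_setup(void)
{
    vm_kmem_setup();    /* kernel VA bootstrap; enables vm_kmem_bootalloc() */
    vm_page_setup();    /* builds page descriptors, sets vm_page_ready */
    kmem_setup();       /* general-purpose kernel allocator */
    vm_map_setup();     /* VM map module */
    pmap_setup();       /* architecture pmap finalization */
}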