author     Richard Braun <rbraun@sceen.net>   2012-10-10 21:18:28 +0200
committer  Richard Braun <rbraun@sceen.net>   2012-10-10 21:18:28 +0200
commit     d754647055848c8d575e96d09cdba9e16b8f27ba (patch)
tree       dc5094de4fc94df137cda5d745fedbd15319e51d
parent     8eadbf1e9f3530fa7b0f93caf572d96a50891445 (diff)
Rename vm_phys_t to phys_addr_t
-rw-r--r--  arch/x86/machine/acpimp.c   |  2
-rw-r--r--  arch/x86/machine/biosmem.c  | 42
-rw-r--r--  arch/x86/machine/mps.c      |  2
-rw-r--r--  arch/x86/machine/pmap.c     |  8
-rw-r--r--  arch/x86/machine/pmap.h     | 10
-rw-r--r--  arch/x86/machine/types.h    |  4
-rw-r--r--  vm/vm_kmem.c                |  8
-rw-r--r--  vm/vm_kmem.h                |  2
-rw-r--r--  vm/vm_page.h                |  4
-rw-r--r--  vm/vm_phys.c                | 28
-rw-r--r--  vm/vm_phys.h                |  8
11 files changed, 59 insertions, 59 deletions
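
The change is a straight mechanical rename: every occurrence of the vm_phys_t typedef becomes phys_addr_t, with no change in width or behaviour. Below is a minimal sketch of how the renamed type flows through the pmap interface after this commit; the zero_frame_of() helper is hypothetical and added only for illustration, while the typedef and the two pmap declarations match the hunks that follow.

#ifdef PAE
typedef unsigned long long phys_addr_t;     /* 64-bit physical addresses */
#else /* PAE */
typedef unsigned long phys_addr_t;          /* 32-bit physical addresses */
#endif /* PAE */

/* Declarations as they appear in arch/x86/machine/pmap.h after the rename. */
phys_addr_t pmap_kextract(unsigned long va);
void pmap_zero_page(phys_addr_t pa);

/* Hypothetical helper, not part of the tree: translate a kernel virtual
 * address and zero the physical page backing it. */
static void
zero_frame_of(unsigned long va)
{
    phys_addr_t pa;

    pa = pmap_kextract(va);     /* virtual -> physical translation */
    pmap_zero_page(pa);         /* zero the underlying page frame */
}
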
diff --git a/arch/x86/machine/acpimp.c b/arch/x86/machine/acpimp.c
index e2fc6eb3..92b28ebd 100644
--- a/arch/x86/machine/acpimp.c
+++ b/arch/x86/machine/acpimp.c
@@ -242,7 +242,7 @@ acpimp_check_rsdp(const struct acpimp_rsdp *rsdp)
}
static int __init
-acpimp_get_rsdp(vm_phys_t start, size_t size, struct acpimp_rsdp *rsdp)
+acpimp_get_rsdp(phys_addr_t start, size_t size, struct acpimp_rsdp *rsdp)
{
const struct acpimp_rsdp *src;
unsigned long addr, end, map_addr;
diff --git a/arch/x86/machine/biosmem.c b/arch/x86/machine/biosmem.c
index 430d6462..0e3c6ce2 100644
--- a/arch/x86/machine/biosmem.c
+++ b/arch/x86/machine/biosmem.c
@@ -481,14 +481,14 @@ biosmem_map_show(void)
}
static int __init
-biosmem_map_find_avail(vm_phys_t *phys_start, vm_phys_t *phys_end)
+biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
{
const struct biosmem_map_entry *entry, *map_end;
- vm_phys_t start, end, seg_start, seg_end;
+ phys_addr_t start, end, seg_start, seg_end;
uint64_t entry_end;
- seg_start = (vm_phys_t)-1;
- seg_end = (vm_phys_t)-1;
+ seg_start = (phys_addr_t)-1;
+ seg_end = (phys_addr_t)-1;
map_end = biosmem_map + biosmem_map_size;
for (entry = biosmem_map; entry < map_end; entry++) {
@@ -516,14 +516,14 @@ biosmem_map_find_avail(vm_phys_t *phys_start, vm_phys_t *phys_end)
/* TODO: check against a minimum size */
if ((start < end) && (start < *phys_end) && (end > *phys_start)) {
- if (seg_start == (vm_phys_t)-1)
+ if (seg_start == (phys_addr_t)-1)
seg_start = start;
seg_end = end;
}
}
- if ((seg_start == (vm_phys_t)-1) || (seg_end == (vm_phys_t)-1))
+ if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1))
return -1;
if (seg_start > *phys_start)
@@ -536,9 +536,9 @@ biosmem_map_find_avail(vm_phys_t *phys_start, vm_phys_t *phys_end)
}
static void __init
-biosmem_load_segment(const char *name, vm_phys_t phys_start,
- vm_phys_t phys_end, vm_phys_t avail_start,
- vm_phys_t avail_end, unsigned int seglist_prio)
+biosmem_load_segment(const char *name, phys_addr_t phys_start,
+ phys_addr_t phys_end, phys_addr_t avail_start,
+ phys_addr_t avail_end, unsigned int seglist_prio)
{
if ((avail_start < phys_start) || (avail_start > phys_end))
avail_start = phys_start;
@@ -553,7 +553,7 @@ biosmem_load_segment(const char *name, vm_phys_t phys_start,
void __init
biosmem_setup(void)
{
- vm_phys_t phys_start, phys_end;
+ phys_addr_t phys_start, phys_end;
int error;
biosmem_map_adjust();
@@ -580,9 +580,9 @@ biosmem_setup(void)
}
static void __init
-biosmem_find_reserved_area_update(vm_phys_t min, vm_phys_t *start,
- vm_phys_t *end, vm_phys_t reserved_start,
- vm_phys_t reserved_end)
+biosmem_find_reserved_area_update(phys_addr_t min, phys_addr_t *start,
+ phys_addr_t *end, phys_addr_t reserved_start,
+ phys_addr_t reserved_end)
{
if ((min <= reserved_start) && (reserved_start < *start)) {
*start = reserved_start;
@@ -590,11 +590,11 @@ biosmem_find_reserved_area_update(vm_phys_t min, vm_phys_t *start,
}
}
-static vm_phys_t __init
-biosmem_find_reserved_area(vm_phys_t min, vm_phys_t max,
- vm_phys_t *endp)
+static phys_addr_t __init
+biosmem_find_reserved_area(phys_addr_t min, phys_addr_t max,
+ phys_addr_t *endp)
{
- vm_phys_t start, end = end;
+ phys_addr_t start, end = end;
start = max;
biosmem_find_reserved_area_update(min, &start, &end, (unsigned long)&_init,
@@ -610,7 +610,7 @@ biosmem_find_reserved_area(vm_phys_t min, vm_phys_t max,
}
static void __init
-biosmem_free_usable_range(vm_phys_t start, vm_phys_t end)
+biosmem_free_usable_range(phys_addr_t start, phys_addr_t end)
{
struct vm_page *page;
@@ -623,9 +623,9 @@ biosmem_free_usable_range(vm_phys_t start, vm_phys_t end)
}
static void __init
-biosmem_free_usable_upper(vm_phys_t upper_end)
+biosmem_free_usable_upper(phys_addr_t upper_end)
{
- vm_phys_t next, start, end;
+ phys_addr_t next, start, end;
next = BIOSMEM_END;
@@ -646,7 +646,7 @@ void __init
biosmem_free_usable(void)
{
struct biosmem_map_entry *entry;
- vm_phys_t start, end;
+ phys_addr_t start, end;
uint64_t entry_end;
unsigned int i;
diff --git a/arch/x86/machine/mps.c b/arch/x86/machine/mps.c
index 8a4a899d..726449cb 100644
--- a/arch/x86/machine/mps.c
+++ b/arch/x86/machine/mps.c
@@ -204,7 +204,7 @@ mps_check_fps(const struct mps_fps *fps)
}
static int __init
-mps_get_fps(vm_phys_t start, size_t size, struct mps_fps *fps)
+mps_get_fps(phys_addr_t start, size_t size, struct mps_fps *fps)
{
const struct mps_fps *src;
unsigned long addr, end, map_addr;
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 6681bc68..1c1eb01c 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -194,7 +194,7 @@ pmap_growkernel(unsigned long va)
{
struct vm_page *page;
pmap_pte_t *pde;
- vm_phys_t pa;
+ phys_addr_t pa;
while (va > pmap_klimit) {
pde = pmap_pde(pmap_kpdir, pmap_klimit);
@@ -218,7 +218,7 @@ pmap_growkernel(unsigned long va)
}
void
-pmap_kenter(unsigned long va, vm_phys_t pa)
+pmap_kenter(unsigned long va, phys_addr_t pa)
{
PMAP_PTE_BASE[vm_page_atop(va)] = (pa & PMAP_PTE_PMASK) | pmap_pte_global
| PMAP_PTE_WRITE | PMAP_PTE_PRESENT;
@@ -250,7 +250,7 @@ pmap_kprotect(unsigned long start, unsigned long end, int prot)
}
}
-vm_phys_t
+phys_addr_t
pmap_kextract(unsigned long va)
{
pmap_pte_t *pde;
@@ -264,7 +264,7 @@ pmap_kextract(unsigned long va)
}
void
-pmap_zero_page(vm_phys_t pa)
+pmap_zero_page(phys_addr_t pa)
{
pmap_kenter(pmap_zero_va, pa);
memset((void *)pmap_zero_va, 0, PAGE_SIZE);
diff --git a/arch/x86/machine/pmap.h b/arch/x86/machine/pmap.h
index d90f52d2..dad4aee9 100644
--- a/arch/x86/machine/pmap.h
+++ b/arch/x86/machine/pmap.h
@@ -131,8 +131,8 @@ typedef unsigned long pmap_pte_t;
* Physical address map.
*/
struct pmap {
- pmap_pte_t *pdir; /* Page directory virtual address */
- vm_phys_t pdir_pa; /* Page directory physical address */
+ pmap_pte_t *pdir; /* Page directory virtual address */
+ phys_addr_t pdir_pa; /* Page directory physical address */
#ifdef PAE
pmap_pte_t *pdpt; /* Page directory pointer table physical address */
#endif /* PAE */
@@ -198,15 +198,15 @@ void pmap_growkernel(unsigned long va);
*
* Resources for the new mappings must be preallocated.
*/
-void pmap_kenter(unsigned long va, vm_phys_t pa);
+void pmap_kenter(unsigned long va, phys_addr_t pa);
void pmap_kremove(unsigned long start, unsigned long end);
void pmap_kprotect(unsigned long start, unsigned long end, int prot);
-vm_phys_t pmap_kextract(unsigned long va);
+phys_addr_t pmap_kextract(unsigned long va);
/*
* Zero a page at the given physical address.
*/
-void pmap_zero_page(vm_phys_t pa);
+void pmap_zero_page(phys_addr_t pa);
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/machine/types.h b/arch/x86/machine/types.h
index cc125151..03c9723b 100644
--- a/arch/x86/machine/types.h
+++ b/arch/x86/machine/types.h
@@ -19,9 +19,9 @@
#define _X86_TYPES_H
#ifdef PAE
-typedef unsigned long long vm_phys_t;
+typedef unsigned long long phys_addr_t;
#else /* PAE */
-typedef unsigned long vm_phys_t;
+typedef unsigned long phys_addr_t;
#endif /* PAE */
#endif /* _X86_TYPES_H */
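
Under PAE the renamed type is wider than unsigned long, so physical addresses must never be narrowed through a pointer or a long. A small, hedged illustration of a width-safe print, written as standalone C rather than kernel code; the print_phys_addr() helper is an assumption, only the typedef itself comes from this diff.

#include <stdio.h>

#ifdef PAE
typedef unsigned long long phys_addr_t;
#else /* PAE */
typedef unsigned long phys_addr_t;
#endif /* PAE */

/* Cast before printing so the same format string works whether
 * phys_addr_t is 32-bit (non-PAE) or 64-bit (PAE). */
static void
print_phys_addr(const char *what, phys_addr_t pa)
{
    printf("%s: %#llx\n", what, (unsigned long long)pa);
}
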
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index c9fd4027..8617791d 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -51,7 +51,7 @@ unsigned long __init
vm_kmem_bootalloc(size_t size)
{
unsigned long start, va;
- vm_phys_t pa;
+ phys_addr_t pa;
assert(size > 0);
@@ -162,7 +162,7 @@ vm_kmem_free(unsigned long addr, size_t size)
{
struct vm_page *page;
unsigned long va, end;
- vm_phys_t pa;
+ phys_addr_t pa;
assert(vm_kmem_free_check(addr, size) == 0);
@@ -184,12 +184,12 @@ vm_kmem_free(unsigned long addr, size_t size)
}
void *
-vm_kmem_map_pa(vm_phys_t addr, size_t size, unsigned long *map_addrp,
+vm_kmem_map_pa(phys_addr_t addr, size_t size, unsigned long *map_addrp,
size_t *map_sizep)
{
unsigned long offset, map_addr;
size_t map_size;
- vm_phys_t start;
+ phys_addr_t start;
assert(vm_kmem_alloc_check(size) == 0);
diff --git a/vm/vm_kmem.h b/vm/vm_kmem.h
index d4984e8e..6ecf8e17 100644
--- a/vm/vm_kmem.h
+++ b/vm/vm_kmem.h
@@ -74,7 +74,7 @@ void vm_kmem_free(unsigned long addr, size_t size);
* TODO When mapping attributes are implemented, make this function disable
* caching on the mapping.
*/
-void * vm_kmem_map_pa(vm_phys_t addr, size_t size, unsigned long *map_addrp,
+void * vm_kmem_map_pa(phys_addr_t addr, size_t size, unsigned long *map_addrp,
size_t *map_sizep);
/*
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 641cbccf..e499dc52 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -41,10 +41,10 @@ struct vm_page {
struct list node;
unsigned short seg_index;
unsigned short order;
- vm_phys_t phys_addr;
+ phys_addr_t phys_addr;
};
-static inline vm_phys_t
+static inline phys_addr_t
vm_page_to_pa(const struct vm_page *page)
{
return page->phys_addr;
diff --git a/vm/vm_phys.c b/vm/vm_phys.c
index 2852a034..81e655bf 100644
--- a/vm/vm_phys.c
+++ b/vm/vm_phys.c
@@ -105,8 +105,8 @@ struct vm_phys_seg {
struct vm_phys_cpu_pool cpu_pools[MAX_CPUS];
struct list node;
- vm_phys_t start;
- vm_phys_t end;
+ phys_addr_t start;
+ phys_addr_t end;
struct vm_page *pages;
struct vm_page *pages_end;
/* struct mutex mutex; */
@@ -119,8 +119,8 @@ struct vm_phys_seg {
* Bootstrap information about a segment.
*/
struct vm_phys_boot_seg {
- vm_phys_t avail_start;
- vm_phys_t avail_end;
+ phys_addr_t avail_start;
+ phys_addr_t avail_end;
};
int vm_phys_ready;
@@ -150,7 +150,7 @@ static int vm_phys_load_initialized __initdata = 0;
static void __init
vm_phys_init_page(struct vm_page *page, unsigned short seg_index,
- unsigned short order, vm_phys_t pa)
+ unsigned short order, phys_addr_t pa)
{
page->seg_index = seg_index;
page->order = order;
@@ -225,7 +225,7 @@ vm_phys_seg_free_to_buddy(struct vm_phys_seg *seg, struct vm_page *page,
unsigned int order)
{
struct vm_page *buddy;
- vm_phys_t pa, buddy_pa;
+ phys_addr_t pa, buddy_pa;
unsigned int nr_pages;
assert(page >= seg->pages);
@@ -339,7 +339,7 @@ vm_phys_cpu_pool_drain(struct vm_phys_cpu_pool *cpu_pool,
/* mutex_unlock(&seg->mutex); */
}
-static inline vm_phys_t __init
+static inline phys_addr_t __init
vm_phys_seg_size(struct vm_phys_seg *seg)
{
return seg->end - seg->start;
@@ -348,7 +348,7 @@ vm_phys_seg_size(struct vm_phys_seg *seg)
static int __init
vm_phys_seg_compute_pool_size(struct vm_phys_seg *seg)
{
- vm_phys_t size;
+ phys_addr_t size;
size = vm_page_atop(vm_phys_seg_size(seg)) / VM_PHYS_CPU_POOL_RATIO;
@@ -363,7 +363,7 @@ vm_phys_seg_compute_pool_size(struct vm_phys_seg *seg)
static void __init
vm_phys_seg_init(struct vm_phys_seg *seg, struct vm_page *pages)
{
- vm_phys_t pa;
+ phys_addr_t pa;
int pool_size;
unsigned int i;
@@ -447,8 +447,8 @@ vm_phys_seg_free(struct vm_phys_seg *seg, struct vm_page *page,
}
void __init
-vm_phys_load(const char *name, vm_phys_t start, vm_phys_t end,
- vm_phys_t avail_start, vm_phys_t avail_end,
+vm_phys_load(const char *name, phys_addr_t start, phys_addr_t end,
+ phys_addr_t avail_start, phys_addr_t avail_end,
unsigned int seglist_prio)
{
struct vm_phys_boot_seg *boot_seg;
@@ -485,13 +485,13 @@ vm_phys_load(const char *name, vm_phys_t start, vm_phys_t end,
vm_phys_segs_size++;
}
-vm_phys_t __init
+phys_addr_t __init
vm_phys_bootalloc(void)
{
struct vm_phys_boot_seg *boot_seg;
struct vm_phys_seg *seg;
struct list *seg_list;
- vm_phys_t pa;
+ phys_addr_t pa;
for (seg_list = &vm_phys_seg_lists[ARRAY_SIZE(vm_phys_seg_lists) - 1];
seg_list >= vm_phys_seg_lists;
@@ -565,7 +565,7 @@ vm_phys_manage(struct vm_page *page)
}
struct vm_page *
-vm_phys_lookup_page(vm_phys_t pa)
+vm_phys_lookup_page(phys_addr_t pa)
{
struct vm_phys_seg *seg;
unsigned int i;
diff --git a/vm/vm_phys.h b/vm/vm_phys.h
index a5a7d32f..aff124a0 100644
--- a/vm/vm_phys.h
+++ b/vm/vm_phys.h
@@ -36,8 +36,8 @@ extern int vm_phys_ready;
* The avail_start and avail_end parameters are used to maintain a simple
* heap for bootstrap allocations.
*/
-void vm_phys_load(const char *name, vm_phys_t start, vm_phys_t end,
- vm_phys_t avail_start, vm_phys_t avail_end,
+void vm_phys_load(const char *name, phys_addr_t start, phys_addr_t end,
+ phys_addr_t avail_start, phys_addr_t avail_end,
unsigned int seglist_prio);
/*
@@ -46,7 +46,7 @@ void vm_phys_load(const char *name, vm_phys_t start, vm_phys_t end,
* This function is used to allocate physical memory at boot time, before the
* vm_phys module is ready, but after the physical memory has been loaded.
*/
-vm_phys_t vm_phys_bootalloc(void);
+phys_addr_t vm_phys_bootalloc(void);
/*
* Set up the vm_phys module.
@@ -67,7 +67,7 @@ void vm_phys_manage(struct vm_page *page);
/*
* Return the page descriptor for the given physical address.
*/
-struct vm_page * vm_phys_lookup_page(vm_phys_t pa);
+struct vm_page * vm_phys_lookup_page(phys_addr_t pa);
/*
* Allocate a block of 2^order physical pages.