Diffstat:
-rw-r--r--  arch/x86/machine/cpu.c    6
-rw-r--r--  arch/x86/machine/cpu.h    6
-rw-r--r--  arch/x86/machine/pmap.c  10
-rw-r--r--  kern/kmem.c              14
-rw-r--r--  kern/percpu.c            16
-rw-r--r--  vm/vm_kmem.c             81
-rw-r--r--  vm/vm_kmem.h             22
-rw-r--r--  vm/vm_page.c              2
8 files changed, 78 insertions, 79 deletions
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index afe88d44..e104ab4d 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -155,7 +155,7 @@ static struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __aligned(8) __read_mostly;
static unsigned long cpu_double_fault_handler;
static char cpu_double_fault_stack[STACK_SIZE] __aligned(DATA_ALIGN);
-unsigned long __init
+void * __init
cpu_get_boot_stack(void)
{
return percpu_var(cpu_desc.boot_stack, boot_ap_id);
@@ -316,7 +316,7 @@ cpu_init_tss(struct cpu *cpu)
#ifdef __LP64__
assert(cpu->double_fault_stack != 0);
- tss->ist[CPU_TSS_IST_DF] = cpu->double_fault_stack;
+ tss->ist[CPU_TSS_IST_DF] = (unsigned long)cpu->double_fault_stack;
#endif /* __LP64__ */
asm volatile("ltr %w0" : : "q" (CPU_GDT_SEL_TSS));
@@ -519,7 +519,7 @@ cpu_setup(void)
cpu = percpu_ptr(cpu_desc, 0);
cpu_preinit(cpu, 0, CPU_INVALID_APIC_ID);
- cpu->double_fault_stack = (unsigned long)cpu_double_fault_stack; /* XXX */
+ cpu->double_fault_stack = cpu_double_fault_stack; /* XXX */
cpu_init(cpu);
cpu_nr_active = 1;
}
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 88d2a270..4f7589d2 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -177,8 +177,8 @@ struct cpu {
struct cpu_tss double_fault_tss;
#endif /* __LP64__ */
volatile int state;
- unsigned long boot_stack;
- unsigned long double_fault_stack;
+ void *boot_stack;
+ void *double_fault_stack;
};
/*
@@ -510,7 +510,7 @@ cpu_delay(unsigned long usecs)
/*
* Return the address of the boot stack allocated for the current processor.
*/
-unsigned long cpu_get_boot_stack(void);
+void * cpu_get_boot_stack(void);
/*
* Install an interrupt handler in the IDT.
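Note on the cpu changes above: the boot and double-fault stack fields, and cpu_get_boot_stack(), become void * so that only the single consumer that needs an integer (the TSS IST slot) carries a cast. A minimal caller sketch, assuming STACK_SIZE from cpu.c and that the stack grows downwards (an assumption, not stated in the patch):

    /* Hypothetical caller: derive the initial stack top from the
     * pointer-typed accessor introduced above. */
    void *base = cpu_get_boot_stack();
    void *top = (char *)base + STACK_SIZE;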
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index de9eb537..82114b45 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -946,7 +946,7 @@ pmap_setup_inc_nr_ptes(pmap_pte_t *pte)
{
struct vm_page *page;
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
page->pmap_page.nr_ptes++;
}
@@ -956,7 +956,7 @@ pmap_setup_set_ptp_type(pmap_pte_t *pte)
{
struct vm_page *page;
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
if (vm_page_type(page) != VM_PAGE_PMAP) {
@@ -1008,7 +1008,7 @@ pmap_copy_cpu_table_recursive(struct vm_page *page, phys_addr_t pa,
pt_level = &pmap_pt_levels[level];
spt = &pt_level->ptemap_base[PMAP_PTEMAP_INDEX(start_va, pt_level->shift)];
- orig_page = vm_kmem_lookup_page((unsigned long)spt);
+ orig_page = vm_kmem_lookup_page(spt);
assert(orig_page != NULL);
page->pmap_page.nr_ptes = orig_page->pmap_page.nr_ptes;
@@ -1184,7 +1184,7 @@ pmap_enter_ptemap_inc_nr_ptes(const pmap_pte_t *pte)
if (!pmap_ready)
return;
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
assert(vm_page_type(page) == VM_PAGE_PMAP);
page->pmap_page.nr_ptes++;
@@ -1322,7 +1322,7 @@ pmap_remove_ptemap(unsigned long va)
if (page != NULL)
vm_page_free(page, 0);
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
assert(vm_page_type(page) == VM_PAGE_PMAP);
assert(page->pmap_page.nr_ptes != 0);
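The pmap.c hunks drop the vm_page_trunc()/(unsigned long) conversions at every call site: vm_kmem_lookup_page() now takes any mapped kernel pointer, and the truncation was presumably redundant because the lookup resolves the enclosing physical page regardless of the offset within it. A sketch of the resulting pattern, using only names from this patch:

    /* Look up the page descriptor backing a PTE and bump its PTE count. */
    struct vm_page *page;

    page = vm_kmem_lookup_page(pte);
    assert(page != NULL);
    page->pmap_page.nr_ptes++;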
diff --git a/kern/kmem.c b/kern/kmem.c
index 107e0282..7d8e9585 100644
--- a/kern/kmem.c
+++ b/kern/kmem.c
@@ -261,7 +261,7 @@ kmem_slab_create(struct kmem_cache *cache, size_t color)
void *slab_buf;
if (cache->slab_alloc_fn == NULL)
- slab_buf = (void *)vm_kmem_alloc(cache->slab_size);
+ slab_buf = vm_kmem_alloc(cache->slab_size);
else
slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
@@ -273,7 +273,7 @@ kmem_slab_create(struct kmem_cache *cache, size_t color)
if (slab == NULL) {
if (cache->slab_free_fn == NULL)
- vm_kmem_free((unsigned long)slab_buf, cache->slab_size);
+ vm_kmem_free(slab_buf, cache->slab_size);
else
cache->slab_free_fn((unsigned long)slab_buf, cache->slab_size);
@@ -319,7 +319,7 @@ kmem_slab_vmref(struct kmem_slab *slab, size_t size)
end = va + size;
do {
- page = vm_kmem_lookup_page(va);
+ page = vm_kmem_lookup_page((void *)va);
assert(page != NULL);
assert(page->slab_priv == NULL);
page->slab_priv = slab;
@@ -717,7 +717,7 @@ kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
} else {
struct vm_page *page;
- page = vm_kmem_lookup_page((unsigned long)buf);
+ page = vm_kmem_lookup_page(buf);
assert(page != NULL);
slab = page->slab_priv;
assert(slab != NULL);
@@ -862,7 +862,7 @@ kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
unsigned char *redzone_byte;
unsigned long slabend;
- page = vm_kmem_lookup_page((unsigned long)buf);
+ page = vm_kmem_lookup_page(buf);
if (page == NULL)
kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
@@ -1124,7 +1124,7 @@ kmem_alloc(size_t size)
if ((buf != NULL) && (cache->flags & KMEM_CF_VERIFY))
kmem_alloc_verify(cache, buf, size);
} else {
- buf = (void *)vm_kmem_alloc(size);
+ buf = vm_kmem_alloc(size);
}
return buf;
@@ -1182,7 +1182,7 @@ kmem_free(void *ptr, size_t size)
kmem_cache_free(cache, ptr);
} else {
- vm_kmem_free((unsigned long)ptr, size);
+ vm_kmem_free(ptr, size);
}
}
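With vm_kmem_alloc() and vm_kmem_free() now speaking void *, the slab layer loses its casts except around the external slab_alloc_fn/slab_free_fn hooks, which still use unsigned long. A hedged sketch of the large-object fallback as it reads after the patch, mirroring the kmem_alloc()/kmem_free() hunks:

    /* Allocations too large for any cache go straight to the VM layer;
     * the returned pointer is used and freed without casts. */
    void *buf = vm_kmem_alloc(size);

    if (buf == NULL)
        return NULL;

    /* ... use buf ..., then later: */
    vm_kmem_free(buf, size);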
diff --git a/kern/percpu.c b/kern/percpu.c
index a7dd73ef..2da86e2e 100644
--- a/kern/percpu.c
+++ b/kern/percpu.c
@@ -45,8 +45,6 @@ percpu_bootstrap(void)
void __init
percpu_setup(void)
{
- unsigned long va;
-
printk("percpu: max_cpus: %u, section size: %zuk\n",
MAX_CPUS, percpu_size >> 10);
assert(vm_page_aligned(percpu_size));
@@ -54,20 +52,17 @@ percpu_setup(void)
if (percpu_size == 0)
return;
- va = vm_kmem_alloc(percpu_size);
+ percpu_area_content = vm_kmem_alloc(percpu_size);
- if (va == 0)
+ if (percpu_area_content == NULL)
panic("percpu: unable to allocate memory for percpu area content");
- percpu_area_content = (void *)va;
memcpy(percpu_area_content, &_percpu, percpu_size);
}
int __init
percpu_add(unsigned int cpu)
{
- unsigned long va;
-
if (cpu >= ARRAY_SIZE(percpu_areas)) {
if (!percpu_skip_warning) {
printk("percpu: ignoring processor beyond id %zu\n",
@@ -86,14 +81,13 @@ percpu_add(unsigned int cpu)
if (percpu_size == 0)
goto out;
- va = vm_kmem_alloc(percpu_size);
+ percpu_areas[cpu] = vm_kmem_alloc(percpu_size);
- if (va == 0) {
+ if (percpu_areas[cpu] == NULL) {
printk("percpu: error: unable to allocate percpu area\n");
return ERROR_NOMEM;
}
- percpu_areas[cpu] = (void *)va;
memcpy(percpu_area(cpu), percpu_area_content, percpu_size);
out:
@@ -103,5 +97,5 @@ out:
void
percpu_cleanup(void)
{
- vm_kmem_free((unsigned long)percpu_area_content, percpu_size);
+ vm_kmem_free(percpu_area_content, percpu_size);
}
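The percpu.c hunks remove the unsigned long temporaries altogether: the void * result is stored directly into percpu_area_content or percpu_areas[cpu] and tested against NULL. A condensed sketch of the allocation path in percpu_add() after the change, using only names from this file:

    percpu_areas[cpu] = vm_kmem_alloc(percpu_size);

    if (percpu_areas[cpu] == NULL) {
        printk("percpu: error: unable to allocate percpu area\n");
        return ERROR_NOMEM;
    }

    memcpy(percpu_area(cpu), percpu_area_content, percpu_size);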
diff --git a/vm/vm_kmem.c b/vm/vm_kmem.c
index e1b7a431..9f1d4a25 100644
--- a/vm/vm_kmem.c
+++ b/vm/vm_kmem.c
@@ -49,7 +49,7 @@ vm_kmem_setup(void)
vm_kmem_boot_end = VM_MAX_KERNEL_ADDRESS;
}
-unsigned long __init
+void * __init
vm_kmem_bootalloc(size_t size)
{
unsigned long start, va;
@@ -72,22 +72,22 @@ vm_kmem_bootalloc(size_t size)
}
pmap_update(kernel_pmap);
- return start;
+ return (void *)start;
}
void __init
-vm_kmem_boot_space(unsigned long *start, unsigned long *end)
+vm_kmem_boot_space(unsigned long *startp, unsigned long *endp)
{
- *start = VM_MIN_KERNEL_ADDRESS;
- *end = vm_kmem_boot_start;
+ *startp = VM_MIN_KERNEL_ADDRESS;
+ *endp = vm_kmem_boot_start;
}
struct vm_page *
-vm_kmem_lookup_page(unsigned long va)
+vm_kmem_lookup_page(const void *addr)
{
phys_addr_t pa;
- pa = pmap_extract(kernel_pmap, va);
+ pa = pmap_extract(kernel_pmap, (unsigned long)addr);
if (pa == 0)
return NULL;
@@ -106,15 +106,15 @@ vm_kmem_alloc_check(size_t size)
}
static int
-vm_kmem_free_check(unsigned long addr, size_t size)
+vm_kmem_free_check(unsigned long va, size_t size)
{
- if (!vm_page_aligned(addr))
+ if (!vm_page_aligned(va))
return -1;
return vm_kmem_alloc_check(size);
}
-unsigned long
+void *
vm_kmem_alloc_va(size_t size)
{
unsigned long va;
@@ -130,24 +130,27 @@ vm_kmem_alloc_va(size_t size)
if (error)
return 0;
- return va;
+ return (void *)va;
}
void
-vm_kmem_free_va(unsigned long addr, size_t size)
+vm_kmem_free_va(void *addr, size_t size)
{
- assert(vm_kmem_free_check(addr, size) == 0);
- vm_map_remove(kernel_map, addr, addr + vm_page_round(size));
+ unsigned long va;
+
+ va = (unsigned long)addr;
+ assert(vm_kmem_free_check(va, size) == 0);
+ vm_map_remove(kernel_map, va, va + vm_page_round(size));
}
-unsigned long
+void *
vm_kmem_alloc(size_t size)
{
struct vm_page *page;
unsigned long va, start, end;
size = vm_page_round(size);
- va = vm_kmem_alloc_va(size);
+ va = (unsigned long)vm_kmem_alloc_va(size);
if (va == 0)
return 0;
@@ -163,43 +166,45 @@ vm_kmem_alloc(size_t size)
}
pmap_update(kernel_pmap);
- return va;
+ return (void *)va;
error_page:
size = start - va;
if (size != 0) {
pmap_update(kernel_pmap);
- vm_kmem_free(va, size);
+ vm_kmem_free((void *)va, size);
}
size = end - start;
if (size != 0)
- vm_kmem_free_va(start, size);
+ vm_kmem_free_va((void *)start, size);
- return 0;
+ return NULL;
}
void
-vm_kmem_free(unsigned long addr, size_t size)
+vm_kmem_free(void *addr, size_t size)
{
const struct cpumap *cpumap;
struct vm_page *page;
unsigned long va, end;
phys_addr_t pa;
+ va = (unsigned long)addr;
size = vm_page_round(size);
- end = addr + size;
+ end = va + size;
cpumap = cpumap_all();
- for (va = addr; va < end; va += PAGE_SIZE) {
+ while (va < end) {
pa = pmap_extract(kernel_pmap, va);
assert(pa != 0);
pmap_remove(kernel_pmap, va, cpumap);
page = vm_page_lookup(pa);
assert(page != NULL);
vm_page_free(page, 0);
+ va += PAGE_SIZE;
}
pmap_update(kernel_pmap);
@@ -207,47 +212,47 @@ vm_kmem_free(unsigned long addr, size_t size)
}
void *
-vm_kmem_map_pa(phys_addr_t addr, size_t size, unsigned long *map_addrp,
- size_t *map_sizep)
+vm_kmem_map_pa(phys_addr_t pa, size_t size,
+ unsigned long *map_vap, size_t *map_sizep)
{
- unsigned long offset, map_addr;
+ unsigned long offset, map_va;
size_t map_size;
phys_addr_t start;
- start = vm_page_trunc(addr);
- map_size = vm_page_round(addr + size) - start;
- map_addr = vm_kmem_alloc_va(map_size);
+ start = vm_page_trunc(pa);
+ map_size = vm_page_round(pa + size) - start;
+ map_va = (unsigned long)vm_kmem_alloc_va(map_size);
- if (map_addr == 0)
+ if (map_va == 0)
return NULL;
for (offset = 0; offset < map_size; offset += PAGE_SIZE)
- pmap_enter(kernel_pmap, map_addr + offset, start + offset,
+ pmap_enter(kernel_pmap, map_va + offset, start + offset,
VM_PROT_READ | VM_PROT_WRITE, PMAP_PEF_GLOBAL);
pmap_update(kernel_pmap);
- if (map_addrp != NULL)
- *map_addrp = map_addr;
+ if (map_vap != NULL)
+ *map_vap = map_va;
if (map_sizep != NULL)
*map_sizep = map_size;
- return (void *)(map_addr + (unsigned long)(addr & PAGE_MASK));
+ return (void *)(map_va + (unsigned long)(pa & PAGE_MASK));
}
void
-vm_kmem_unmap_pa(unsigned long map_addr, size_t map_size)
+vm_kmem_unmap_pa(unsigned long map_va, size_t map_size)
{
const struct cpumap *cpumap;
unsigned long va, end;
cpumap = cpumap_all();
- end = map_addr + map_size;
+ end = map_va + map_size;
- for (va = map_addr; va < end; va += PAGE_SIZE)
+ for (va = map_va; va < end; va += PAGE_SIZE)
pmap_remove(kernel_pmap, va, cpumap);
pmap_update(kernel_pmap);
- vm_kmem_free_va(map_addr, map_size);
+ vm_kmem_free_va((void *)map_va, map_size);
}
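vm_kmem_map_pa() keeps returning a void * positioned at the requested offset while reporting the page-aligned mapping through map_vap/map_sizep, which are exactly what vm_kmem_unmap_pa() expects. A hedged usage sketch (the device register, its physical address pa, and the panic message are illustrative, not from the patch):

    unsigned long map_va;
    size_t map_size;
    volatile uint32_t *reg;

    /* Map a single 32-bit register located at physical address pa. */
    reg = vm_kmem_map_pa(pa, sizeof(*reg), &map_va, &map_size);

    if (reg == NULL)
        panic("unable to map device register");

    /* ... access *reg ... */

    vm_kmem_unmap_pa(map_va, map_size);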
diff --git a/vm/vm_kmem.h b/vm/vm_kmem.h
index 39298a44..de66362a 100644
--- a/vm/vm_kmem.h
+++ b/vm/vm_kmem.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2011, 2012 Richard Braun
+ * Copyright (c) 2010-2014 Richard Braun
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -52,42 +52,42 @@ void vm_kmem_setup(void);
* The main purpose of this function is to allow the allocation of the
* physical page table.
*/
-unsigned long vm_kmem_bootalloc(size_t size);
+void * vm_kmem_bootalloc(size_t size);
/*
* Return the range of initial virtual memory used by the kernel.
*/
-void vm_kmem_boot_space(unsigned long *start, unsigned long *end);
+void vm_kmem_boot_space(unsigned long *startp, unsigned long *endp);
/*
* Return the page descriptor for the physical page mapped at va in kernel
* space. The given address must be mapped and valid.
*/
-struct vm_page * vm_kmem_lookup_page(unsigned long va);
+struct vm_page * vm_kmem_lookup_page(const void *addr);
/*
* Allocate pure virtual kernel pages.
*
* The caller is responsible for taking care of the underlying physical memory.
*/
-unsigned long vm_kmem_alloc_va(size_t size);
+void * vm_kmem_alloc_va(size_t size);
/*
* Free virtual kernel pages.
*
* The caller is responsible for taking care of the underlying physical memory.
*/
-void vm_kmem_free_va(unsigned long addr, size_t size);
+void vm_kmem_free_va(void *addr, size_t size);
/*
* Allocate kernel pages.
*/
-unsigned long vm_kmem_alloc(size_t size);
+void * vm_kmem_alloc(size_t size);
/*
* Free kernel pages.
*/
-void vm_kmem_free(unsigned long addr, size_t size);
+void vm_kmem_free(void *addr, size_t size);
/*
* Map physical memory in the kernel map.
@@ -102,12 +102,12 @@ void vm_kmem_free(unsigned long addr, size_t size);
* TODO When mapping attributes are implemented, make this function disable
* caching on the mapping.
*/
-void * vm_kmem_map_pa(phys_addr_t addr, size_t size, unsigned long *map_addrp,
- size_t *map_sizep);
+void * vm_kmem_map_pa(phys_addr_t pa, size_t size,
+ unsigned long *map_vap, size_t *map_sizep);
/*
* Unmap physical memory from the kernel map.
*/
-void vm_kmem_unmap_pa(unsigned long map_addr, size_t map_size);
+void vm_kmem_unmap_pa(unsigned long map_va, size_t map_size);
#endif /* _VM_VM_KMEM_H */
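Taken together, the new prototypes make the common allocate/use/free cycle cast-free. A minimal sketch based only on the declarations above (the size and the panic message are illustrative; memset is assumed available as elsewhere in the kernel):

    void *buf;

    buf = vm_kmem_alloc(2 * PAGE_SIZE);

    if (buf == NULL)
        panic("vm_kmem: out of memory");

    memset(buf, 0, 2 * PAGE_SIZE);
    vm_kmem_free(buf, 2 * PAGE_SIZE);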
diff --git a/vm/vm_page.c b/vm/vm_page.c
index bae4ea4a..705e431c 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -561,7 +561,7 @@ vm_page_setup(void)
table_size = P2ROUND(nr_pages * sizeof(struct vm_page), PAGE_SIZE);
printk("vm_page: page table size: %zu entries (%zuk)\n", nr_pages,
table_size >> 10);
- table = (struct vm_page *)vm_kmem_bootalloc(table_size);
+ table = vm_kmem_bootalloc(table_size);
va = (unsigned long)table;
/*