Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/machine/cpu.c   6
-rw-r--r--  arch/x86/machine/cpu.h   6
-rw-r--r--  arch/x86/machine/pmap.c  10
3 files changed, 11 insertions, 11 deletions
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index afe88d44..e104ab4d 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -155,7 +155,7 @@ static struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __aligned(8) __read_mostly;
static unsigned long cpu_double_fault_handler;
static char cpu_double_fault_stack[STACK_SIZE] __aligned(DATA_ALIGN);
-unsigned long __init
+void * __init
cpu_get_boot_stack(void)
{
return percpu_var(cpu_desc.boot_stack, boot_ap_id);
@@ -316,7 +316,7 @@ cpu_init_tss(struct cpu *cpu)
#ifdef __LP64__
assert(cpu->double_fault_stack != 0);
- tss->ist[CPU_TSS_IST_DF] = cpu->double_fault_stack;
+ tss->ist[CPU_TSS_IST_DF] = (unsigned long)cpu->double_fault_stack;
#endif /* __LP64__ */
asm volatile("ltr %w0" : : "q" (CPU_GDT_SEL_TSS));
@@ -519,7 +519,7 @@ cpu_setup(void)
cpu = percpu_ptr(cpu_desc, 0);
cpu_preinit(cpu, 0, CPU_INVALID_APIC_ID);
- cpu->double_fault_stack = (unsigned long)cpu_double_fault_stack; /* XXX */
+ cpu->double_fault_stack = cpu_double_fault_stack; /* XXX */
cpu_init(cpu);
cpu_nr_active = 1;
}
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 88d2a270..4f7589d2 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -177,8 +177,8 @@ struct cpu {
struct cpu_tss double_fault_tss;
#endif /* __LP64__ */
volatile int state;
- unsigned long boot_stack;
- unsigned long double_fault_stack;
+ void *boot_stack;
+ void *double_fault_stack;
};
/*
@@ -510,7 +510,7 @@ cpu_delay(unsigned long usecs)
/*
* Return the address of the boot stack allocated for the current processor.
*/
-unsigned long cpu_get_boot_stack(void);
+void * cpu_get_boot_stack(void);
/*
* Install an interrupt handler in the IDT.
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index de9eb537..82114b45 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -946,7 +946,7 @@ pmap_setup_inc_nr_ptes(pmap_pte_t *pte)
{
struct vm_page *page;
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
page->pmap_page.nr_ptes++;
}
@@ -956,7 +956,7 @@ pmap_setup_set_ptp_type(pmap_pte_t *pte)
{
struct vm_page *page;
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
if (vm_page_type(page) != VM_PAGE_PMAP) {
@@ -1008,7 +1008,7 @@ pmap_copy_cpu_table_recursive(struct vm_page *page, phys_addr_t pa,
pt_level = &pmap_pt_levels[level];
spt = &pt_level->ptemap_base[PMAP_PTEMAP_INDEX(start_va, pt_level->shift)];
- orig_page = vm_kmem_lookup_page((unsigned long)spt);
+ orig_page = vm_kmem_lookup_page(spt);
assert(orig_page != NULL);
page->pmap_page.nr_ptes = orig_page->pmap_page.nr_ptes;
@@ -1184,7 +1184,7 @@ pmap_enter_ptemap_inc_nr_ptes(const pmap_pte_t *pte)
if (!pmap_ready)
return;
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
assert(vm_page_type(page) == VM_PAGE_PMAP);
page->pmap_page.nr_ptes++;
@@ -1322,7 +1322,7 @@ pmap_remove_ptemap(unsigned long va)
if (page != NULL)
vm_page_free(page, 0);
- page = vm_kmem_lookup_page(vm_page_trunc((unsigned long)pte));
+ page = vm_kmem_lookup_page(pte);
assert(page != NULL);
assert(vm_page_type(page) == VM_PAGE_PMAP);
assert(page->pmap_page.nr_ptes != 0);
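
Illustrative sketch (not part of the commit): the pattern applied above keeps kernel virtual addresses typed as void * in C code and converts them to an integer only at the hardware-facing boundary, such as a TSS IST slot. The names fake_tss, fake_stack and fake_get_stack below are hypothetical, standalone stand-ins for struct cpu_tss, cpu_double_fault_stack and cpu_get_boot_stack; this is a minimal userspace sketch of the idea, not kernel code.

/*
 * Standalone sketch: addresses stay pointers in C code and are cast to
 * an integer only where a hardware-defined structure requires one.
 */
#include <assert.h>
#include <stdio.h>

#define STACK_SIZE 4096

struct fake_tss {
    unsigned long ist[8];   /* IST slots hold raw addresses */
};

static char fake_stack[STACK_SIZE] __attribute__((aligned(16)));

/* Counterpart of cpu_get_boot_stack() after the change: return void *. */
static void *
fake_get_stack(void)
{
    return fake_stack;
}

int
main(void)
{
    struct fake_tss tss;
    void *stack = fake_get_stack();

    assert(stack != NULL);

    /*
     * Cast only at the boundary, mirroring the single remaining cast in
     * cpu_init_tss(): tss->ist[CPU_TSS_IST_DF] = (unsigned long)...;
     */
    tss.ist[0] = (unsigned long)stack;

    printf("stack at %p, ist[0] = %#lx\n", stack, tss.ist[0]);
    return 0;
}

Keeping the members of struct cpu as void * removes the casts that were previously scattered across call sites (e.g. the vm_kmem_lookup_page() callers above), leaving a single explicit conversion where the hardware format demands an integer.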