/*
 * Copyright (c) 2010-2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <kern/init.h>
#include <kern/log.h>
#include <kern/macros.h>
#include <kern/panic.h>
#include <kern/percpu.h>
#include <kern/shutdown.h>
#include <kern/thread.h>
#include <kern/xcall.h>
#include <machine/acpi.h>
#include <machine/biosmem.h>
#include <machine/boot.h>
#include <machine/cpu.h>
#include <machine/io.h>
#include <machine/lapic.h>
#include <machine/page.h>
#include <machine/pit.h>
#include <machine/pmap.h>
#include <machine/ssp.h>
#include <machine/trap.h>
#include <vm/vm_page.h>

/*
 * Delay used for frequency measurement, in microseconds.
 */
#define CPU_FREQ_CAL_DELAY 1000000

#define CPU_TYPE_MASK       0x00003000
#define CPU_TYPE_SHIFT      12
#define CPU_FAMILY_MASK     0x00000f00
#define CPU_FAMILY_SHIFT    8
#define CPU_EXTFAMILY_MASK  0x0ff00000
#define CPU_EXTFAMILY_SHIFT 20
#define CPU_MODEL_MASK      0x000000f0
#define CPU_MODEL_SHIFT     4
#define CPU_EXTMODEL_MASK   0x000f0000
#define CPU_EXTMODEL_SHIFT  16
#define CPU_STEPPING_MASK   0x0000000f
#define CPU_STEPPING_SHIFT  0
#define CPU_BRAND_MASK      0x000000ff
#define CPU_BRAND_SHIFT     0
#define CPU_CLFLUSH_MASK    0x0000ff00
#define CPU_CLFLUSH_SHIFT   8
#define CPU_APIC_ID_MASK    0xff000000
#define CPU_APIC_ID_SHIFT   24
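
/*
 * Example decoding of the version information returned in eax by CPUID
 * leaf 1, using the masks above. For eax = 0x000306a9 (an Intel Ivy
 * Bridge part, given here purely as an illustration):
 *
 *   stepping       = 0x000306a9 & 0x0000000f         = 9
 *   model          = (0x000306a9 & 0x000000f0) >> 4  = 0xa
 *   family         = (0x000306a9 & 0x00000f00) >> 8  = 6
 *   extended model = (0x000306a9 & 0x000f0000) >> 16 = 3
 *
 * Since the family is 6, the extended model extends the model as its
 * high nibble, yielding the display model (3 << 4) + 0xa = 0x3a.
 * See the decoding in cpu_init() below.
 */
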
#define CPU_INVALID_APIC_ID ((unsigned int)-1)

struct cpu_vendor {
    unsigned int id;
    const char *str;
};

/*
 * MP related CMOS ports, registers and values.
 */
#define CPU_MP_CMOS_PORT_REG        0x70
#define CPU_MP_CMOS_PORT_DATA       0x71
#define CPU_MP_CMOS_REG_RESET       0x0f
#define CPU_MP_CMOS_DATA_RESET_WARM 0x0a
#define CPU_MP_CMOS_RESET_VECTOR    0x467

/*
 * Priority of the shutdown operations.
 *
 * Last resort, lower than everything else.
 */
#define CPU_SHUTDOWN_PRIORITY 0

/*
 * Gate descriptor.
 */
struct cpu_gate_desc {
    uint32_t word1;
    uint32_t word2;

#ifdef __LP64__
    uint32_t word3;
    uint32_t word4;
#endif /* __LP64__ */
};

/*
 * LDT or TSS system segment descriptor.
 */
struct cpu_sysseg_desc {
    uint32_t word1;
    uint32_t word2;

#ifdef __LP64__
    uint32_t word3;
    uint32_t word4;
#endif /* __LP64__ */
};

struct cpu_pseudo_desc {
    uint16_t limit;
    uintptr_t address;
} __packed;

void *cpu_local_area __percpu;

/*
 * Processor descriptor, one per CPU.
 */
struct cpu cpu_desc __percpu;

/*
 * Number of active processors.
 */
unsigned int cpu_nr_active __read_mostly = 1;

/*
 * Processor frequency, assumed fixed and equal on all processors.
 */
static uint64_t cpu_freq __read_mostly;

/*
 * TLS segment, as expected by the compiler.
 *
 * TLS isn't actually used inside the kernel. The current purpose of this
 * segment is to implement stack protection.
 */
static const struct cpu_tls_seg cpu_tls_seg = {
    .ssp_guard_word = SSP_GUARD_WORD,
};

/*
 * Interrupt descriptor table.
 */
static alignas(8) struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __read_mostly;

/*
 * Double fault handler, and stack for the main processor.
 *
 * TODO Declare as init data, and replace the BSP stack with kernel virtual
 * memory.
 */
static unsigned long cpu_double_fault_handler;
static alignas(CPU_DATA_ALIGN) char cpu_double_fault_stack[TRAP_STACK_SIZE];

uint64_t
cpu_get_freq(void)
{
    return cpu_freq;
}
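
/*
 * Busy-wait by polling the TSC until the requested number of microseconds
 * has elapsed. The delay is first converted into a budget of TSC ticks,
 * rounding up: e.g. with cpu_freq at 2 GHz (an illustrative value), a
 * 10 microsecond delay becomes ceil(10 * 2e9 / 1e6) = 20000 ticks. The
 * budget is then decremented by the TSC increase observed at each
 * iteration until it is exhausted.
 */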
void
cpu_delay(unsigned long usecs)
{
    int64_t total, prev, count, diff;

    assert(usecs != 0);

    total = DIV_CEIL((int64_t)usecs * cpu_freq, 1000000);
    prev = cpu_get_tsc();

    do {
        count = cpu_get_tsc();
        diff = count - prev;
        prev = count;
        total -= diff;
    } while (total > 0);
}

static const struct cpu_vendor cpu_vendors[] = {
    { CPU_VENDOR_INTEL, "GenuineIntel" },
    { CPU_VENDOR_AMD, "AuthenticAMD" },
};

void * __init
cpu_get_boot_stack(void)
{
    return percpu_var(cpu_desc.boot_stack, boot_ap_id);
}

static void __init
cpu_preinit(struct cpu *cpu, unsigned int id, unsigned int apic_id)
{
    memset(cpu, 0, sizeof(*cpu));
    cpu->id = id;
    cpu->apic_id = apic_id;
}

static void
cpu_seg_set_null(char *table, unsigned int selector)
{
    struct cpu_seg_desc *desc;

    desc = (struct cpu_seg_desc *)(table + selector);
    desc->high = 0;
    desc->low = 0;
}

static void
cpu_seg_set_code(char *table, unsigned int selector)
{
    struct cpu_seg_desc *desc;

    desc = (struct cpu_seg_desc *)(table + selector);

#ifdef __LP64__
    desc->high = CPU_DESC_LONG | CPU_DESC_PRESENT | CPU_DESC_S
                 | CPU_DESC_TYPE_CODE;
    desc->low = 0;
#else /* __LP64__ */
    desc->high = CPU_DESC_GRAN_4KB | CPU_DESC_DB
                 | (0x000fffff & CPU_DESC_SEG_LIMIT_HIGH_MASK)
                 | CPU_DESC_PRESENT | CPU_DESC_S | CPU_DESC_TYPE_CODE;
    desc->low = 0x000fffff & CPU_DESC_SEG_LIMIT_LOW_MASK;
#endif /* __LP64__ */
}

static void
cpu_seg_set_data(char *table, unsigned int selector, uint32_t base)
{
    struct cpu_seg_desc *desc;

    desc = (struct cpu_seg_desc *)(table + selector);

#ifdef __LP64__
    (void)base;

    desc->high = CPU_DESC_DB | CPU_DESC_PRESENT | CPU_DESC_S
                 | CPU_DESC_TYPE_DATA;
    desc->low = 0;
#else /* __LP64__ */
    desc->high = (base & CPU_DESC_SEG_BASE_HIGH_MASK)
                 | CPU_DESC_GRAN_4KB | CPU_DESC_DB
                 | (0x000fffff & CPU_DESC_SEG_LIMIT_HIGH_MASK)
                 | CPU_DESC_PRESENT | CPU_DESC_S | CPU_DESC_TYPE_DATA
                 | ((base & CPU_DESC_SEG_BASE_MID_MASK) >> 16);
    desc->low = ((base & CPU_DESC_SEG_BASE_LOW_MASK) << 16)
                | (0x000fffff & CPU_DESC_SEG_LIMIT_LOW_MASK);
#endif /* __LP64__ */
}

static void
cpu_seg_set_tss(char *table, unsigned int selector, struct cpu_tss *tss)
{
    struct cpu_sysseg_desc *desc;
    unsigned long base, limit;

    desc = (struct cpu_sysseg_desc *)(table + selector);
    base = (unsigned long)tss;
    limit = base + sizeof(*tss) - 1;

#ifdef __LP64__
    desc->word4 = 0;
    desc->word3 = (base >> 32);
#endif /* __LP64__ */

    desc->word2 = (base & CPU_DESC_SEG_BASE_HIGH_MASK)
                  | (limit & CPU_DESC_SEG_LIMIT_HIGH_MASK)
                  | CPU_DESC_PRESENT | CPU_DESC_TYPE_TSS
                  | ((base & CPU_DESC_SEG_BASE_MID_MASK) >> 16);
    desc->word1 = ((base & CPU_DESC_SEG_BASE_LOW_MASK) << 16)
                  | (limit & CPU_DESC_SEG_LIMIT_LOW_MASK);
}

/*
 * Set the given GDT for the current processor.
 *
 * On i386, the ds, es and ss segment registers are reloaded.
 *
 * The fs and gs segment registers, which point to the percpu and the TLS
 * areas respectively, must be set separately.
 */
void cpu_load_gdt(struct cpu_pseudo_desc *gdtr);

static inline void __init
cpu_set_percpu_area(const struct cpu *cpu, void *area)
{
#ifdef __LP64__
    unsigned long va;

    va = (unsigned long)area;
    cpu_set_msr(CPU_MSR_FSBASE, (uint32_t)(va >> 32), (uint32_t)va);
#else /* __LP64__ */
    asm volatile("mov %0, %%fs" : : "r" (CPU_GDT_SEL_PERCPU));
#endif /* __LP64__ */

    percpu_var(cpu_local_area, cpu->id) = area;
}

static inline void __init
cpu_set_tls_area(void)
{
#ifdef __LP64__
    unsigned long va;

    va = (unsigned long)&cpu_tls_seg;
    cpu_set_msr(CPU_MSR_GSBASE, (uint32_t)(va >> 32), (uint32_t)va);
#else /* __LP64__ */
    asm volatile("mov %0, %%gs" : : "r" (CPU_GDT_SEL_TLS));
#endif /* __LP64__ */
}

static void __init
cpu_init_gdtr(struct cpu_pseudo_desc *gdtr, const struct cpu *cpu)
{
    gdtr->address = (unsigned long)cpu->gdt;
    gdtr->limit = sizeof(cpu->gdt) - 1;
}

static void __init
cpu_init_gdt(struct cpu *cpu)
{
    struct cpu_pseudo_desc gdtr;
    void *pcpu_area;

    pcpu_area = percpu_area(cpu->id);

    cpu_seg_set_null(cpu->gdt, CPU_GDT_SEL_NULL);
    cpu_seg_set_code(cpu->gdt, CPU_GDT_SEL_CODE);
    cpu_seg_set_data(cpu->gdt, CPU_GDT_SEL_DATA, 0);
    cpu_seg_set_tss(cpu->gdt, CPU_GDT_SEL_TSS, &cpu->tss);

#ifndef __LP64__
    cpu_seg_set_tss(cpu->gdt, CPU_GDT_SEL_DF_TSS, &cpu->double_fault_tss);
    cpu_seg_set_data(cpu->gdt, CPU_GDT_SEL_PERCPU, (unsigned long)pcpu_area);
    cpu_seg_set_data(cpu->gdt, CPU_GDT_SEL_TLS, (unsigned long)&cpu_tls_seg);
#endif /* __LP64__ */

    cpu_init_gdtr(&gdtr, cpu);
    cpu_load_gdt(&gdtr);
    cpu_set_percpu_area(cpu, pcpu_area);
    cpu_set_tls_area();
}

static void __init
cpu_init_ldt(void)
{
    asm volatile("lldt %w0" : : "q" (CPU_GDT_SEL_NULL));
}

static void __init
cpu_init_tss(struct cpu *cpu)
{
    struct cpu_tss *tss;

    tss = &cpu->tss;
    memset(tss, 0, sizeof(*tss));

#ifdef __LP64__
    assert(cpu->double_fault_stack != NULL);
    tss->ist[CPU_TSS_IST_DF] = (unsigned long)cpu->double_fault_stack
                               + TRAP_STACK_SIZE;
#endif /* __LP64__ */

    asm volatile("ltr %w0" : : "q" (CPU_GDT_SEL_TSS));
}

#ifndef __LP64__
static void __init
cpu_init_double_fault_tss(struct cpu *cpu)
{
    struct cpu_tss *tss;

    assert(cpu_double_fault_handler != 0);
    assert(cpu->double_fault_stack != NULL);

    tss = &cpu->double_fault_tss;
    memset(tss, 0, sizeof(*tss));
    tss->cr3 = cpu_get_cr3();
    tss->eip = cpu_double_fault_handler;
    tss->eflags = CPU_EFL_ONE;
    tss->ebp = (unsigned long)cpu->double_fault_stack + TRAP_STACK_SIZE;
    tss->esp = tss->ebp;
    tss->es = CPU_GDT_SEL_DATA;
    tss->cs = CPU_GDT_SEL_CODE;
    tss->ss = CPU_GDT_SEL_DATA;
    tss->ds = CPU_GDT_SEL_DATA;
    tss->fs = CPU_GDT_SEL_PERCPU;
}
#endif /* __LP64__ */

void
cpu_idt_set_gate(unsigned int vector, void (*isr)(void))
{
    struct cpu_gate_desc *desc;

    assert(vector < ARRAY_SIZE(cpu_idt));

    desc = &cpu_idt[vector];

#ifdef __LP64__
    desc->word4 = 0;
    desc->word3 = (unsigned long)isr >> 32;
#endif /* __LP64__ */

    /* Use interrupt gates only to simplify trap handling */
    desc->word2 = ((unsigned long)isr & CPU_DESC_GATE_OFFSET_HIGH_MASK)
                  | CPU_DESC_PRESENT | CPU_DESC_TYPE_GATE_INTR;
    desc->word1 = (CPU_GDT_SEL_CODE << 16)
                  | ((unsigned long)isr & CPU_DESC_GATE_OFFSET_LOW_MASK);
}
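
/*
 * Install the double fault handler.
 *
 * A double fault may be raised when the current stack is no longer usable,
 * so the handler must run on a stack that is known to be good. On amd64,
 * this is achieved with the interrupt stack table (IST) mechanism of the
 * 64-bit TSS; on i386, a task gate is used instead, making the processor
 * switch to the dedicated double fault TSS set up above.
 */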
void
cpu_idt_set_double_fault(void (*isr)(void))
{
    struct cpu_gate_desc *desc;

    cpu_double_fault_handler = (unsigned long)isr;

#ifdef __LP64__
    cpu_idt_set_gate(TRAP_DF, isr);
    desc = &cpu_idt[TRAP_DF];
    desc->word2 |= CPU_TSS_IST_DF & CPU_DESC_SEG_IST_MASK;
#else /* __LP64__ */
    desc = &cpu_idt[TRAP_DF];
    desc->word2 = CPU_DESC_PRESENT | CPU_DESC_TYPE_GATE_TASK;
    desc->word1 = CPU_GDT_SEL_DF_TSS << 16;
#endif /* __LP64__ */
}

static void
cpu_load_idt(const void *idt, size_t size)
{
    struct cpu_pseudo_desc idtr;

    idtr.address = (uintptr_t)idt;
    idtr.limit = size - 1;
    asm volatile("lidt %0" : : "m" (idtr));
}

static const struct cpu_vendor *
cpu_vendor_lookup(const char *str)
{
    for (size_t i = 0; i < ARRAY_SIZE(cpu_vendors); i++) {
        if (strcmp(str, cpu_vendors[i].str) == 0) {
            return &cpu_vendors[i];
        }
    }

    return NULL;
}

static void __init
cpu_init_vendor_id(struct cpu *cpu)
{
    const struct cpu_vendor *vendor;

    vendor = cpu_vendor_lookup(cpu->vendor_str);

    if (vendor == NULL) {
        return;
    }

    cpu->vendor_id = vendor->id;
}

/*
 * Initialize the given cpu structure for the current processor.
 */
static void __init
cpu_init(struct cpu *cpu)
{
    unsigned int eax, ebx, ecx, edx, max_basic, max_extended;

    /*
     * Assume at least an i586 processor.
     */

    cpu_intr_restore(CPU_EFL_ONE);
    cpu_set_cr0(CPU_CR0_PG | CPU_CR0_AM | CPU_CR0_WP | CPU_CR0_NE
                | CPU_CR0_ET | CPU_CR0_TS | CPU_CR0_MP | CPU_CR0_PE);
    cpu_init_gdt(cpu);
    cpu_init_ldt();
    cpu_init_tss(cpu);

#ifndef __LP64__
    cpu_init_double_fault_tss(cpu);
#endif /* __LP64__ */

    cpu_load_idt(cpu_idt, sizeof(cpu_idt));

    eax = 0;
    cpu_cpuid(&eax, &ebx, &ecx, &edx);
    max_basic = eax;
    cpu->cpuid_max_basic = max_basic;
    memcpy(cpu->vendor_str, &ebx, sizeof(ebx));
    memcpy(cpu->vendor_str + 4, &edx, sizeof(edx));
    memcpy(cpu->vendor_str + 8, &ecx, sizeof(ecx));
    cpu->vendor_str[sizeof(cpu->vendor_str) - 1] = '\0';
    cpu_init_vendor_id(cpu);

    /* Some fields are only initialized if supported by the processor */
    cpu->model_name[0] = '\0';
    cpu->phys_addr_width = 0;
    cpu->virt_addr_width = 0;

    assert(max_basic >= 1);

    eax = 1;
    cpu_cpuid(&eax, &ebx, &ecx, &edx);
    cpu->type = (eax & CPU_TYPE_MASK) >> CPU_TYPE_SHIFT;
    cpu->family = (eax & CPU_FAMILY_MASK) >> CPU_FAMILY_SHIFT;

    if (cpu->family == 0xf) {
        cpu->family += (eax & CPU_EXTFAMILY_MASK) >> CPU_EXTFAMILY_SHIFT;
    }

    cpu->model = (eax & CPU_MODEL_MASK) >> CPU_MODEL_SHIFT;

    /* The extended model ID extends the model as its high nibble */
    if ((cpu->family == 6) || (cpu->family == 0xf)) {
        cpu->model += ((eax & CPU_EXTMODEL_MASK) >> CPU_EXTMODEL_SHIFT) << 4;
    }

    cpu->stepping = (eax & CPU_STEPPING_MASK) >> CPU_STEPPING_SHIFT;
    cpu->clflush_size = ((ebx & CPU_CLFLUSH_MASK) >> CPU_CLFLUSH_SHIFT) * 8;
    cpu->initial_apic_id = (ebx & CPU_APIC_ID_MASK) >> CPU_APIC_ID_SHIFT;
    cpu->features1 = ecx;
    cpu->features2 = edx;

    eax = 0x80000000;
    cpu_cpuid(&eax, &ebx, &ecx, &edx);

    if (eax <= 0x80000000) {
        max_extended = 0;
    } else {
        max_extended = eax;
    }

    cpu->cpuid_max_extended = max_extended;

    if (max_extended < 0x80000001) {
        cpu->features3 = 0;
        cpu->features4 = 0;
    } else {
        eax = 0x80000001;
        cpu_cpuid(&eax, &ebx, &ecx, &edx);
        cpu->features3 = ecx;
        cpu->features4 = edx;
    }

    if (max_extended >= 0x80000004) {
        eax = 0x80000002;
        cpu_cpuid(&eax, &ebx, &ecx, &edx);
        memcpy(cpu->model_name, &eax, sizeof(eax));
        memcpy(cpu->model_name + 4, &ebx, sizeof(ebx));
        memcpy(cpu->model_name + 8, &ecx, sizeof(ecx));
        memcpy(cpu->model_name + 12, &edx, sizeof(edx));

        eax = 0x80000003;
        cpu_cpuid(&eax, &ebx, &ecx, &edx);
        memcpy(cpu->model_name + 16, &eax, sizeof(eax));
        memcpy(cpu->model_name + 20, &ebx, sizeof(ebx));
        memcpy(cpu->model_name + 24, &ecx, sizeof(ecx));
        memcpy(cpu->model_name + 28, &edx, sizeof(edx));

        eax = 0x80000004;
        cpu_cpuid(&eax, &ebx, &ecx, &edx);
        memcpy(cpu->model_name + 32, &eax, sizeof(eax));
        memcpy(cpu->model_name + 36, &ebx, sizeof(ebx));
        memcpy(cpu->model_name + 40, &ecx, sizeof(ecx));
        memcpy(cpu->model_name + 44, &edx, sizeof(edx));

        cpu->model_name[sizeof(cpu->model_name) - 1] = '\0';
    }

    if (max_extended >= 0x80000008) {
        eax = 0x80000008;
        cpu_cpuid(&eax, &ebx, &ecx, &edx);
        cpu->phys_addr_width = (unsigned short)eax & 0xff;
        cpu->virt_addr_width = ((unsigned short)eax >> 8) & 0xff;
    }

    cpu->state = CPU_STATE_ON;
}
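
/*
 * Measure the processor frequency by counting TSC ticks against the PIT,
 * which runs at a known rate. Note that the formula below is only exact
 * because CPU_FREQ_CAL_DELAY is precisely one second (1000000
 * microseconds), making the divisor 1: the frequency is then simply the
 * number of TSC ticks elapsed during the calibration delay, in Hz.
 */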
static void __init
cpu_measure_freq(void)
{
    uint64_t start, end;

    pit_setup_free_running();

    start = cpu_get_tsc();
    pit_delay(CPU_FREQ_CAL_DELAY);
    end = cpu_get_tsc();

    cpu_freq = (end - start) / (1000000 / CPU_FREQ_CAL_DELAY);
}

static int __init
cpu_setup(void)
{
    struct cpu *cpu;

    cpu = percpu_ptr(cpu_desc, 0);
    cpu_preinit(cpu, 0, CPU_INVALID_APIC_ID);
    cpu->double_fault_stack = cpu_double_fault_stack; /* XXX */

    cpu_init(cpu);
    cpu_measure_freq();

    return 0;
}

INIT_OP_DEFINE(cpu_setup,
               INIT_OP_DEP(percpu_bootstrap, true),
               INIT_OP_DEP(trap_setup, true));

static void __init
cpu_panic_on_missing_feature(const char *feature)
{
    panic("cpu: %s feature missing", feature);
}

static void __init
cpu_check(const struct cpu *cpu)
{
    if (!(cpu->features2 & CPU_FEATURE2_FPU)) {
        cpu_panic_on_missing_feature("fpu");
    }

    /*
     * The compiler is expected to produce cmpxchg8b instructions to
     * perform 64-bit atomic operations on a 32-bit processor. Clang
     * currently has trouble doing that, so 64-bit atomic support is
     * just disabled when building with it.
     */
#if !defined(__LP64__) && !defined(__clang__)
    if (!(cpu->features2 & CPU_FEATURE2_CX8)) {
        cpu_panic_on_missing_feature("cx8");
    }
#endif
}

static int __init
cpu_check_bsp(void)
{
    cpu_check(cpu_current());
    return 0;
}

INIT_OP_DEFINE(cpu_check_bsp,
               INIT_OP_DEP(cpu_setup, true),
               INIT_OP_DEP(panic_setup, true));

void
cpu_log_info(const struct cpu *cpu)
{
    log_info("cpu%u: %s, type %u, family %u, model %u, stepping %u",
             cpu->id, cpu->vendor_str, cpu->type, cpu->family,
             cpu->model, cpu->stepping);

    if (strlen(cpu->model_name) > 0) {
        log_info("cpu%u: %s", cpu->id, cpu->model_name);
    }

    if ((cpu->phys_addr_width != 0) && (cpu->virt_addr_width != 0)) {
        log_info("cpu%u: address widths: physical: %hu, virtual: %hu",
                 cpu->id, cpu->phys_addr_width, cpu->virt_addr_width);
    }

    /* Reduce the remainder to hundredths of a MHz for the 2-digit format */
    log_info("cpu%u: frequency: %llu.%02llu MHz", cpu->id,
             (unsigned long long)cpu_freq / 1000000,
             ((unsigned long long)cpu_freq % 1000000) / 10000);
}

void __init
cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
{
    struct cpu *cpu;
    int error;

    if (is_bsp) {
        cpu = percpu_ptr(cpu_desc, 0);

        if (cpu->apic_id != CPU_INVALID_APIC_ID) {
            panic("cpu: another processor pretends to be the BSP");
        }

        cpu->apic_id = apic_id;
        return;
    }

    error = percpu_add(cpu_nr_active);

    if (error) {
        return;
    }

    cpu = percpu_ptr(cpu_desc, cpu_nr_active);
    cpu_preinit(cpu, cpu_nr_active, apic_id);
    cpu_nr_active++;
}

static void
cpu_shutdown_reset(void)
{
    cpu_load_idt(NULL, 1);
    trap_trigger_double_fault();
}

static struct shutdown_ops cpu_shutdown_ops = {
    .reset = cpu_shutdown_reset,
};

static int __init
cpu_mp_probe(void)
{
    log_info("cpu: %u processor(s) configured", cpu_count());
    return 0;
}

INIT_OP_DEFINE(cpu_mp_probe,
               INIT_OP_DEP(acpi_setup, true),
               INIT_OP_DEP(cpu_setup, true),
               INIT_OP_DEP(log_setup, true));

static int __init
cpu_setup_shutdown(void)
{
    if (cpu_count() == 1) {
        shutdown_register(&cpu_shutdown_ops, CPU_SHUTDOWN_PRIORITY);
    }

    return 0;
}

INIT_OP_DEFINE(cpu_setup_shutdown,
               INIT_OP_DEP(cpu_mp_probe, true),
               INIT_OP_DEP(shutdown_bootstrap, true));
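
/*
 * Start the application processors.
 *
 * The APs boot in real mode through a trampoline copied below 1 MiB, using
 * the BIOS warm reset mechanism: the reset vector at 40:67 is pointed at
 * the trampoline, and the CMOS reset code is set to "warm reset" so the
 * BIOS jumps through that vector. Each AP is then brought up with the
 * INIT-SIPI-SIPI sequence of the Intel MultiProcessor specification, the
 * "Universal Start-up Algorithm": an INIT IPI is asserted and deasserted,
 * then two start-up IPIs carry the page number of the trampoline as their
 * vector.
 */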
void __init
cpu_mp_setup(void)
{
    struct vm_page *page;
    uint16_t reset_vector[2];
    struct cpu *cpu;
    unsigned int i;
    void *ptr;

    if (cpu_count() == 1) {
        pmap_mp_setup();
        return;
    }

    assert(BOOT_MP_TRAMPOLINE_ADDR < BIOSMEM_BASE);
    assert(vm_page_aligned(BOOT_MP_TRAMPOLINE_ADDR));
    assert(boot_mp_trampoline_size <= PAGE_SIZE);

    /* Set up the AP trampoline code */
    ptr = (void *)vm_page_direct_va(BOOT_MP_TRAMPOLINE_ADDR);
    memcpy(ptr, boot_mp_trampoline, boot_mp_trampoline_size);

    /* Set up the warm reset vector */
    reset_vector[0] = 0;
    reset_vector[1] = BOOT_MP_TRAMPOLINE_ADDR >> 4;
    ptr = (void *)vm_page_direct_va(CPU_MP_CMOS_RESET_VECTOR);
    memcpy(ptr, reset_vector, sizeof(reset_vector));

    io_write_byte(CPU_MP_CMOS_PORT_REG, CPU_MP_CMOS_REG_RESET);
    io_write_byte(CPU_MP_CMOS_PORT_DATA, CPU_MP_CMOS_DATA_RESET_WARM);

    /* TODO Allocate stacks out of the slab allocator for sub-page sizes */

    for (i = 1; i < cpu_count(); i++) {
        cpu = percpu_ptr(cpu_desc, i);

        page = vm_page_alloc(vm_page_order(BOOT_STACK_SIZE),
                             VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);

        if (page == NULL) {
            panic("cpu: unable to allocate boot stack for cpu%u", i);
        }

        cpu->boot_stack = vm_page_direct_ptr(page);

        page = vm_page_alloc(vm_page_order(TRAP_STACK_SIZE),
                             VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);

        if (page == NULL) {
            panic("cpu: unable to allocate double fault stack for cpu%u", i);
        }

        cpu->double_fault_stack = vm_page_direct_ptr(page);
    }

    /*
     * This function creates per-CPU copies of the page tables. Just in case,
     * call it last to make sure all processors get the same mappings.
     */
    pmap_mp_setup();

    for (i = 1; i < cpu_count(); i++) {
        cpu = percpu_ptr(cpu_desc, i);
        boot_ap_id = i;

        /* Perform the "Universal Start-up Algorithm" */
        lapic_ipi_init_assert(cpu->apic_id);
        cpu_delay(200);
        lapic_ipi_init_deassert(cpu->apic_id);
        cpu_delay(10000);
        lapic_ipi_startup(cpu->apic_id, BOOT_MP_TRAMPOLINE_ADDR >> 12);
        cpu_delay(200);
        lapic_ipi_startup(cpu->apic_id, BOOT_MP_TRAMPOLINE_ADDR >> 12);
        cpu_delay(200);

        while (cpu->state == CPU_STATE_OFF) {
            cpu_pause();
        }
    }
}

void __init
cpu_ap_setup(void)
{
    struct cpu *cpu;

    cpu = percpu_ptr(cpu_desc, boot_ap_id);
    cpu_init(cpu);
    cpu_check(cpu_current());
    lapic_ap_setup();
}

void
cpu_halt_broadcast(void)
{
    unsigned int nr_cpus;

    assert(!cpu_intr_enabled());

    nr_cpus = cpu_count();

    if (nr_cpus == 1) {
        return;
    }

    lapic_ipi_broadcast(TRAP_CPU_HALT);
}

void
cpu_halt_intr(struct trap_frame *frame)
{
    (void)frame;

    lapic_eoi();

    cpu_halt();
}

void
cpu_xcall_intr(struct trap_frame *frame)
{
    (void)frame;

    lapic_eoi();

    xcall_intr();
}

void
cpu_thread_schedule_intr(struct trap_frame *frame)
{
    (void)frame;

    lapic_eoi();

    thread_schedule_intr();
}