summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2018-07-07 14:46:27 +0200
committerRichard Braun <rbraun@sceen.net>2018-07-07 14:47:28 +0200
commit0c0e2a02a42a8161e1b8dc1e1943fe5057ecb3a3 (patch)
treed5fa4c3160fc2c7ee910f9ff757549825f44a338
parent3a1f3702ec2d4eb8e5d8528dfbcb3bed21b623cf (diff)
x86: refactor interrupt/exception handling
This commit merges the trap module into the cpu module in order to solve interface problems caused by the degree to which those two modules are actually coupled, i.e. it just makes a lot more sense to not separate them at all. The cpu module is also internally reworked with improved object and method definitions, that clarify the double fault handling code, among other things.
-rw-r--r--arch/x86/Makefile2
-rw-r--r--arch/x86/machine/acpi.c1
-rw-r--r--arch/x86/machine/cpu.c977
-rw-r--r--arch/x86/machine/cpu.h373
-rw-r--r--arch/x86/machine/cpu_asm.S728
-rw-r--r--arch/x86/machine/cpu_i.h196
-rw-r--r--arch/x86/machine/ioapic.c15
-rw-r--r--arch/x86/machine/lapic.c111
-rw-r--r--arch/x86/machine/lapic.h12
-rw-r--r--arch/x86/machine/pic.c13
-rw-r--r--arch/x86/machine/pmap.c2
-rw-r--r--arch/x86/machine/trap.c354
-rw-r--r--arch/x86/machine/trap.h186
-rw-r--r--arch/x86/machine/trap_asm.S693
-rw-r--r--arch/x86/machine/uart.c1
-rw-r--r--kern/intr.c3
16 files changed, 1847 insertions, 1820 deletions
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 226f4a90..daf862d8 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -58,8 +58,6 @@ x15_SOURCES-y += \
arch/x86/machine/string.c \
arch/x86/machine/tcb_asm.S \
arch/x86/machine/tcb.c \
- arch/x86/machine/trap_asm.S \
- arch/x86/machine/trap.c \
arch/x86/machine/uart.c
x15_SOURCES-$(CONFIG_X86_PMU_AMD) += arch/x86/machine/pmu_amd.c
diff --git a/arch/x86/machine/acpi.c b/arch/x86/machine/acpi.c
index f088f2c8..3e6bbad8 100644
--- a/arch/x86/machine/acpi.c
+++ b/arch/x86/machine/acpi.c
@@ -708,5 +708,4 @@ INIT_OP_DEFINE(acpi_setup,
INIT_OP_DEP(log_setup, true),
INIT_OP_DEP(percpu_setup, true),
INIT_OP_DEP(shutdown_bootstrap, true),
- INIT_OP_DEP(trap_setup, true),
INIT_OP_DEP(vm_kmem_setup, true));
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index 6cbe168a..fc8fde3f 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2017 Richard Braun.
+ * Copyright (c) 2010-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,15 +17,19 @@
#include <assert.h>
#include <stdalign.h>
+#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
+#include <stdio.h>
#include <string.h>
#include <kern/init.h>
+#include <kern/kmem.h>
#include <kern/log.h>
#include <kern/macros.h>
#include <kern/panic.h>
#include <kern/percpu.h>
+#include <kern/spinlock.h>
#include <kern/shutdown.h>
#include <kern/thread.h>
#include <kern/xcall.h>
@@ -36,11 +40,10 @@
#include <machine/io.h>
#include <machine/lapic.h>
#include <machine/page.h>
-#include <machine/pic.h>
#include <machine/pit.h>
#include <machine/pmap.h>
#include <machine/ssp.h>
-#include <machine/trap.h>
+#include <machine/strace.h>
#include <vm/vm_page.h>
/*
@@ -48,24 +51,24 @@
*/
#define CPU_FREQ_CAL_DELAY 1000000
-#define CPU_TYPE_MASK 0x00003000
-#define CPU_TYPE_SHIFT 12
-#define CPU_FAMILY_MASK 0x00000f00
-#define CPU_FAMILY_SHIFT 8
-#define CPU_EXTFAMILY_MASK 0x0ff00000
-#define CPU_EXTFAMILY_SHIFT 20
-#define CPU_MODEL_MASK 0x000000f0
-#define CPU_MODEL_SHIFT 4
-#define CPU_EXTMODEL_MASK 0x000f0000
-#define CPU_EXTMODEL_SHIFT 16
-#define CPU_STEPPING_MASK 0x0000000f
-#define CPU_STEPPING_SHIFT 0
-#define CPU_BRAND_MASK 0x000000ff
-#define CPU_BRAND_SHIFT 0
-#define CPU_CLFLUSH_MASK 0x0000ff00
-#define CPU_CLFLUSH_SHIFT 8
-#define CPU_APIC_ID_MASK 0xff000000
-#define CPU_APIC_ID_SHIFT 24
+#define CPU_CPUID_TYPE_MASK 0x00003000
+#define CPU_CPUID_TYPE_SHIFT 12
+#define CPU_CPUID_FAMILY_MASK 0x00000f00
+#define CPU_CPUID_FAMILY_SHIFT 8
+#define CPU_CPUID_EXTFAMILY_MASK 0x0ff00000
+#define CPU_CPUID_EXTFAMILY_SHIFT 20
+#define CPU_CPUID_MODEL_MASK 0x000000f0
+#define CPU_CPUID_MODEL_SHIFT 4
+#define CPU_CPUID_EXTMODEL_MASK 0x000f0000
+#define CPU_CPUID_EXTMODEL_SHIFT 16
+#define CPU_CPUID_STEPPING_MASK 0x0000000f
+#define CPU_CPUID_STEPPING_SHIFT 0
+#define CPU_CPUID_BRAND_MASK 0x000000ff
+#define CPU_CPUID_BRAND_SHIFT 0
+#define CPU_CPUID_CLFLUSH_MASK 0x0000ff00
+#define CPU_CPUID_CLFLUSH_SHIFT 8
+#define CPU_CPUID_APIC_ID_MASK 0xff000000
+#define CPU_CPUID_APIC_ID_SHIFT 24
#define CPU_INVALID_APIC_ID ((unsigned int)-1)
@@ -75,6 +78,11 @@ struct cpu_vendor {
};
/*
+ * IST indexes (0 is reserved).
+ */
+#define CPU_TSS_IST_DF 1
+
+/*
* MP related CMOS ports, registers and values.
*/
#define CPU_MP_CMOS_PORT_REG 0x70
@@ -90,9 +98,6 @@ struct cpu_vendor {
*/
#define CPU_SHUTDOWN_PRIORITY 0
-/*
- * Gate descriptor.
- */
struct cpu_gate_desc {
uint32_t word1;
uint32_t word2;
@@ -102,16 +107,8 @@ struct cpu_gate_desc {
#endif /* __LP64__ */
};
-/*
- * LDT or TSS system segment descriptor.
- */
-struct cpu_sysseg_desc {
- uint32_t word1;
- uint32_t word2;
-#ifdef __LP64__
- uint32_t word3;
- uint32_t word4;
-#endif /* __LP64__ */
+struct cpu_idt {
+ alignas(CPU_L1_SIZE) struct cpu_gate_desc descs[CPU_NR_EXC_VECTORS];
};
struct cpu_pseudo_desc {
@@ -119,10 +116,106 @@ struct cpu_pseudo_desc {
uintptr_t address;
} __packed;
+#ifdef __LP64__
+
+struct cpu_exc_frame {
+ uint64_t rax;
+ uint64_t rbx;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t vector;
+ uint64_t error;
+ uint64_t rip;
+ uint64_t cs;
+ uint64_t rflags;
+ uint64_t rsp;
+ uint64_t ss;
+} __packed;
+
+#else /* __LP64__ */
+
+struct cpu_exc_frame {
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+ uint16_t ds;
+ uint16_t es;
+ uint16_t fs;
+ uint16_t gs;
+ uint32_t vector;
+ uint32_t error;
+ uint32_t eip;
+ uint32_t cs;
+ uint32_t eflags;
+ uint32_t esp; /* esp and ss are undefined if trapped in kernel */
+ uint32_t ss;
+} __packed;
+
+#endif /* __LP64__ */
+
+/*
+ * Type for low level exception handlers.
+ *
+ * Low level exception handlers are directly installed in the IDT and are
+ * first run by the processor when an exception occurs. They route execution
+ * through either the main exception or interrupt handler.
+ */
+typedef void (*cpu_ll_exc_fn_t)(void);
+
+typedef void (*cpu_exc_handler_fn_t)(const struct cpu_exc_frame *frame);
+
+struct cpu_exc_handler {
+ cpu_exc_handler_fn_t fn;
+};
+
+struct cpu_intr_handler {
+ cpu_intr_handler_fn_t fn;
+};
+
+/*
+ * Set the given GDT for the current processor.
+ *
+ * On i386, the ds, es and ss segment registers are reloaded.
+ *
+ * The fs and gs segment registers, which point to the percpu and the TLS
+ * areas respectively, must be set separately.
+ */
+void cpu_load_gdt(struct cpu_pseudo_desc *gdtr);
+
+/*
+ * Return a pointer to the processor-local interrupt stack.
+ *
+ * This function is called by the low level exception handling code.
+ *
+ * Return NULL if no stack switching is required.
+ */
+void * cpu_get_intr_stack(void);
+
+/*
+ * Common entry points for exceptions and interrupts.
+ */
+void cpu_exc_main(const struct cpu_exc_frame *frame);
+void cpu_intr_main(const struct cpu_exc_frame *frame);
+
void *cpu_local_area __percpu;
/*
- * Processor descriptor, one per CPU.
+ * CPU descriptor, one per CPU.
*/
struct cpu cpu_desc __percpu;
@@ -136,29 +229,162 @@ unsigned int cpu_nr_active __read_mostly = 1;
*/
static uint64_t cpu_freq __read_mostly;
-/*
- * TLS segment, as expected by the compiler.
- *
- * TLS isn't actually used inside the kernel. The current purpose of this
- * segment is to implement stack protection.
- */
static const struct cpu_tls_seg cpu_tls_seg = {
.ssp_guard_word = SSP_GUARD_WORD,
};
-/*
- * Interrupt descriptor table.
- */
-static alignas(8) struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __read_mostly;
+static struct cpu_idt cpu_idt;
/*
- * Double fault handler, and stack for the main processor.
- *
- * TODO Declare as init data, and replace the BSP stack with kernel virtual
- * memory.
+ * This table only exists during initialization, and is a way to
+ * communicate the list of low level handlers from assembly to C.
*/
-static unsigned long cpu_double_fault_handler;
-static alignas(CPU_DATA_ALIGN) char cpu_double_fault_stack[TRAP_STACK_SIZE];
+extern cpu_ll_exc_fn_t cpu_ll_exc_handler_addrs[CPU_NR_EXC_VECTORS];
+
+static struct cpu_exc_handler cpu_exc_handlers[CPU_NR_EXC_VECTORS]
+ __read_mostly;
+
+static struct cpu_intr_handler cpu_intr_handlers[CPU_NR_EXC_VECTORS]
+ __read_mostly;
+
+static const struct cpu_vendor cpu_vendors[] = {
+ { CPU_VENDOR_INTEL, "GenuineIntel" },
+ { CPU_VENDOR_AMD, "AuthenticAMD" },
+};
+
+static void __init
+cpu_exc_handler_init(struct cpu_exc_handler *handler, cpu_exc_handler_fn_t fn)
+{
+ handler->fn = fn;
+}
+
+static void
+cpu_exc_handler_run(const struct cpu_exc_handler *handler,
+ const struct cpu_exc_frame *frame)
+{
+ handler->fn(frame);
+}
+
+static void __init
+cpu_intr_handler_init(struct cpu_intr_handler *handler,
+ cpu_intr_handler_fn_t fn)
+{
+ handler->fn = fn;
+}
+
+static void
+cpu_intr_handler_run(const struct cpu_intr_handler *handler,
+ unsigned int vector)
+{
+ handler->fn(vector);
+}
+
+static cpu_ll_exc_fn_t __init
+cpu_get_ll_exc_handler(unsigned int vector)
+{
+ assert(vector < ARRAY_SIZE(cpu_ll_exc_handler_addrs));
+ return cpu_ll_exc_handler_addrs[vector];
+}
+
+static struct cpu_exc_handler *
+cpu_get_exc_handler(unsigned int vector)
+{
+ assert(vector < ARRAY_SIZE(cpu_exc_handlers));
+ return &cpu_exc_handlers[vector];
+}
+
+static void __init
+cpu_register_exc(unsigned int vector, cpu_exc_handler_fn_t fn)
+{
+ cpu_exc_handler_init(cpu_get_exc_handler(vector), fn);
+}
+
+static struct cpu_intr_handler *
+cpu_get_intr_handler(unsigned int vector)
+{
+ assert(vector < ARRAY_SIZE(cpu_intr_handlers));
+ return &cpu_intr_handlers[vector];
+}
+
+void __init
+cpu_register_intr(unsigned int vector, cpu_intr_handler_fn_t fn)
+{
+ cpu_intr_handler_init(cpu_get_intr_handler(vector), fn);
+}
+
+static void __init
+cpu_gate_desc_init_intr(struct cpu_gate_desc *desc, cpu_ll_exc_fn_t fn,
+ unsigned int ist_index)
+{
+ uintptr_t addr;
+
+ addr = (uintptr_t)fn;
+ desc->word1 = (CPU_GDT_SEL_CODE << 16)
+ | (addr & CPU_DESC_GATE_OFFSET_LOW_MASK);
+ desc->word2 = (addr & CPU_DESC_GATE_OFFSET_HIGH_MASK)
+ | CPU_DESC_PRESENT | CPU_DESC_TYPE_GATE_INTR;
+
+#ifdef __LP64__
+ desc->word2 |= ist_index & CPU_DESC_SEG_IST_MASK;
+ desc->word3 = addr >> 32;
+ desc->word4 = 0;
+#else /* __LP64__ */
+ assert(ist_index == 0);
+#endif /* __LP64__ */
+}
+
+#ifndef __LP64__
+static void __init
+cpu_gate_desc_init_task(struct cpu_gate_desc *desc, unsigned int tss_seg_sel)
+{
+ desc->word2 = CPU_DESC_PRESENT | CPU_DESC_TYPE_GATE_TASK;
+ desc->word1 = tss_seg_sel << 16;
+}
+#endif /* __LP64__ */
+
+static struct cpu_gate_desc * __init
+cpu_idt_get_desc(struct cpu_idt *idt, unsigned int vector)
+{
+ assert(vector < ARRAY_SIZE(idt->descs));
+ return &idt->descs[vector];
+}
+
+static void __init
+cpu_idt_set_intr_gate(struct cpu_idt *idt, unsigned int vector,
+ cpu_ll_exc_fn_t fn)
+{
+ struct cpu_gate_desc *desc;
+
+ desc = cpu_idt_get_desc(idt, vector);
+ cpu_gate_desc_init_intr(desc, fn, 0);
+}
+
+static void __init
+cpu_idt_setup_double_fault(struct cpu_idt *idt)
+{
+ struct cpu_gate_desc *desc;
+
+ desc = cpu_idt_get_desc(idt, CPU_EXC_DF);
+
+#ifdef __LP64__
+ cpu_ll_exc_fn_t fn;
+
+ fn = cpu_get_ll_exc_handler(CPU_EXC_DF);
+ cpu_gate_desc_init_intr(desc, fn, CPU_TSS_IST_DF);
+#else /* __LP64__ */
+ cpu_gate_desc_init_task(desc, CPU_GDT_SEL_DF_TSS);
+#endif /* __LP64__ */
+}
+
+static void
+cpu_idt_load(const struct cpu_idt *idt)
+{
+ struct cpu_pseudo_desc idtr;
+
+ idtr.address = (uintptr_t)idt->descs;
+ idtr.limit = sizeof(idt->descs) - 1;
+ asm volatile("lidt %0" : : "m" (idtr));
+}
uint64_t
cpu_get_freq(void)
@@ -166,6 +392,15 @@ cpu_get_freq(void)
return cpu_freq;
}
+static uint64_t
+cpu_get_tsc(void)
+{
+ uint32_t high, low;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+ return ((uint64_t)high << 32) | low;
+}
+
void
cpu_delay(unsigned long usecs)
{
@@ -184,42 +419,286 @@ cpu_delay(unsigned long usecs)
} while (total > 0);
}
-static const struct cpu_vendor cpu_vendors[] = {
- { CPU_VENDOR_INTEL, "GenuineIntel" },
- { CPU_VENDOR_AMD, "AuthenticAMD" },
-};
-
void * __init
cpu_get_boot_stack(void)
{
- return percpu_var(cpu_desc.boot_stack, boot_ap_id);
+ return percpu_var(cpu_desc.boot_stack, boot_ap_id); // TODO Pass as argument
}
-static void __init
-cpu_preinit(struct cpu *cpu, unsigned int id, unsigned int apic_id)
+void *
+cpu_get_intr_stack(void)
{
- memset(cpu, 0, sizeof(*cpu));
- cpu->id = id;
- cpu->apic_id = apic_id;
+ struct cpu *cpu;
+
+ if (thread_interrupted()) {
+ return NULL;
+ }
+
+ cpu = cpu_local_ptr(cpu_desc);
+ return cpu->intr_stack + sizeof(cpu->intr_stack);
}
static void
-cpu_seg_set_null(char *table, unsigned int selector)
+cpu_show_thread(void)
{
- struct cpu_seg_desc *desc;
+ struct thread *thread;
- desc = (struct cpu_seg_desc *)(table + selector);
- desc->high = 0;
- desc->low = 0;
+ thread = thread_self();
+
+ /* TODO Thread name accessor */
+ printf("cpu: interrupted thread: %p (%s)\n", thread, thread->name);
}
+#ifdef __LP64__
+
static void
-cpu_seg_set_code(char *table, unsigned int selector)
+cpu_show_frame(const struct cpu_exc_frame *frame)
{
- struct cpu_seg_desc *desc;
+ printf("cpu: rax: %016lx rbx: %016lx rcx: %016lx\n"
+ "cpu: rdx: %016lx rbp: %016lx rsi: %016lx\n"
+ "cpu: rdi: %016lx r8: %016lx r9: %016lx\n"
+ "cpu: r10: %016lx r11: %016lx r12: %016lx\n"
+ "cpu: r13: %016lx r14: %016lx r15: %016lx\n"
+ "cpu: vector: %lu error: %08lx\n"
+ "cpu: rip: %016lx cs: %lu rflags: %016lx\n"
+ "cpu: rsp: %016lx ss: %lu\n",
+ (unsigned long)frame->rax, (unsigned long)frame->rbx,
+ (unsigned long)frame->rcx, (unsigned long)frame->rdx,
+ (unsigned long)frame->rbp, (unsigned long)frame->rsi,
+ (unsigned long)frame->rdi, (unsigned long)frame->r8,
+ (unsigned long)frame->r9, (unsigned long)frame->r10,
+ (unsigned long)frame->r11, (unsigned long)frame->r12,
+ (unsigned long)frame->r13, (unsigned long)frame->r14,
+ (unsigned long)frame->r15, (unsigned long)frame->vector,
+ (unsigned long)frame->error, (unsigned long)frame->rip,
+ (unsigned long)frame->cs, (unsigned long)frame->rflags,
+ (unsigned long)frame->rsp, (unsigned long)frame->ss);
+
+ /* XXX Until the page fault handler is written */
+ if (frame->vector == 14) {
+ printf("cpu: cr2: %016lx\n", (unsigned long)cpu_get_cr2());
+ }
+}
+
+#else /* __LP64__ */
- desc = (struct cpu_seg_desc *)(table + selector);
+static void
+cpu_show_frame(const struct cpu_exc_frame *frame)
+{
+ unsigned long esp, ss;
+ if ((frame->cs & CPU_PL_USER) || (frame->vector == CPU_EXC_DF)) {
+ esp = frame->esp;
+ ss = frame->ss;
+ } else {
+ esp = 0;
+ ss = 0;
+ }
+
+ printf("cpu: eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n"
+ "cpu: ebp: %08lx esi: %08lx edi: %08lx\n"
+ "cpu: ds: %hu es: %hu fs: %hu gs: %hu\n"
+ "cpu: vector: %lu error: %08lx\n"
+ "cpu: eip: %08lx cs: %lu eflags: %08lx\n"
+ "cpu: esp: %08lx ss: %lu\n",
+ (unsigned long)frame->eax, (unsigned long)frame->ebx,
+ (unsigned long)frame->ecx, (unsigned long)frame->edx,
+ (unsigned long)frame->ebp, (unsigned long)frame->esi,
+ (unsigned long)frame->edi, (unsigned short)frame->ds,
+ (unsigned short)frame->es, (unsigned short)frame->fs,
+ (unsigned short)frame->gs, (unsigned long)frame->vector,
+ (unsigned long)frame->error, (unsigned long)frame->eip,
+ (unsigned long)frame->cs, (unsigned long)frame->eflags,
+ (unsigned long)esp, (unsigned long)ss);
+
+
+ /* XXX Until the page fault handler is written */
+ if (frame->vector == 14) {
+ printf("cpu: cr2: %08lx\n", (unsigned long)cpu_get_cr2());
+ }
+}
+
+#endif /* __LP64__ */
+
+static void
+cpu_show_stack(const struct cpu_exc_frame *frame)
+{
+#ifdef __LP64__
+ strace_show(frame->rip, frame->rbp);
+#else /* __LP64__ */
+ strace_show(frame->eip, frame->ebp);
+#endif /* __LP64__ */
+}
+
+static void
+cpu_exc_double_fault(const struct cpu_exc_frame *frame)
+{
+ cpu_halt_broadcast();
+
+#ifndef __LP64__
+ struct cpu_exc_frame frame_store;
+ struct cpu *cpu;
+
+ /*
+ * Double faults are caught through a task gate, which makes the given
+ * frame useless. The interrupted state is automatically saved in the
+ * main TSS by the processor. Build a proper exception frame from there.
+ */
+ cpu = cpu_current();
+ frame_store.eax = cpu->tss.eax;
+ frame_store.ebx = cpu->tss.ebx;
+ frame_store.ecx = cpu->tss.ecx;
+ frame_store.edx = cpu->tss.edx;
+ frame_store.ebp = cpu->tss.ebp;
+ frame_store.esi = cpu->tss.esi;
+ frame_store.edi = cpu->tss.edi;
+ frame_store.ds = cpu->tss.ds;
+ frame_store.es = cpu->tss.es;
+ frame_store.fs = cpu->tss.fs;
+ frame_store.gs = cpu->tss.gs;
+ frame_store.vector = CPU_EXC_DF;
+ frame_store.error = 0;
+ frame_store.eip = cpu->tss.eip;
+ frame_store.cs = cpu->tss.cs;
+ frame_store.eflags = cpu->tss.eflags;
+ frame_store.esp = cpu->tss.esp;
+ frame_store.ss = cpu->tss.ss;
+ frame = &frame_store;
+#endif /* __LP64__ */
+
+ printf("cpu: double fault (cpu%u):\n", cpu_id());
+ cpu_show_thread();
+ cpu_show_frame(frame);
+ cpu_show_stack(frame);
+ cpu_halt();
+}
+
+void
+cpu_exc_main(const struct cpu_exc_frame *frame)
+{
+ const struct cpu_exc_handler *handler;
+
+ handler = cpu_get_exc_handler(frame->vector);
+ cpu_exc_handler_run(handler, frame);
+ assert(!cpu_intr_enabled());
+}
+
+void
+cpu_intr_main(const struct cpu_exc_frame *frame)
+{
+ const struct cpu_intr_handler *handler;
+
+ handler = cpu_get_intr_handler(frame->vector);
+
+ thread_intr_enter();
+ cpu_intr_handler_run(handler, frame->vector);
+ thread_intr_leave();
+
+ assert(!cpu_intr_enabled());
+}
+
+static void
+cpu_exc_default(const struct cpu_exc_frame *frame)
+{
+ cpu_halt_broadcast();
+ printf("cpu: unregistered exception (cpu%u):\n", cpu_id());
+ cpu_show_thread();
+ cpu_show_frame(frame);
+ cpu_show_stack(frame);
+ cpu_halt();
+}
+
+static void
+cpu_intr_default(unsigned int vector)
+{
+ cpu_halt_broadcast();
+ printf("cpu: unregistered interrupt %u (cpu%u):\n", vector, cpu_id());
+ cpu_show_thread();
+ cpu_halt();
+}
+
+static void
+cpu_xcall_intr(unsigned int vector)
+{
+ (void)vector;
+
+ lapic_eoi();
+ xcall_intr();
+}
+
+static void
+cpu_thread_schedule_intr(unsigned int vector)
+{
+ (void)vector;
+
+ lapic_eoi();
+ thread_schedule_intr();
+}
+
+static void
+cpu_halt_intr(unsigned int vector)
+{
+ (void)vector;
+
+ lapic_eoi();
+ cpu_halt();
+}
+
+static void __init
+cpu_setup_idt(void)
+{
+ for (size_t i = 0; i < ARRAY_SIZE(cpu_ll_exc_handler_addrs); i++) {
+ cpu_idt_set_intr_gate(&cpu_idt, i, cpu_get_ll_exc_handler(i));
+ }
+
+ cpu_idt_setup_double_fault(&cpu_idt);
+}
+
+static void __init
+cpu_setup_intr(void)
+{
+ cpu_setup_idt();
+
+ for (size_t i = 0; i < ARRAY_SIZE(cpu_exc_handlers); i++) {
+ cpu_register_exc(i, cpu_exc_default);
+ }
+
+ /* Architecture defined exceptions */
+ cpu_register_exc(CPU_EXC_DE, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_DB, cpu_exc_default);
+ cpu_register_intr(CPU_EXC_NMI, cpu_intr_default);
+ cpu_register_exc(CPU_EXC_BP, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_OF, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_BR, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_UD, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_NM, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_DF, cpu_exc_double_fault);
+ cpu_register_exc(CPU_EXC_TS, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_NP, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_SS, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_GP, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_PF, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_MF, cpu_exc_default);
+ cpu_register_exc(CPU_EXC_AC, cpu_exc_default);
+ cpu_register_intr(CPU_EXC_MC, cpu_intr_default);
+ cpu_register_exc(CPU_EXC_XM, cpu_exc_default);
+
+ /* System defined exceptions */
+ cpu_register_intr(CPU_EXC_XCALL, cpu_xcall_intr);
+ cpu_register_intr(CPU_EXC_THREAD_SCHEDULE, cpu_thread_schedule_intr);
+ cpu_register_intr(CPU_EXC_HALT, cpu_halt_intr);
+}
+
+static void __init
+cpu_seg_desc_init_null(struct cpu_seg_desc *desc)
+{
+ desc->high = 0;
+ desc->low = 0;
+}
+
+static void __init
+cpu_seg_desc_init_code(struct cpu_seg_desc *desc)
+{
#ifdef __LP64__
desc->high = CPU_DESC_LONG | CPU_DESC_PRESENT | CPU_DESC_S
| CPU_DESC_TYPE_CODE;
@@ -232,13 +711,9 @@ cpu_seg_set_code(char *table, unsigned int selector)
#endif /* __LP64__ */
}
-static void
-cpu_seg_set_data(char *table, unsigned int selector, uint32_t base)
+static void __init
+cpu_seg_desc_init_data(struct cpu_seg_desc *desc, uintptr_t base)
{
- struct cpu_seg_desc *desc;
-
- desc = (struct cpu_seg_desc *)(table + selector);
-
#ifdef __LP64__
(void)base;
@@ -256,21 +731,19 @@ cpu_seg_set_data(char *table, unsigned int selector, uint32_t base)
#endif /* __LP64__ */
}
-static void
-cpu_seg_set_tss(char *table, unsigned int selector, struct cpu_tss *tss)
+static void __init
+cpu_sysseg_desc_init_tss(struct cpu_sysseg_desc *desc,
+ const struct cpu_tss *tss)
{
- struct cpu_sysseg_desc *desc;
- unsigned long base, limit;
+ uintptr_t base, limit;
- desc = (struct cpu_sysseg_desc *)(table + selector);
- base = (unsigned long)tss;
+ base = (uintptr_t)tss;
limit = base + sizeof(*tss) - 1;
#ifdef __LP64__
desc->word4 = 0;
desc->word3 = (base >> 32);
#endif /* __LP64__ */
-
desc->word2 = (base & CPU_DESC_SEG_BASE_HIGH_MASK)
| (limit & CPU_DESC_SEG_LIMIT_HIGH_MASK)
| CPU_DESC_PRESENT | CPU_DESC_TYPE_TSS
@@ -279,114 +752,101 @@ cpu_seg_set_tss(char *table, unsigned int selector, struct cpu_tss *tss)
| (limit & CPU_DESC_SEG_LIMIT_LOW_MASK);
}
-/*
- * Set the given GDT for the current processor.
- *
- * On i386, the ds, es and ss segment registers are reloaded.
- *
- * The fs and gs segment registers, which point to the percpu and the TLS
- * areas respectively, must be set separately.
- */
-void cpu_load_gdt(struct cpu_pseudo_desc *gdtr);
-
-static inline void __init
-cpu_set_percpu_area(const struct cpu *cpu, void *area)
+static void * __init
+cpu_gdt_get_desc(struct cpu_gdt *gdt, unsigned int selector)
{
-#ifdef __LP64__
- unsigned long va;
+ assert((selector % sizeof(struct cpu_seg_desc)) == 0);
+ assert(selector < sizeof(gdt->descs));
+ return gdt->descs + selector;
+}
- va = (unsigned long)area;
- cpu_set_msr(CPU_MSR_FSBASE, (uint32_t)(va >> 32), (uint32_t)va);
-#else /* __LP64__ */
- asm volatile("mov %0, %%fs" : : "r" (CPU_GDT_SEL_PERCPU));
-#endif /* __LP64__ */
+static void __init
+cpu_gdt_set_null(struct cpu_gdt *gdt, unsigned int selector)
+{
+ struct cpu_seg_desc *desc;
- percpu_var(cpu_local_area, cpu->id) = area;
+ desc = cpu_gdt_get_desc(gdt, selector);
+ cpu_seg_desc_init_null(desc);
}
-static inline void __init
-cpu_set_tls_area(void)
+static void __init
+cpu_gdt_set_code(struct cpu_gdt *gdt, unsigned int selector)
{
-#ifdef __LP64__
- unsigned long va;
+ struct cpu_seg_desc *desc;
- va = (unsigned long)&cpu_tls_seg;
- cpu_set_msr(CPU_MSR_GSBASE, (uint32_t)(va >> 32), (uint32_t)va);
-#else /* __LP64__ */
- asm volatile("mov %0, %%gs" : : "r" (CPU_GDT_SEL_TLS));
-#endif /* __LP64__ */
+ desc = cpu_gdt_get_desc(gdt, selector);
+ cpu_seg_desc_init_code(desc);
}
static void __init
-cpu_init_gdtr(struct cpu_pseudo_desc *gdtr, const struct cpu *cpu)
+cpu_gdt_set_data(struct cpu_gdt *gdt, unsigned int selector, const void *base)
{
- gdtr->address = (unsigned long)cpu->gdt;
- gdtr->limit = sizeof(cpu->gdt) - 1;
+ struct cpu_seg_desc *desc;
+
+ desc = cpu_gdt_get_desc(gdt, selector);
+ cpu_seg_desc_init_data(desc, (uintptr_t)base);
}
static void __init
-cpu_init_gdt(struct cpu *cpu)
+cpu_gdt_set_tss(struct cpu_gdt *gdt, unsigned int selector,
+ const struct cpu_tss *tss)
{
- struct cpu_pseudo_desc gdtr;
- void *pcpu_area;
+ struct cpu_sysseg_desc *desc;
- pcpu_area = percpu_area(cpu->id);
+ desc = cpu_gdt_get_desc(gdt, selector);
+ cpu_sysseg_desc_init_tss(desc, tss);
+}
- cpu_seg_set_null(cpu->gdt, CPU_GDT_SEL_NULL);
- cpu_seg_set_code(cpu->gdt, CPU_GDT_SEL_CODE);
- cpu_seg_set_data(cpu->gdt, CPU_GDT_SEL_DATA, 0);
- cpu_seg_set_tss(cpu->gdt, CPU_GDT_SEL_TSS, &cpu->tss);
+static void __init
+cpu_gdt_init(struct cpu_gdt *gdt, const struct cpu_tss *tss,
+ const struct cpu_tss *df_tss, void *pcpu_area)
+{
+ cpu_gdt_set_null(gdt, CPU_GDT_SEL_NULL);
+ cpu_gdt_set_code(gdt, CPU_GDT_SEL_CODE);
+ cpu_gdt_set_data(gdt, CPU_GDT_SEL_DATA, 0);
+ cpu_gdt_set_tss(gdt, CPU_GDT_SEL_TSS, tss);
-#ifndef __LP64__
- cpu_seg_set_tss(cpu->gdt, CPU_GDT_SEL_DF_TSS, &cpu->double_fault_tss);
- cpu_seg_set_data(cpu->gdt, CPU_GDT_SEL_PERCPU, (unsigned long)pcpu_area);
- cpu_seg_set_data(cpu->gdt, CPU_GDT_SEL_TLS, (unsigned long)&cpu_tls_seg);
+#ifdef __LP64__
+ (void)df_tss;
+ (void)pcpu_area;
+#else /* __LP64__ */
+ cpu_gdt_set_tss(gdt, CPU_GDT_SEL_DF_TSS, df_tss);
+ cpu_gdt_set_data(gdt, CPU_GDT_SEL_PERCPU, pcpu_area);
+ cpu_gdt_set_data(gdt, CPU_GDT_SEL_TLS, &cpu_tls_seg);
#endif /* __LP64__ */
-
- cpu_init_gdtr(&gdtr, cpu);
- cpu_load_gdt(&gdtr);
- cpu_set_percpu_area(cpu, pcpu_area);
- cpu_set_tls_area();
}
static void __init
-cpu_init_ldt(void)
+cpu_gdt_load(const struct cpu_gdt *gdt)
{
- asm volatile("lldt %w0" : : "q" (CPU_GDT_SEL_NULL));
+ struct cpu_pseudo_desc gdtr;
+
+ gdtr.address = (uintptr_t)gdt->descs;
+ gdtr.limit = sizeof(gdt->descs) - 1;
+ cpu_load_gdt(&gdtr);
}
static void __init
-cpu_init_tss(struct cpu *cpu)
+cpu_tss_init(struct cpu_tss *tss, const void *df_stack_top)
{
- struct cpu_tss *tss;
-
- tss = &cpu->tss;
memset(tss, 0, sizeof(*tss));
#ifdef __LP64__
- assert(cpu->double_fault_stack != NULL);
- tss->ist[CPU_TSS_IST_DF] = (unsigned long)cpu->double_fault_stack
- + TRAP_STACK_SIZE;
+ tss->ist[CPU_TSS_IST_DF] = (uintptr_t)df_stack_top;
+#else /* __LP64__ */
+ (void)df_stack_top;
#endif /* __LP64__ */
-
- asm volatile("ltr %w0" : : "q" (CPU_GDT_SEL_TSS));
}
#ifndef __LP64__
static void __init
-cpu_init_double_fault_tss(struct cpu *cpu)
+cpu_tss_init_i386_double_fault(struct cpu_tss *tss, const void *df_stack_top)
{
- struct cpu_tss *tss;
-
- assert(cpu_double_fault_handler != 0);
- assert(cpu->double_fault_stack != NULL);
-
- tss = &cpu->double_fault_tss;
memset(tss, 0, sizeof(*tss));
tss->cr3 = cpu_get_cr3();
- tss->eip = cpu_double_fault_handler;
+ tss->eip = (uintptr_t)cpu_get_ll_exc_handler(CPU_EXC_DF);
tss->eflags = CPU_EFL_ONE;
- tss->ebp = (unsigned long)cpu->double_fault_stack + TRAP_STACK_SIZE;
+ tss->ebp = (uintptr_t)df_stack_top;
tss->esp = tss->ebp;
tss->es = CPU_GDT_SEL_DATA;
tss->cs = CPU_GDT_SEL_CODE;
@@ -396,56 +856,78 @@ cpu_init_double_fault_tss(struct cpu *cpu)
}
#endif /* __LP64__ */
-void
-cpu_idt_set_gate(unsigned int vector, void (*isr)(void))
+static struct cpu_tss * __init
+cpu_get_tss(struct cpu *cpu)
{
- struct cpu_gate_desc *desc;
-
- assert(vector < ARRAY_SIZE(cpu_idt));
-
- desc = &cpu_idt[vector];
+ return &cpu->tss;
+}
+static struct cpu_tss * __init
+cpu_get_df_tss(struct cpu *cpu)
+{
#ifdef __LP64__
- desc->word4 = 0;
- desc->word3 = (unsigned long)isr >> 32;
+ (void)cpu;
+ return NULL;
+#else /* __LP64__ */
+ return &cpu->df_tss;
#endif /* __LP64__ */
+}
- /* Use interrupt gates only to simplify trap handling */
- desc->word2 = ((unsigned long)isr & CPU_DESC_GATE_OFFSET_HIGH_MASK)
- | CPU_DESC_PRESENT | CPU_DESC_TYPE_GATE_INTR;
- desc->word1 = (CPU_GDT_SEL_CODE << 16)
- | ((unsigned long)isr & CPU_DESC_GATE_OFFSET_LOW_MASK);
+static void * __init
+cpu_get_df_stack_top(struct cpu *cpu)
+{
+ return &cpu->df_stack[sizeof(cpu->df_stack)];
}
-void
-cpu_idt_set_double_fault(void (*isr)(void))
+static void __init
+cpu_init(struct cpu *cpu, unsigned int id, unsigned int apic_id)
{
- struct cpu_gate_desc *desc;
+ memset(cpu, 0, sizeof(*cpu));
+ cpu->id = id;
+ cpu->apic_id = apic_id;
+}
+
+static void __init
+cpu_load_ldt(void)
+{
+ asm volatile("lldt %w0" : : "q" (CPU_GDT_SEL_NULL));
+}
- cpu_double_fault_handler = (unsigned long)isr;
+static void __init
+cpu_load_tss(void)
+{
+ asm volatile("ltr %w0" : : "q" (CPU_GDT_SEL_TSS));
+}
+static void __init
+cpu_set_percpu_area(const struct cpu *cpu, void *area)
+{
#ifdef __LP64__
- cpu_idt_set_gate(TRAP_DF, isr);
- desc = &cpu_idt[TRAP_DF];
- desc->word2 |= CPU_TSS_IST_DF & CPU_DESC_SEG_IST_MASK;
+ unsigned long va;
+
+ va = (unsigned long)area;
+ cpu_set_msr(CPU_MSR_FSBASE, (uint32_t)(va >> 32), (uint32_t)va);
#else /* __LP64__ */
- desc = &cpu_idt[TRAP_DF];
- desc->word2 = CPU_DESC_PRESENT | CPU_DESC_TYPE_GATE_TASK;
- desc->word1 = CPU_GDT_SEL_DF_TSS << 16;
+ asm volatile("mov %0, %%fs" : : "r" (CPU_GDT_SEL_PERCPU));
#endif /* __LP64__ */
+
+ percpu_var(cpu_local_area, cpu->id) = area;
}
-static void
-cpu_load_idt(const void *idt, size_t size)
+static void __init
+cpu_set_tls_area(void)
{
- struct cpu_pseudo_desc idtr;
+#ifdef __LP64__
+ uintptr_t va;
- idtr.address = (uintptr_t)idt;
- idtr.limit = size - 1;
- asm volatile("lidt %0" : : "m" (idtr));
+ va = (uintptr_t)&cpu_tls_seg;
+ cpu_set_msr(CPU_MSR_GSBASE, (uint32_t)(va >> 32), (uint32_t)va);
+#else /* __LP64__ */
+ asm volatile("mov %0, %%gs" : : "r" (CPU_GDT_SEL_TLS));
+#endif /* __LP64__ */
}
-static const struct cpu_vendor *
+static const struct cpu_vendor * __init
cpu_vendor_lookup(const char *str)
{
for (size_t i = 0; i < ARRAY_SIZE(cpu_vendors); i++) {
@@ -471,13 +953,13 @@ cpu_init_vendor_id(struct cpu *cpu)
cpu->vendor_id = vendor->id;
}
-/*
- * Initialize the given cpu structure for the current processor.
- */
static void __init
-cpu_init(struct cpu *cpu)
+cpu_build(struct cpu *cpu)
{
unsigned int eax, ebx, ecx, edx, max_basic, max_extended;
+ void *pcpu_area;
+
+ pcpu_area = percpu_area(cpu->id);
/*
* Assume at least an i586 processor.
@@ -486,13 +968,23 @@ cpu_init(struct cpu *cpu)
cpu_intr_restore(CPU_EFL_ONE);
cpu_set_cr0(CPU_CR0_PG | CPU_CR0_AM | CPU_CR0_WP | CPU_CR0_NE | CPU_CR0_ET
| CPU_CR0_TS | CPU_CR0_MP | CPU_CR0_PE);
- cpu_init_gdt(cpu);
- cpu_init_ldt();
- cpu_init_tss(cpu);
+ cpu_gdt_init(&cpu->gdt, cpu_get_tss(cpu), cpu_get_df_tss(cpu), pcpu_area);
+ cpu_gdt_load(&cpu->gdt);
+ cpu_load_ldt();
+ cpu_tss_init(&cpu->tss, cpu_get_df_stack_top(cpu));
#ifndef __LP64__
- cpu_init_double_fault_tss(cpu);
+ cpu_tss_init_i386_double_fault(&cpu->df_tss, cpu_get_df_stack_top(cpu));
#endif /* __LP64__ */
- cpu_load_idt(cpu_idt, sizeof(cpu_idt));
+ cpu_load_tss();
+ cpu_idt_load(&cpu_idt);
+ cpu_set_percpu_area(cpu, pcpu_area);
+ cpu_set_tls_area();
+
+ /*
+ * Perform the check after initializing the GDT and the per-CPU area
+ * since cpu_id() relies on them to correctly work.
+ */
+ assert(cpu->id == cpu_id());
eax = 0;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
@@ -513,22 +1005,27 @@ cpu_init(struct cpu *cpu)
eax = 1;
cpu_cpuid(&eax, &ebx, &ecx, &edx);
- cpu->type = (eax & CPU_TYPE_MASK) >> CPU_TYPE_SHIFT;
- cpu->family = (eax & CPU_FAMILY_MASK) >> CPU_FAMILY_SHIFT;
+ cpu->type = (eax & CPU_CPUID_TYPE_MASK) >> CPU_CPUID_TYPE_SHIFT;
+ cpu->family = (eax & CPU_CPUID_FAMILY_MASK) >> CPU_CPUID_FAMILY_SHIFT;
if (cpu->family == 0xf) {
- cpu->family += (eax & CPU_EXTFAMILY_MASK) >> CPU_EXTFAMILY_SHIFT;
+ cpu->family += (eax & CPU_CPUID_EXTFAMILY_MASK)
+ >> CPU_CPUID_EXTFAMILY_SHIFT;
}
- cpu->model = (eax & CPU_MODEL_MASK) >> CPU_MODEL_SHIFT;
+ cpu->model = (eax & CPU_CPUID_MODEL_MASK) >> CPU_CPUID_MODEL_SHIFT;
if ((cpu->model == 6) || (cpu->model == 0xf)) {
- cpu->model += (eax & CPU_EXTMODEL_MASK) >> CPU_EXTMODEL_SHIFT;
+ cpu->model += (eax & CPU_CPUID_EXTMODEL_MASK)
+ >> CPU_CPUID_EXTMODEL_SHIFT;
}
- cpu->stepping = (eax & CPU_STEPPING_MASK) >> CPU_STEPPING_SHIFT;
- cpu->clflush_size = ((ebx & CPU_CLFLUSH_MASK) >> CPU_CLFLUSH_SHIFT) * 8;
- cpu->initial_apic_id = (ebx & CPU_APIC_ID_MASK) >> CPU_APIC_ID_SHIFT;
+ cpu->stepping = (eax & CPU_CPUID_STEPPING_MASK)
+ >> CPU_CPUID_STEPPING_SHIFT;
+ cpu->clflush_size = ((ebx & CPU_CPUID_CLFLUSH_MASK)
+ >> CPU_CPUID_CLFLUSH_SHIFT) * 8;
+ cpu->initial_apic_id = (ebx & CPU_CPUID_APIC_ID_MASK)
+ >> CPU_CPUID_APIC_ID_SHIFT;
cpu->features1 = ecx;
cpu->features2 = edx;
@@ -607,10 +1104,11 @@ cpu_setup(void)
{
struct cpu *cpu;
+ cpu_setup_intr();
+
cpu = percpu_ptr(cpu_desc, 0);
- cpu_preinit(cpu, 0, CPU_INVALID_APIC_ID);
- cpu->double_fault_stack = cpu_double_fault_stack; /* XXX */
- cpu_init(cpu);
+ cpu_init(cpu, 0, CPU_INVALID_APIC_ID);
+ cpu_build(cpu);
cpu_measure_freq();
@@ -618,8 +1116,7 @@ cpu_setup(void)
}
INIT_OP_DEFINE(cpu_setup,
- INIT_OP_DEP(percpu_bootstrap, true),
- INIT_OP_DEP(trap_setup, true));
+ INIT_OP_DEP(percpu_bootstrap, true));
static void __init
cpu_panic_on_missing_feature(const char *feature)
@@ -654,11 +1151,12 @@ cpu_check_bsp(void)
return 0;
}
+// TODO Remove panic_setup
INIT_OP_DEFINE(cpu_check_bsp,
INIT_OP_DEP(cpu_setup, true),
INIT_OP_DEP(panic_setup, true));
-void
+void __init
cpu_log_info(const struct cpu *cpu)
{
log_info("cpu%u: %s, type %u, family %u, model %u, stepping %u",
@@ -680,7 +1178,7 @@ cpu_log_info(const struct cpu *cpu)
}
void __init
-cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
+cpu_mp_register_lapic(unsigned int apic_id, bool is_bsp)
{
struct cpu *cpu;
int error;
@@ -703,15 +1201,22 @@ cpu_mp_register_lapic(unsigned int apic_id, int is_bsp)
}
cpu = percpu_ptr(cpu_desc, cpu_nr_active);
- cpu_preinit(cpu, cpu_nr_active, apic_id);
+ cpu_init(cpu, cpu_nr_active, apic_id);
cpu_nr_active++;
}
static void
+cpu_trigger_double_fault(void)
+{
+ asm volatile("movl $0xdead, %esp; push $0");
+}
+
+static void
cpu_shutdown_reset(void)
{
- cpu_load_idt(NULL, 1);
- trap_trigger_double_fault();
+ /* Generate a triple fault */
+ cpu_idt_load(NULL);
+ cpu_trigger_double_fault();
}
static struct shutdown_ops cpu_shutdown_ops = {
@@ -747,7 +1252,6 @@ INIT_OP_DEFINE(cpu_setup_shutdown,
void __init
cpu_mp_setup(void)
{
- struct vm_page *page;
uint16_t reset_vector[2];
struct cpu *cpu;
unsigned int i;
@@ -775,26 +1279,13 @@ cpu_mp_setup(void)
io_write_byte(CPU_MP_CMOS_PORT_REG, CPU_MP_CMOS_REG_RESET);
io_write_byte(CPU_MP_CMOS_PORT_DATA, CPU_MP_CMOS_DATA_RESET_WARM);
- /* TODO Allocate stacks out of the slab allocator for sub-page sizes */
-
for (i = 1; i < cpu_count(); i++) {
cpu = percpu_ptr(cpu_desc, i);
- page = vm_page_alloc(vm_page_order(BOOT_STACK_SIZE),
- VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);
+ cpu->boot_stack = kmem_alloc(BOOT_STACK_SIZE);
- if (page == NULL) {
+ if (!cpu->boot_stack) {
panic("cpu: unable to allocate boot stack for cpu%u", i);
}
-
- cpu->boot_stack = vm_page_direct_ptr(page);
- page = vm_page_alloc(vm_page_order(TRAP_STACK_SIZE),
- VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);
-
- if (page == NULL) {
- panic("cpu: unable to allocate double fault stack for cpu%u", i);
- }
-
- cpu->double_fault_stack = vm_page_direct_ptr(page);
}
/*
@@ -829,7 +1320,7 @@ cpu_ap_setup(void)
struct cpu *cpu;
cpu = percpu_ptr(cpu_desc, boot_ap_id);
- cpu_init(cpu);
+ cpu_build(cpu);
cpu_check(cpu_current());
lapic_ap_setup();
}
@@ -847,35 +1338,5 @@ cpu_halt_broadcast(void)
return;
}
- lapic_ipi_broadcast(TRAP_CPU_HALT);
-}
-
-void
-cpu_halt_intr(struct trap_frame *frame)
-{
- (void)frame;
-
- lapic_eoi();
-
- cpu_halt();
-}
-
-void
-cpu_xcall_intr(struct trap_frame *frame)
-{
- (void)frame;
-
- lapic_eoi();
-
- xcall_intr();
-}
-
-void
-cpu_thread_schedule_intr(struct trap_frame *frame)
-{
- (void)frame;
-
- lapic_eoi();
-
- thread_schedule_intr();
+ lapic_ipi_broadcast(CPU_EXC_HALT);
}
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 0c4e6e1d..78857cdc 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2017 Richard Braun.
+ * Copyright (c) 2010-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,6 +20,60 @@
#include <limits.h>
+#include <machine/page.h>
+
+/*
+ * Architecture defined exception vectors.
+ */
+#define CPU_EXC_DE 0 /* Divide Error */
+#define CPU_EXC_DB 1 /* Debug */
+#define CPU_EXC_NMI 2 /* NMI Interrupt */
+#define CPU_EXC_BP 3 /* Breakpoint */
+#define CPU_EXC_OF 4 /* Overflow */
+#define CPU_EXC_BR 5 /* BOUND Range Exceeded */
+#define CPU_EXC_UD 6 /* Undefined Opcode */
+#define CPU_EXC_NM 7 /* No Math Coprocessor */
+#define CPU_EXC_DF 8 /* Double Fault */
+#define CPU_EXC_TS 10 /* Invalid TSS */
+#define CPU_EXC_NP 11 /* Segment Not Present */
+#define CPU_EXC_SS 12 /* Stack-Segment Fault */
+#define CPU_EXC_GP 13 /* General Protection */
+#define CPU_EXC_PF 14 /* Page Fault */
+#define CPU_EXC_MF 16 /* Math Fault */
+#define CPU_EXC_AC 17 /* Alignment Check */
+#define CPU_EXC_MC 18 /* Machine Check */
+#define CPU_EXC_XM 19 /* SIMD Floating-Point Exception */
+
+/*
+ * Exception vectors used for external interrupts.
+ */
+#define CPU_EXC_INTR_FIRST 32
+#define CPU_EXC_INTR_LAST 223
+
+/*
+ * System defined exception vectors.
+ *
+ * The local APIC assigns one priority every 16 vectors.
+ */
+#define CPU_EXC_XCALL 238
+#define CPU_EXC_THREAD_SCHEDULE 239
+#define CPU_EXC_HALT 240
+#define CPU_EXC_LAPIC_PMC_OF 252
+#define CPU_EXC_LAPIC_TIMER 253
+#define CPU_EXC_LAPIC_ERROR 254
+#define CPU_EXC_LAPIC_SPURIOUS 255
+
+#define CPU_NR_EXC_VECTORS 256
+
+#define CPU_INTR_STACK_SIZE PAGE_SIZE
+
+#define CPU_VENDOR_STR_SIZE 13
+#define CPU_MODEL_NAME_SIZE 49
+
+#define CPU_VENDOR_UNKNOWN 0
+#define CPU_VENDOR_INTEL 1
+#define CPU_VENDOR_AMD 2
+
/*
* L1 cache line size.
*
@@ -49,80 +103,57 @@
/*
* Processor privilege levels.
*/
-#define CPU_PL_KERNEL 0
-#define CPU_PL_USER 3
+#define CPU_PL_KERNEL 0
+#define CPU_PL_USER 3
/*
* Control register 0 flags.
*/
-#define CPU_CR0_PE 0x00000001
-#define CPU_CR0_MP 0x00000002
-#define CPU_CR0_TS 0x00000008
-#define CPU_CR0_ET 0x00000010
-#define CPU_CR0_NE 0x00000020
-#define CPU_CR0_WP 0x00010000
-#define CPU_CR0_AM 0x00040000
-#define CPU_CR0_PG 0x80000000
+#define CPU_CR0_PE 0x00000001
+#define CPU_CR0_MP 0x00000002
+#define CPU_CR0_TS 0x00000008
+#define CPU_CR0_ET 0x00000010
+#define CPU_CR0_NE 0x00000020
+#define CPU_CR0_WP 0x00010000
+#define CPU_CR0_AM 0x00040000
+#define CPU_CR0_PG 0x80000000
/*
* Control register 4 flags.
*/
-#define CPU_CR4_PSE 0x00000010
-#define CPU_CR4_PAE 0x00000020
-#define CPU_CR4_PGE 0x00000080
-
-/*
- * EFLAGS register flags.
- */
-#define CPU_EFL_ONE 0x00000002 /* Reserved, must be one */
-#define CPU_EFL_IF 0x00000200
+#define CPU_CR4_PSE 0x00000010
+#define CPU_CR4_PAE 0x00000020
+#define CPU_CR4_PGE 0x00000080
/*
* Model specific registers.
*/
-#define CPU_MSR_EFER 0xc0000080
-#define CPU_MSR_FSBASE 0xc0000100
-#define CPU_MSR_GSBASE 0xc0000101
+#define CPU_MSR_EFER 0xc0000080
+#define CPU_MSR_FSBASE 0xc0000100
+#define CPU_MSR_GSBASE 0xc0000101
/*
* EFER MSR flags.
*/
-#define CPU_EFER_LME 0x00000100
+#define CPU_EFER_LME 0x00000100
/*
* Feature2 flags.
*
* TODO Better names.
*/
-#define CPU_FEATURE2_FPU 0x00000001
-#define CPU_FEATURE2_PSE 0x00000008
-#define CPU_FEATURE2_PAE 0x00000040
-#define CPU_FEATURE2_MSR 0x00000020
-#define CPU_FEATURE2_CX8 0x00000100
-#define CPU_FEATURE2_APIC 0x00000200
-#define CPU_FEATURE2_PGE 0x00002000
-
-#define CPU_FEATURE4_1GP 0x04000000
-#define CPU_FEATURE4_LM 0x20000000
-
-/*
- * GDT segment selectors.
- */
-#define CPU_GDT_SEL_NULL 0
-#define CPU_GDT_SEL_CODE 8
-#define CPU_GDT_SEL_DATA 16
-#define CPU_GDT_SEL_TSS 24
+#define CPU_FEATURE2_FPU 0x00000001
+#define CPU_FEATURE2_PSE 0x00000008
+#define CPU_FEATURE2_PAE 0x00000040
+#define CPU_FEATURE2_MSR 0x00000020
+#define CPU_FEATURE2_CX8 0x00000100
+#define CPU_FEATURE2_APIC 0x00000200
+#define CPU_FEATURE2_PGE 0x00002000
-#ifdef __LP64__
-#define CPU_GDT_SIZE 40
-#else /* __LP64__ */
-#define CPU_GDT_SEL_DF_TSS 32
-#define CPU_GDT_SEL_PERCPU 40
-#define CPU_GDT_SEL_TLS 48
-#define CPU_GDT_SIZE 56
-#endif /* __LP64__ */
+#define CPU_FEATURE4_1GP 0x04000000
+#define CPU_FEATURE4_LM 0x20000000
-#define CPU_IDT_SIZE 256
+#include <machine/cpu_i.h>
#ifndef __ASSEMBLER__
@@ -137,137 +168,65 @@
#include <machine/pit.h>
#include <machine/ssp.h>
+#define CPU_INTR_TABLE_SIZE (CPU_EXC_INTR_LAST - CPU_EXC_INTR_FIRST)
+
/*
* Gate/segment descriptor bits and masks.
*/
-#define CPU_DESC_TYPE_DATA 0x00000200
-#define CPU_DESC_TYPE_CODE 0x00000a00
-#define CPU_DESC_TYPE_TSS 0x00000900
-#define CPU_DESC_TYPE_GATE_INTR 0x00000e00
-#define CPU_DESC_TYPE_GATE_TASK 0x00000500
-#define CPU_DESC_S 0x00001000
-#define CPU_DESC_PRESENT 0x00008000
-#define CPU_DESC_LONG 0x00200000
-#define CPU_DESC_DB 0x00400000
-#define CPU_DESC_GRAN_4KB 0x00800000
-
-#define CPU_DESC_GATE_OFFSET_LOW_MASK 0x0000ffff
-#define CPU_DESC_GATE_OFFSET_HIGH_MASK 0xffff0000
-#define CPU_DESC_SEG_IST_MASK 0x00000007
-#define CPU_DESC_SEG_BASE_LOW_MASK 0x0000ffff
-#define CPU_DESC_SEG_BASE_MID_MASK 0x00ff0000
-#define CPU_DESC_SEG_BASE_HIGH_MASK 0xff000000
-#define CPU_DESC_SEG_LIMIT_LOW_MASK 0x0000ffff
-#define CPU_DESC_SEG_LIMIT_HIGH_MASK 0x000f0000
+#define CPU_DESC_TYPE_DATA 0x00000200
+#define CPU_DESC_TYPE_CODE 0x00000a00
+#define CPU_DESC_TYPE_TSS 0x00000900
+#define CPU_DESC_TYPE_GATE_INTR 0x00000e00
+#define CPU_DESC_TYPE_GATE_TASK 0x00000500
+#define CPU_DESC_S 0x00001000
+#define CPU_DESC_PRESENT 0x00008000
+#define CPU_DESC_LONG 0x00200000
+#define CPU_DESC_DB 0x00400000
+#define CPU_DESC_GRAN_4KB 0x00800000
-/*
- * Code or data segment descriptor.
- */
-struct cpu_seg_desc {
- uint32_t low;
- uint32_t high;
-};
+#define CPU_DESC_GATE_OFFSET_LOW_MASK 0x0000ffff
+#define CPU_DESC_GATE_OFFSET_HIGH_MASK 0xffff0000
+#define CPU_DESC_SEG_IST_MASK 0x00000007
+#define CPU_DESC_SEG_BASE_LOW_MASK 0x0000ffff
+#define CPU_DESC_SEG_BASE_MID_MASK 0x00ff0000
+#define CPU_DESC_SEG_BASE_HIGH_MASK 0xff000000
+#define CPU_DESC_SEG_LIMIT_LOW_MASK 0x0000ffff
+#define CPU_DESC_SEG_LIMIT_HIGH_MASK 0x000f0000
/*
- * Forward declaration.
- */
-struct trap_frame;
-
-/*
- * IST indexes (0 is reserved).
- */
-#define CPU_TSS_IST_DF 1
-
-struct cpu_tss {
-#ifdef __LP64__
- uint32_t reserved0;
- uint64_t rsp0;
- uint64_t rsp1;
- uint64_t rsp2;
- uint64_t ist[8];
- uint64_t reserved1;
- uint16_t reserved2;
-#else /* __LP64__ */
- uint32_t link;
- uint32_t esp0;
- uint32_t ss0;
- uint32_t esp1;
- uint32_t ss1;
- uint32_t esp2;
- uint32_t ss2;
- uint32_t cr3;
- uint32_t eip;
- uint32_t eflags;
- uint32_t eax;
- uint32_t ecx;
- uint32_t edx;
- uint32_t ebx;
- uint32_t esp;
- uint32_t ebp;
- uint32_t esi;
- uint32_t edi;
- uint32_t es;
- uint32_t cs;
- uint32_t ss;
- uint32_t ds;
- uint32_t fs;
- uint32_t gs;
- uint32_t ldt;
- uint16_t trap_bit;
-#endif /* __LP64__ */
- uint16_t iobp_base;
-} __packed;
-
-#define CPU_VENDOR_STR_SIZE 13
-#define CPU_MODEL_NAME_SIZE 49
-
-#define CPU_VENDOR_UNKNOWN 0
-#define CPU_VENDOR_INTEL 1
-#define CPU_VENDOR_AMD 2
+ * Type for interrupt handler functions.
+ */
+typedef void (*cpu_intr_handler_fn_t)(unsigned int vector);
/*
- * CPU states.
- */
-#define CPU_STATE_OFF 0
-#define CPU_STATE_ON 1
-
-struct cpu {
- unsigned int id;
- unsigned int apic_id;
- char vendor_str[CPU_VENDOR_STR_SIZE];
- char model_name[CPU_MODEL_NAME_SIZE];
- unsigned int cpuid_max_basic;
- unsigned int cpuid_max_extended;
- unsigned int vendor_id;
- unsigned int type;
- unsigned int family;
- unsigned int model;
- unsigned int stepping;
- unsigned int clflush_size;
- unsigned int initial_apic_id;
- unsigned int features1;
- unsigned int features2;
- unsigned int features3;
- unsigned int features4;
- unsigned short phys_addr_width;
- unsigned short virt_addr_width;
- alignas(8) char gdt[CPU_GDT_SIZE];
- struct cpu_tss tss;
-#ifndef __LP64__
- struct cpu_tss double_fault_tss;
-#endif /* __LP64__ */
- volatile int state;
- void *boot_stack;
- void *double_fault_stack;
-};
-
+ * TLS segment, as expected by the compiler.
+ *
+ * TLS isn't actually used inside the kernel. The current purpose of this
+ * segment is to implement stack protection.
+ *
+ * This is a public structure, made available to the boot module so that
+ * C code that runs early correctly works when built with stack protection.
+ */
struct cpu_tls_seg {
uintptr_t unused[SSP_WORD_TLS_OFFSET];
uintptr_t ssp_guard_word;
};
/*
+ * Code or data segment descriptor.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual,
+ * Volume 3 System Programming Guide, 3.4.5 Segment Descriptors.
+ */
+struct cpu_seg_desc {
+ uint32_t low;
+ uint32_t high;
+};
+
+/*
* Macro to create functions that read/write control registers.
+ *
+ * TODO Break down.
*/
#define CPU_DECL_GETSET_CR(name) \
static __always_inline unsigned long \
@@ -300,24 +259,6 @@ CPU_DECL_GETSET_CR(cr3)
CPU_DECL_GETSET_CR(cr4)
/*
- * Return the content of the EFLAGS register.
- *
- * Implies a compiler barrier.
- */
-static __always_inline unsigned long
-cpu_get_eflags(void)
-{
- unsigned long eflags;
-
- asm volatile("pushf\n"
- "pop %0\n"
- : "=r" (eflags)
- : : "memory");
-
- return eflags;
-}
-
-/*
* Enable local interrupts.
*
* Implies a compiler barrier.
@@ -428,17 +369,7 @@ cpu_halt(void)
*/
void cpu_halt_broadcast(void);
-/*
- * Interrupt handler for inter-processor halt requests.
- */
-void cpu_halt_intr(struct trap_frame *frame);
-
-/*
- * This percpu variable contains the address of the percpu area for the local
- * processor. This is normally the same value stored in the percpu module, but
- * it can be directly accessed through a segment register.
- */
-extern void *cpu_local_area;
+/* Generic percpu accessors */
#define cpu_local_ptr(var) \
MACRO_BEGIN \
@@ -453,7 +384,7 @@ MACRO_END
#define cpu_local_var(var) (*cpu_local_ptr(var))
-/* Interrupt-safe percpu accessors for basic types */
+/* Generic interrupt-safe percpu accessors */
#define cpu_local_assign(var, val) \
asm("mov %0, %%fs:%1" \
@@ -523,6 +454,7 @@ cpu_has_global_pages(void)
* pages were previously disabled.
*
* Implies a full memory barrier.
+ * TODO Update barrier description.
*/
static __always_inline void
cpu_enable_global_pages(void)
@@ -533,8 +465,7 @@ cpu_enable_global_pages(void)
/*
* CPUID instruction wrapper.
*
- * The CPUID instruction is a serializing instruction, implying a full
- * memory barrier.
+ * The CPUID instruction is a serializing instruction.
*/
static __always_inline void
cpu_cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
@@ -581,19 +512,11 @@ cpu_set_msr64(uint32_t msr, uint64_t value)
cpu_set_msr(msr, high, low);
}
-static __always_inline uint64_t
-cpu_get_tsc(void)
-{
- uint32_t high, low;
-
- asm volatile("rdtsc" : "=a" (low), "=d" (high));
- return ((uint64_t)high << 32) | low;
-}
-
/*
* Flush non-global TLB entries.
*
* Implies a full memory barrier.
+ * TODO Update barrier description.
*/
static __always_inline void
cpu_tlb_flush(void)
@@ -605,6 +528,7 @@ cpu_tlb_flush(void)
* Flush all TLB entries, including global ones.
*
* Implies a full memory barrier.
+ * TODO Update barrier description.
*/
static __always_inline void
cpu_tlb_flush_all(void)
@@ -631,6 +555,7 @@ cpu_tlb_flush_all(void)
* Flush a single page table entry in the TLB.
*
* Implies a full memory barrier.
+ * TODO Update barrier description.
*/
static __always_inline void
cpu_tlb_flush_va(unsigned long va)
@@ -678,22 +603,14 @@ void cpu_delay(unsigned long usecs);
void * cpu_get_boot_stack(void);
/*
- * Install an interrupt handler in the IDT.
- *
- * These functions may be called before the cpu module is initialized.
- */
-void cpu_idt_set_gate(unsigned int vector, void (*isr)(void));
-void cpu_idt_set_double_fault(void (*isr)(void));
-
-/*
* Log processor information.
*/
void cpu_log_info(const struct cpu *cpu);
/*
- * Register the presence of a local APIC.
+ * Register a local APIC.
*/
-void cpu_mp_register_lapic(unsigned int apic_id, int is_bsp);
+void cpu_mp_register_lapic(unsigned int apic_id, bool is_bsp);
/*
* Start application processors.
@@ -724,27 +641,29 @@ cpu_apic_id(unsigned int cpu)
static inline void
cpu_send_xcall(unsigned int cpu)
{
- lapic_ipi_send(cpu_apic_id(cpu), TRAP_XCALL);
+ lapic_ipi_send(cpu_apic_id(cpu), CPU_EXC_XCALL);
}
/*
- * Interrupt handler for cross-calls.
- */
-void cpu_xcall_intr(struct trap_frame *frame);
-
-/*
* Send a scheduling interrupt to a remote processor.
*/
static inline void
cpu_send_thread_schedule(unsigned int cpu)
{
- lapic_ipi_send(cpu_apic_id(cpu), TRAP_THREAD_SCHEDULE);
+ lapic_ipi_send(cpu_apic_id(cpu), CPU_EXC_THREAD_SCHEDULE);
}
/*
- * Interrupt handler for scheduling requests.
+ * Register an interrupt handler.
+ *
+ * This function is only available during system initialization, before the
+ * scheduler is started. It is meant for architectural interrupts, including
+ * interrupt controllers, and not directly for drivers, which should use
+ * the machine-independent intr module instead.
+ *
+ * Registration is system-wide.
*/
-void cpu_thread_schedule_intr(struct trap_frame *frame);
+void cpu_register_intr(unsigned int vector, cpu_intr_handler_fn_t fn);
/*
* This init operation provides :
diff --git a/arch/x86/machine/cpu_asm.S b/arch/x86/machine/cpu_asm.S
index 9d479913..96511b14 100644
--- a/arch/x86/machine/cpu_asm.S
+++ b/arch/x86/machine/cpu_asm.S
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2014 Richard Braun.
+ * Copyright (c) 2011-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,31 +19,723 @@
#include <machine/asm.h>
#include <machine/cpu.h>
-.section INIT_SECTION
+.text
+
+#ifdef __LP64__
+
+.macro cpu_ll_exc_store_registers
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %r11
+ pushq %r10
+ pushq %r9
+ pushq %r8
+ pushq %rdi
+ pushq %rsi
+ pushq %rbp
+ pushq %rdx
+ pushq %rcx
+ pushq %rbx
+ pushq %rax
+.endm
+
+.macro cpu_ll_exc_load_registers
+ popq %rax
+ popq %rbx
+ popq %rcx
+ popq %rdx
+ popq %rbp
+ popq %rsi
+ popq %rdi
+ popq %r8
+ popq %r9
+ popq %r10
+ popq %r11
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ addq $16, %rsp /* skip vector and error */
+.endm
+
+.macro cpu_ll_exc_enter vector
+ pushq $\vector
+ cpu_ll_exc_store_registers
+ xorq %rbp, %rbp /* block stack tracing */
+ movq %rsp, %rbx /* save frame */
+.endm
+
+.macro cpu_ll_exc_leave
+ movq %rbx, %rsp /* restore stack */
+ call thread_schedule /* schedule threads */
+ cpu_ll_exc_load_registers
+ iretq
+.endm
+
+.macro cpu_ll_exc_handle vector
+ cpu_ll_exc_enter \vector
+ movq %rbx, %rdi
+ call cpu_exc_main
+ cpu_ll_exc_leave
+.endm
+
+.macro cpu_ll_intr_handle vector
+ cpu_ll_exc_enter \vector
+ movq %rbx, %rdi
+ call cpu_get_intr_stack
+ testq %rax, %rax /* switch stack ? */
+ jz 1f
+ movq %rax, %rsp /* switch to interrupt stack */
+1:
+ movq %rbx, %rdi
+ call cpu_intr_main
+ cpu_ll_exc_leave
+.endm
+
+#else /* __LP64__ */
+
+.macro cpu_ll_exc_store_registers
+ pushw %gs
+ pushw %fs
+ pushw %es
+ pushw %ds
+ pushl %edi
+ pushl %esi
+ pushl %ebp
+ pushl %edx
+ pushl %ecx
+ pushl %ebx
+ pushl %eax
+.endm
+
+/* XXX Don't load segment registers for now */
+.macro cpu_ll_exc_load_registers
+ popl %eax
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %ebp
+ popl %esi
+ popl %edi
+ addl $16, %esp /* skip segment registers, vector and error */
+.endm
+
+.macro cpu_ll_exc_enter vector
+ pushl $\vector
+ cpu_ll_exc_store_registers
+ xorl %ebp, %ebp /* block stack tracing */
+ movl %esp, %ebx /* save frame */
+.endm
+
+.macro cpu_ll_exc_leave
+ movl %ebx, %esp /* restore stack */
+ call thread_schedule /* schedule threads */
+ cpu_ll_exc_load_registers
+ iretl
+.endm
+
+.macro cpu_ll_exc_handle vector
+ cpu_ll_exc_enter \vector
+ pushl %ebx
+ call cpu_exc_main
+ cpu_ll_exc_leave
+.endm
+
+.macro cpu_ll_intr_handle vector
+ cpu_ll_exc_enter \vector
+ pushl %ebx
+ call cpu_get_intr_stack
+ addl $4, %esp /* fix up stack pointer */
+ testl %eax, %eax /* switch stack ? */
+ jz 1f
+ movl %eax, %esp /* switch to interrupt stack */
+1:
+ pushl %ebx
+ call cpu_intr_main
+ cpu_ll_exc_leave
+.endm
+
+#endif /* __LP64__ */
+
+#define CPU_LL_EXC_HANDLER(vector, name) \
+ASM_ENTRY(name) \
+ push $0; \
+ cpu_ll_exc_handle vector; \
+ASM_END(name)
+
+#define CPU_LL_EXC_HANDLER_WITH_ERROR(vector, name) \
+ASM_ENTRY(name) \
+ cpu_ll_exc_handle vector; \
+ASM_END(name)
+
+#define CPU_LL_INTR_HANDLER(vector, name) \
+ASM_ENTRY(name) \
+ push $0; \
+ cpu_ll_intr_handle vector; \
+ASM_END(name)
+
+/*
+ * Low level handlers for architectural exceptions.
+ */
+CPU_LL_EXC_HANDLER(CPU_EXC_DE, cpu_ll_exc_divide_error)
+CPU_LL_EXC_HANDLER(CPU_EXC_DB, cpu_ll_exc_debug)
+CPU_LL_INTR_HANDLER(CPU_EXC_NMI, cpu_ll_exc_nmi)
+CPU_LL_EXC_HANDLER(CPU_EXC_BP, cpu_ll_exc_breakpoint)
+CPU_LL_EXC_HANDLER(CPU_EXC_OF, cpu_ll_exc_overflow)
+CPU_LL_EXC_HANDLER(CPU_EXC_BR, cpu_ll_exc_bound_range)
+CPU_LL_EXC_HANDLER(CPU_EXC_UD, cpu_ll_exc_undefined_opcode)
+CPU_LL_EXC_HANDLER(CPU_EXC_NM, cpu_ll_exc_no_math_coprocessor)
+CPU_LL_EXC_HANDLER_WITH_ERROR(CPU_EXC_DF, cpu_ll_exc_double_fault)
+CPU_LL_EXC_HANDLER_WITH_ERROR(CPU_EXC_TS, cpu_ll_exc_invalid_tss)
+CPU_LL_EXC_HANDLER_WITH_ERROR(CPU_EXC_NP, cpu_ll_exc_segment_not_present)
+CPU_LL_EXC_HANDLER_WITH_ERROR(CPU_EXC_SS, cpu_ll_exc_stack_segment_fault)
+CPU_LL_EXC_HANDLER_WITH_ERROR(CPU_EXC_GP, cpu_ll_exc_general_protection)
+CPU_LL_EXC_HANDLER_WITH_ERROR(CPU_EXC_PF, cpu_ll_exc_page_fault)
+CPU_LL_EXC_HANDLER(CPU_EXC_MF, cpu_ll_exc_math_fault)
+CPU_LL_EXC_HANDLER_WITH_ERROR(CPU_EXC_AC, cpu_ll_exc_alignment_check)
+CPU_LL_INTR_HANDLER(CPU_EXC_MC, cpu_ll_exc_machine_check)
+CPU_LL_EXC_HANDLER(CPU_EXC_XM, cpu_ll_exc_simd_fp_exception)
+
+/*
+ * Low level handlers for reserved exceptions.
+ *
+ * These exceptions should normally never occur, but have handlers ready just
+ * in case.
+ */
+CPU_LL_EXC_HANDLER(9, cpu_ll_exc_9)
+CPU_LL_EXC_HANDLER(15, cpu_ll_exc_15)
+CPU_LL_EXC_HANDLER(20, cpu_ll_exc_20)
+CPU_LL_EXC_HANDLER(21, cpu_ll_exc_21)
+CPU_LL_EXC_HANDLER(22, cpu_ll_exc_22)
+CPU_LL_EXC_HANDLER(23, cpu_ll_exc_23)
+CPU_LL_EXC_HANDLER(24, cpu_ll_exc_24)
+CPU_LL_EXC_HANDLER(25, cpu_ll_exc_25)
+CPU_LL_EXC_HANDLER(26, cpu_ll_exc_26)
+CPU_LL_EXC_HANDLER(27, cpu_ll_exc_27)
+CPU_LL_EXC_HANDLER(28, cpu_ll_exc_28)
+CPU_LL_EXC_HANDLER(29, cpu_ll_exc_29)
+CPU_LL_EXC_HANDLER(30, cpu_ll_exc_30)
+CPU_LL_EXC_HANDLER(31, cpu_ll_exc_31)
+
+/* Generic low level interrupt handlers */
+CPU_LL_INTR_HANDLER(32, cpu_ll_exc_32)
+CPU_LL_INTR_HANDLER(33, cpu_ll_exc_33)
+CPU_LL_INTR_HANDLER(34, cpu_ll_exc_34)
+CPU_LL_INTR_HANDLER(35, cpu_ll_exc_35)
+CPU_LL_INTR_HANDLER(36, cpu_ll_exc_36)
+CPU_LL_INTR_HANDLER(37, cpu_ll_exc_37)
+CPU_LL_INTR_HANDLER(38, cpu_ll_exc_38)
+CPU_LL_INTR_HANDLER(39, cpu_ll_exc_39)
+CPU_LL_INTR_HANDLER(40, cpu_ll_exc_40)
+CPU_LL_INTR_HANDLER(41, cpu_ll_exc_41)
+CPU_LL_INTR_HANDLER(42, cpu_ll_exc_42)
+CPU_LL_INTR_HANDLER(43, cpu_ll_exc_43)
+CPU_LL_INTR_HANDLER(44, cpu_ll_exc_44)
+CPU_LL_INTR_HANDLER(45, cpu_ll_exc_45)
+CPU_LL_INTR_HANDLER(46, cpu_ll_exc_46)
+CPU_LL_INTR_HANDLER(47, cpu_ll_exc_47)
+CPU_LL_INTR_HANDLER(48, cpu_ll_exc_48)
+CPU_LL_INTR_HANDLER(49, cpu_ll_exc_49)
+CPU_LL_INTR_HANDLER(50, cpu_ll_exc_50)
+CPU_LL_INTR_HANDLER(51, cpu_ll_exc_51)
+CPU_LL_INTR_HANDLER(52, cpu_ll_exc_52)
+CPU_LL_INTR_HANDLER(53, cpu_ll_exc_53)
+CPU_LL_INTR_HANDLER(54, cpu_ll_exc_54)
+CPU_LL_INTR_HANDLER(55, cpu_ll_exc_55)
+CPU_LL_INTR_HANDLER(56, cpu_ll_exc_56)
+CPU_LL_INTR_HANDLER(57, cpu_ll_exc_57)
+CPU_LL_INTR_HANDLER(58, cpu_ll_exc_58)
+CPU_LL_INTR_HANDLER(59, cpu_ll_exc_59)
+CPU_LL_INTR_HANDLER(60, cpu_ll_exc_60)
+CPU_LL_INTR_HANDLER(61, cpu_ll_exc_61)
+CPU_LL_INTR_HANDLER(62, cpu_ll_exc_62)
+CPU_LL_INTR_HANDLER(63, cpu_ll_exc_63)
+CPU_LL_INTR_HANDLER(64, cpu_ll_exc_64)
+CPU_LL_INTR_HANDLER(65, cpu_ll_exc_65)
+CPU_LL_INTR_HANDLER(66, cpu_ll_exc_66)
+CPU_LL_INTR_HANDLER(67, cpu_ll_exc_67)
+CPU_LL_INTR_HANDLER(68, cpu_ll_exc_68)
+CPU_LL_INTR_HANDLER(69, cpu_ll_exc_69)
+CPU_LL_INTR_HANDLER(70, cpu_ll_exc_70)
+CPU_LL_INTR_HANDLER(71, cpu_ll_exc_71)
+CPU_LL_INTR_HANDLER(72, cpu_ll_exc_72)
+CPU_LL_INTR_HANDLER(73, cpu_ll_exc_73)
+CPU_LL_INTR_HANDLER(74, cpu_ll_exc_74)
+CPU_LL_INTR_HANDLER(75, cpu_ll_exc_75)
+CPU_LL_INTR_HANDLER(76, cpu_ll_exc_76)
+CPU_LL_INTR_HANDLER(77, cpu_ll_exc_77)
+CPU_LL_INTR_HANDLER(78, cpu_ll_exc_78)
+CPU_LL_INTR_HANDLER(79, cpu_ll_exc_79)
+CPU_LL_INTR_HANDLER(80, cpu_ll_exc_80)
+CPU_LL_INTR_HANDLER(81, cpu_ll_exc_81)
+CPU_LL_INTR_HANDLER(82, cpu_ll_exc_82)
+CPU_LL_INTR_HANDLER(83, cpu_ll_exc_83)
+CPU_LL_INTR_HANDLER(84, cpu_ll_exc_84)
+CPU_LL_INTR_HANDLER(85, cpu_ll_exc_85)
+CPU_LL_INTR_HANDLER(86, cpu_ll_exc_86)
+CPU_LL_INTR_HANDLER(87, cpu_ll_exc_87)
+CPU_LL_INTR_HANDLER(88, cpu_ll_exc_88)
+CPU_LL_INTR_HANDLER(89, cpu_ll_exc_89)
+CPU_LL_INTR_HANDLER(90, cpu_ll_exc_90)
+CPU_LL_INTR_HANDLER(91, cpu_ll_exc_91)
+CPU_LL_INTR_HANDLER(92, cpu_ll_exc_92)
+CPU_LL_INTR_HANDLER(93, cpu_ll_exc_93)
+CPU_LL_INTR_HANDLER(94, cpu_ll_exc_94)
+CPU_LL_INTR_HANDLER(95, cpu_ll_exc_95)
+CPU_LL_INTR_HANDLER(96, cpu_ll_exc_96)
+CPU_LL_INTR_HANDLER(97, cpu_ll_exc_97)
+CPU_LL_INTR_HANDLER(98, cpu_ll_exc_98)
+CPU_LL_INTR_HANDLER(99, cpu_ll_exc_99)
+CPU_LL_INTR_HANDLER(100, cpu_ll_exc_100)
+CPU_LL_INTR_HANDLER(101, cpu_ll_exc_101)
+CPU_LL_INTR_HANDLER(102, cpu_ll_exc_102)
+CPU_LL_INTR_HANDLER(103, cpu_ll_exc_103)
+CPU_LL_INTR_HANDLER(104, cpu_ll_exc_104)
+CPU_LL_INTR_HANDLER(105, cpu_ll_exc_105)
+CPU_LL_INTR_HANDLER(106, cpu_ll_exc_106)
+CPU_LL_INTR_HANDLER(107, cpu_ll_exc_107)
+CPU_LL_INTR_HANDLER(108, cpu_ll_exc_108)
+CPU_LL_INTR_HANDLER(109, cpu_ll_exc_109)
+CPU_LL_INTR_HANDLER(110, cpu_ll_exc_110)
+CPU_LL_INTR_HANDLER(111, cpu_ll_exc_111)
+CPU_LL_INTR_HANDLER(112, cpu_ll_exc_112)
+CPU_LL_INTR_HANDLER(113, cpu_ll_exc_113)
+CPU_LL_INTR_HANDLER(114, cpu_ll_exc_114)
+CPU_LL_INTR_HANDLER(115, cpu_ll_exc_115)
+CPU_LL_INTR_HANDLER(116, cpu_ll_exc_116)
+CPU_LL_INTR_HANDLER(117, cpu_ll_exc_117)
+CPU_LL_INTR_HANDLER(118, cpu_ll_exc_118)
+CPU_LL_INTR_HANDLER(119, cpu_ll_exc_119)
+CPU_LL_INTR_HANDLER(120, cpu_ll_exc_120)
+CPU_LL_INTR_HANDLER(121, cpu_ll_exc_121)
+CPU_LL_INTR_HANDLER(122, cpu_ll_exc_122)
+CPU_LL_INTR_HANDLER(123, cpu_ll_exc_123)
+CPU_LL_INTR_HANDLER(124, cpu_ll_exc_124)
+CPU_LL_INTR_HANDLER(125, cpu_ll_exc_125)
+CPU_LL_INTR_HANDLER(126, cpu_ll_exc_126)
+CPU_LL_INTR_HANDLER(127, cpu_ll_exc_127)
+CPU_LL_INTR_HANDLER(128, cpu_ll_exc_128)
+CPU_LL_INTR_HANDLER(129, cpu_ll_exc_129)
+CPU_LL_INTR_HANDLER(130, cpu_ll_exc_130)
+CPU_LL_INTR_HANDLER(131, cpu_ll_exc_131)
+CPU_LL_INTR_HANDLER(132, cpu_ll_exc_132)
+CPU_LL_INTR_HANDLER(133, cpu_ll_exc_133)
+CPU_LL_INTR_HANDLER(134, cpu_ll_exc_134)
+CPU_LL_INTR_HANDLER(135, cpu_ll_exc_135)
+CPU_LL_INTR_HANDLER(136, cpu_ll_exc_136)
+CPU_LL_INTR_HANDLER(137, cpu_ll_exc_137)
+CPU_LL_INTR_HANDLER(138, cpu_ll_exc_138)
+CPU_LL_INTR_HANDLER(139, cpu_ll_exc_139)
+CPU_LL_INTR_HANDLER(140, cpu_ll_exc_140)
+CPU_LL_INTR_HANDLER(141, cpu_ll_exc_141)
+CPU_LL_INTR_HANDLER(142, cpu_ll_exc_142)
+CPU_LL_INTR_HANDLER(143, cpu_ll_exc_143)
+CPU_LL_INTR_HANDLER(144, cpu_ll_exc_144)
+CPU_LL_INTR_HANDLER(145, cpu_ll_exc_145)
+CPU_LL_INTR_HANDLER(146, cpu_ll_exc_146)
+CPU_LL_INTR_HANDLER(147, cpu_ll_exc_147)
+CPU_LL_INTR_HANDLER(148, cpu_ll_exc_148)
+CPU_LL_INTR_HANDLER(149, cpu_ll_exc_149)
+CPU_LL_INTR_HANDLER(150, cpu_ll_exc_150)
+CPU_LL_INTR_HANDLER(151, cpu_ll_exc_151)
+CPU_LL_INTR_HANDLER(152, cpu_ll_exc_152)
+CPU_LL_INTR_HANDLER(153, cpu_ll_exc_153)
+CPU_LL_INTR_HANDLER(154, cpu_ll_exc_154)
+CPU_LL_INTR_HANDLER(155, cpu_ll_exc_155)
+CPU_LL_INTR_HANDLER(156, cpu_ll_exc_156)
+CPU_LL_INTR_HANDLER(157, cpu_ll_exc_157)
+CPU_LL_INTR_HANDLER(158, cpu_ll_exc_158)
+CPU_LL_INTR_HANDLER(159, cpu_ll_exc_159)
+CPU_LL_INTR_HANDLER(160, cpu_ll_exc_160)
+CPU_LL_INTR_HANDLER(161, cpu_ll_exc_161)
+CPU_LL_INTR_HANDLER(162, cpu_ll_exc_162)
+CPU_LL_INTR_HANDLER(163, cpu_ll_exc_163)
+CPU_LL_INTR_HANDLER(164, cpu_ll_exc_164)
+CPU_LL_INTR_HANDLER(165, cpu_ll_exc_165)
+CPU_LL_INTR_HANDLER(166, cpu_ll_exc_166)
+CPU_LL_INTR_HANDLER(167, cpu_ll_exc_167)
+CPU_LL_INTR_HANDLER(168, cpu_ll_exc_168)
+CPU_LL_INTR_HANDLER(169, cpu_ll_exc_169)
+CPU_LL_INTR_HANDLER(170, cpu_ll_exc_170)
+CPU_LL_INTR_HANDLER(171, cpu_ll_exc_171)
+CPU_LL_INTR_HANDLER(172, cpu_ll_exc_172)
+CPU_LL_INTR_HANDLER(173, cpu_ll_exc_173)
+CPU_LL_INTR_HANDLER(174, cpu_ll_exc_174)
+CPU_LL_INTR_HANDLER(175, cpu_ll_exc_175)
+CPU_LL_INTR_HANDLER(176, cpu_ll_exc_176)
+CPU_LL_INTR_HANDLER(177, cpu_ll_exc_177)
+CPU_LL_INTR_HANDLER(178, cpu_ll_exc_178)
+CPU_LL_INTR_HANDLER(179, cpu_ll_exc_179)
+CPU_LL_INTR_HANDLER(180, cpu_ll_exc_180)
+CPU_LL_INTR_HANDLER(181, cpu_ll_exc_181)
+CPU_LL_INTR_HANDLER(182, cpu_ll_exc_182)
+CPU_LL_INTR_HANDLER(183, cpu_ll_exc_183)
+CPU_LL_INTR_HANDLER(184, cpu_ll_exc_184)
+CPU_LL_INTR_HANDLER(185, cpu_ll_exc_185)
+CPU_LL_INTR_HANDLER(186, cpu_ll_exc_186)
+CPU_LL_INTR_HANDLER(187, cpu_ll_exc_187)
+CPU_LL_INTR_HANDLER(188, cpu_ll_exc_188)
+CPU_LL_INTR_HANDLER(189, cpu_ll_exc_189)
+CPU_LL_INTR_HANDLER(190, cpu_ll_exc_190)
+CPU_LL_INTR_HANDLER(191, cpu_ll_exc_191)
+CPU_LL_INTR_HANDLER(192, cpu_ll_exc_192)
+CPU_LL_INTR_HANDLER(193, cpu_ll_exc_193)
+CPU_LL_INTR_HANDLER(194, cpu_ll_exc_194)
+CPU_LL_INTR_HANDLER(195, cpu_ll_exc_195)
+CPU_LL_INTR_HANDLER(196, cpu_ll_exc_196)
+CPU_LL_INTR_HANDLER(197, cpu_ll_exc_197)
+CPU_LL_INTR_HANDLER(198, cpu_ll_exc_198)
+CPU_LL_INTR_HANDLER(199, cpu_ll_exc_199)
+CPU_LL_INTR_HANDLER(200, cpu_ll_exc_200)
+CPU_LL_INTR_HANDLER(201, cpu_ll_exc_201)
+CPU_LL_INTR_HANDLER(202, cpu_ll_exc_202)
+CPU_LL_INTR_HANDLER(203, cpu_ll_exc_203)
+CPU_LL_INTR_HANDLER(204, cpu_ll_exc_204)
+CPU_LL_INTR_HANDLER(205, cpu_ll_exc_205)
+CPU_LL_INTR_HANDLER(206, cpu_ll_exc_206)
+CPU_LL_INTR_HANDLER(207, cpu_ll_exc_207)
+CPU_LL_INTR_HANDLER(208, cpu_ll_exc_208)
+CPU_LL_INTR_HANDLER(209, cpu_ll_exc_209)
+CPU_LL_INTR_HANDLER(210, cpu_ll_exc_210)
+CPU_LL_INTR_HANDLER(211, cpu_ll_exc_211)
+CPU_LL_INTR_HANDLER(212, cpu_ll_exc_212)
+CPU_LL_INTR_HANDLER(213, cpu_ll_exc_213)
+CPU_LL_INTR_HANDLER(214, cpu_ll_exc_214)
+CPU_LL_INTR_HANDLER(215, cpu_ll_exc_215)
+CPU_LL_INTR_HANDLER(216, cpu_ll_exc_216)
+CPU_LL_INTR_HANDLER(217, cpu_ll_exc_217)
+CPU_LL_INTR_HANDLER(218, cpu_ll_exc_218)
+CPU_LL_INTR_HANDLER(219, cpu_ll_exc_219)
+CPU_LL_INTR_HANDLER(220, cpu_ll_exc_220)
+CPU_LL_INTR_HANDLER(221, cpu_ll_exc_221)
+CPU_LL_INTR_HANDLER(222, cpu_ll_exc_222)
+CPU_LL_INTR_HANDLER(223, cpu_ll_exc_223)
+CPU_LL_INTR_HANDLER(224, cpu_ll_exc_224)
+CPU_LL_INTR_HANDLER(225, cpu_ll_exc_225)
+CPU_LL_INTR_HANDLER(226, cpu_ll_exc_226)
+CPU_LL_INTR_HANDLER(227, cpu_ll_exc_227)
+CPU_LL_INTR_HANDLER(228, cpu_ll_exc_228)
+CPU_LL_INTR_HANDLER(229, cpu_ll_exc_229)
+CPU_LL_INTR_HANDLER(230, cpu_ll_exc_230)
+CPU_LL_INTR_HANDLER(231, cpu_ll_exc_231)
+CPU_LL_INTR_HANDLER(232, cpu_ll_exc_232)
+CPU_LL_INTR_HANDLER(233, cpu_ll_exc_233)
+CPU_LL_INTR_HANDLER(234, cpu_ll_exc_234)
+CPU_LL_INTR_HANDLER(235, cpu_ll_exc_235)
+CPU_LL_INTR_HANDLER(236, cpu_ll_exc_236)
+CPU_LL_INTR_HANDLER(237, cpu_ll_exc_237)
+CPU_LL_INTR_HANDLER(238, cpu_ll_exc_238)
+CPU_LL_INTR_HANDLER(239, cpu_ll_exc_239)
+CPU_LL_INTR_HANDLER(240, cpu_ll_exc_240)
+CPU_LL_INTR_HANDLER(241, cpu_ll_exc_241)
+CPU_LL_INTR_HANDLER(242, cpu_ll_exc_242)
+CPU_LL_INTR_HANDLER(243, cpu_ll_exc_243)
+CPU_LL_INTR_HANDLER(244, cpu_ll_exc_244)
+CPU_LL_INTR_HANDLER(245, cpu_ll_exc_245)
+CPU_LL_INTR_HANDLER(246, cpu_ll_exc_246)
+CPU_LL_INTR_HANDLER(247, cpu_ll_exc_247)
+CPU_LL_INTR_HANDLER(248, cpu_ll_exc_248)
+CPU_LL_INTR_HANDLER(249, cpu_ll_exc_249)
+CPU_LL_INTR_HANDLER(250, cpu_ll_exc_250)
+CPU_LL_INTR_HANDLER(251, cpu_ll_exc_251)
+CPU_LL_INTR_HANDLER(252, cpu_ll_exc_252)
+CPU_LL_INTR_HANDLER(253, cpu_ll_exc_253)
+CPU_LL_INTR_HANDLER(254, cpu_ll_exc_254)
+CPU_LL_INTR_HANDLER(255, cpu_ll_exc_255)
+
+#ifdef __LP64__
+#define CPU_LL_EXC_HANDLER_ADDR(name) .quad name
+#else /* __LP64__ */
+#define CPU_LL_EXC_HANDLER_ADDR(name) .long name
+#endif /* __LP64__ */
+
+.section INIT_DATA_SECTION
+
+/* See the C declaration */
+ASM_DATA(cpu_ll_exc_handler_addrs)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_divide_error)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_debug)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_nmi)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_breakpoint)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_overflow)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_bound_range)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_undefined_opcode)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_no_math_coprocessor)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_double_fault)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_9)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_invalid_tss)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_segment_not_present)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_stack_segment_fault)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_general_protection)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_page_fault)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_15)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_math_fault)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_alignment_check)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_machine_check)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_simd_fp_exception)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_20)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_21)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_22)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_23)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_24)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_25)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_26)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_27)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_28)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_29)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_30)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_31)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_32)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_33)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_34)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_35)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_36)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_37)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_38)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_39)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_40)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_41)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_42)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_43)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_44)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_45)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_46)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_47)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_48)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_49)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_50)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_51)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_52)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_53)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_54)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_55)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_56)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_57)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_58)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_59)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_60)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_61)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_62)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_63)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_64)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_65)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_66)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_67)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_68)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_69)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_70)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_71)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_72)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_73)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_74)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_75)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_76)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_77)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_78)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_79)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_80)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_81)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_82)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_83)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_84)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_85)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_86)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_87)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_88)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_89)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_90)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_91)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_92)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_93)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_94)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_95)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_96)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_97)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_98)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_99)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_100)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_101)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_102)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_103)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_104)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_105)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_106)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_107)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_108)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_109)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_110)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_111)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_112)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_113)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_114)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_115)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_116)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_117)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_118)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_119)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_120)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_121)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_122)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_123)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_124)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_125)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_126)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_127)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_128)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_129)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_130)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_131)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_132)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_133)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_134)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_135)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_136)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_137)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_138)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_139)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_140)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_141)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_142)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_143)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_144)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_145)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_146)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_147)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_148)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_149)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_150)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_151)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_152)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_153)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_154)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_155)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_156)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_157)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_158)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_159)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_160)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_161)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_162)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_163)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_164)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_165)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_166)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_167)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_168)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_169)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_170)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_171)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_172)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_173)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_174)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_175)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_176)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_177)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_178)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_179)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_180)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_181)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_182)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_183)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_184)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_185)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_186)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_187)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_188)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_189)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_190)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_191)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_192)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_193)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_194)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_195)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_196)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_197)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_198)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_199)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_200)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_201)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_202)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_203)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_204)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_205)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_206)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_207)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_208)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_209)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_210)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_211)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_212)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_213)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_214)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_215)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_216)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_217)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_218)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_219)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_220)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_221)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_222)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_223)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_224)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_225)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_226)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_227)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_228)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_229)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_230)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_231)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_232)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_233)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_234)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_235)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_236)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_237)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_238)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_239)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_240)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_241)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_242)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_243)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_244)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_245)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_246)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_247)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_248)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_249)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_250)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_251)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_252)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_253)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_254)
+ CPU_LL_EXC_HANDLER_ADDR(cpu_ll_exc_255)
+ASM_END(cpu_ll_exc_handler_addrs)
ASM_ENTRY(cpu_load_gdt)
#ifdef __LP64__
- lgdt (%rdi)
+ lgdt (%rdi)
#else /* __LP64__ */
- movl 4(%esp), %eax
- lgdt (%eax)
+ movl 4(%esp), %eax
+ lgdt (%eax)
#endif /* __LP64__ */
- movl $CPU_GDT_SEL_DATA, %eax
- movl %eax, %ds
- movl %eax, %es
- movl %eax, %ss
+ movl $CPU_GDT_SEL_DATA, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
- /* Alter the stack to reload the code segment using a far return */
+ /* Alter the stack to reload the code segment using a far return */
#ifdef __LP64__
- popq %rax
- pushq $CPU_GDT_SEL_CODE
- pushq %rax
- lretq
+ popq %rax
+ pushq $CPU_GDT_SEL_CODE
+ pushq %rax
+ lretq
#else /* __LP64__ */
- popl %eax
- pushl $CPU_GDT_SEL_CODE
- pushl %eax
- lret
+ popl %eax
+ pushl $CPU_GDT_SEL_CODE
+ pushl %eax
+ lret
#endif /* __LP64__ */
ASM_END(cpu_load_gdt)
diff --git a/arch/x86/machine/cpu_i.h b/arch/x86/machine/cpu_i.h
new file mode 100644
index 00000000..2c28573e
--- /dev/null
+++ b/arch/x86/machine/cpu_i.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2018 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef X86_CPU_I_H
+#define X86_CPU_I_H
+
+/*
+ * EFLAGS register flags.
+ */
+#define CPU_EFL_ONE 0x00000002 /* Reserved, must be set */
+#define CPU_EFL_IF 0x00000200
+
+/*
+ * GDT segment selectors.
+ *
+ * Keep in mind that, on amd64, the size of a GDT entry referred to
+ * by a selector depends on the descriptor type.
+ */
+#define CPU_GDT_SEL_NULL 0
+#define CPU_GDT_SEL_CODE 8
+#define CPU_GDT_SEL_DATA 16
+#define CPU_GDT_SEL_TSS 24
+
+#ifdef __LP64__
+#define CPU_GDT_SIZE 40
+#else /* __LP64__ */
+#define CPU_GDT_SEL_DF_TSS 32
+#define CPU_GDT_SEL_PERCPU 40
+#define CPU_GDT_SEL_TLS 48
+#define CPU_GDT_SIZE 56
+#endif /* __LP64__ */
+
+#ifndef __ASSEMBLER__
+
+#include <stdalign.h>
+#include <stdint.h>
+
+struct cpu_tss {
+#ifdef __LP64__
+ uint32_t reserved0;
+ uint64_t rsp0;
+ uint64_t rsp1;
+ uint64_t rsp2;
+ uint64_t ist[8];
+ uint64_t reserved1;
+ uint16_t reserved2;
+#else /* __LP64__ */
+ uint32_t link;
+ uint32_t esp0;
+ uint32_t ss0;
+ uint32_t esp1;
+ uint32_t ss1;
+ uint32_t esp2;
+ uint32_t ss2;
+ uint32_t cr3;
+ uint32_t eip;
+ uint32_t eflags;
+ uint32_t eax;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebx;
+ uint32_t esp;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t es;
+ uint32_t cs;
+ uint32_t ss;
+ uint32_t ds;
+ uint32_t fs;
+ uint32_t gs;
+ uint32_t ldt;
+ uint16_t trap_bit;
+#endif /* __LP64__ */
+ uint16_t iobp_base;
+} __packed;
+
+/*
+ * LDT or TSS system segment descriptor.
+ */
+struct cpu_sysseg_desc {
+ uint32_t word1;
+ uint32_t word2;
+#ifdef __LP64__
+ uint32_t word3;
+ uint32_t word4;
+#endif /* __LP64__ */
+};
+
+struct cpu_gdt {
+ alignas(CPU_L1_SIZE) char descs[CPU_GDT_SIZE];
+};
+
+#define CPU_VENDOR_ID_SIZE 13
+#define CPU_MODEL_NAME_SIZE 49
+
+/*
+ * CPU states.
+ * TODO Boolean.
+ */
+#define CPU_STATE_OFF 0
+#define CPU_STATE_ON 1
+
+struct cpu {
+ unsigned int id;
+ unsigned int apic_id;
+ char vendor_str[CPU_VENDOR_STR_SIZE];
+ char model_name[CPU_MODEL_NAME_SIZE];
+ unsigned int cpuid_max_basic;
+ unsigned int cpuid_max_extended;
+ unsigned int vendor_id;
+ unsigned int type;
+ unsigned int family;
+ unsigned int model;
+ unsigned int stepping;
+ unsigned int clflush_size;
+ unsigned int initial_apic_id;
+ unsigned int features1; // TODO Use a struct bitmap
+ unsigned int features2;
+ unsigned int features3;
+ unsigned int features4;
+ unsigned short phys_addr_width;
+ unsigned short virt_addr_width;
+
+ struct cpu_gdt gdt;
+
+ /*
+ * TSS segments, one set per CPU.
+ *
+ * One TSS at least is required per processor to provide the following :
+ * - stacks for double fault handlers, implemented with task switching
+ * on i386, interrupt stack tables on amd64
+ * - stacks for each privilege level
+ * - I/O permission bitmaps
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual,
+ * Volume 3 System Programming Guide :
+ * - 6.12.2 Interrupt tasks
+ * - 7.3 Task switching
+ */
+ struct cpu_tss tss;
+#ifndef __LP64__
+ struct cpu_tss df_tss;
+#endif /* __LP64__ */
+
+ volatile int state; // TODO Atomic accessors
+ void *boot_stack;
+
+ alignas(CPU_DATA_ALIGN) char intr_stack[CPU_INTR_STACK_SIZE];
+ alignas(CPU_DATA_ALIGN) char df_stack[CPU_INTR_STACK_SIZE];
+};
+
+/*
+ * This percpu variable contains the address of the percpu area for the local
+ * processor. This is normally the same value stored in the percpu module, but
+ * it can be directly accessed through a segment register.
+ */
+extern void *cpu_local_area;
+
+/*
+ * Return the content of the EFLAGS register.
+ *
+ * Implies a compiler barrier.
+ *
+ * TODO Add cpu_flags_t type.
+ */
+static __always_inline unsigned long
+cpu_get_eflags(void)
+{
+ unsigned long eflags;
+
+ asm volatile("pushf\n"
+ "pop %0\n"
+ : "=r" (eflags)
+ : : "memory");
+
+ return eflags;
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* X86_CPU_I_H */
diff --git a/arch/x86/machine/ioapic.c b/arch/x86/machine/ioapic.c
index f90e1209..2b74ed17 100644
--- a/arch/x86/machine/ioapic.c
+++ b/arch/x86/machine/ioapic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +31,6 @@
#include <machine/ioapic.h>
#include <machine/lapic.h>
#include <machine/pic.h>
-#include <machine/trap.h>
#include <vm/vm_kmem.h>
#define IOAPIC_REG_VERSION 0x01
@@ -157,9 +156,9 @@ ioapic_write_entry_high(struct ioapic *ioapic, unsigned int id, uint32_t value)
}
static void
-ioapic_intr(struct trap_frame *frame)
+ioapic_intr(unsigned int vector)
{
- intr_handle(frame->vector - TRAP_INTR_FIRST);
+ intr_handle(vector - CPU_EXC_INTR_FIRST);
}
static struct ioapic * __init
@@ -194,12 +193,12 @@ ioapic_create(unsigned int apic_id, uintptr_t addr, unsigned int gsi_base)
ioapic->last_gsi = ioapic->first_gsi + nr_gsis - 1;
/* XXX This assumes that interrupts are mapped 1:1 to traps */
- if (ioapic->last_gsi > (TRAP_INTR_LAST - TRAP_INTR_FIRST)) {
+ if (ioapic->last_gsi > (CPU_EXC_INTR_LAST - CPU_EXC_INTR_FIRST)) {
panic("ioapic: invalid interrupt range");
}
for (i = ioapic->first_gsi; i < ioapic->last_gsi; i++) {
- trap_register(TRAP_INTR_FIRST + i, ioapic_intr);
+ cpu_register_intr(CPU_EXC_INTR_FIRST + i, ioapic_intr);
}
log_info("ioapic%u: version:%#x gsis:%u-%u", ioapic->id,
@@ -228,14 +227,14 @@ ioapic_compute_entry(uint32_t *highp, uint32_t *lowp,
bool active_high, bool edge_triggered)
{
assert(apic_id < 16);
- assert(intr < (TRAP_NR_VECTORS - TRAP_INTR_FIRST));
+ assert(intr < (CPU_NR_EXC_VECTORS - CPU_EXC_INTR_FIRST));
*highp = apic_id << 24;
*lowp = (!edge_triggered ? IOAPIC_ENTLOW_LEVEL : 0)
| (!active_high ? IOAPIC_ENTLOW_ACTIVE_LOW : 0)
| IOAPIC_ENTLOW_PHYS_DELIVERY
| IOAPIC_ENTLOW_FIXED_DEST
- | (TRAP_INTR_FIRST + intr);
+ | (CPU_EXC_INTR_FIRST + intr);
}
static void
diff --git a/arch/x86/machine/lapic.c b/arch/x86/machine/lapic.c
index a15bd5f1..ed5bba3f 100644
--- a/arch/x86/machine/lapic.c
+++ b/arch/x86/machine/lapic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 Richard Braun.
+ * Copyright (c) 2011-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,7 +29,6 @@
#include <machine/cpu.h>
#include <machine/lapic.h>
#include <machine/pmap.h>
-#include <machine/trap.h>
#include <vm/vm_kmem.h>
/*
@@ -201,7 +200,7 @@ lapic_compute_freq(void)
{
uint32_t c1, c2;
- lapic_write(&lapic_map->svr, LAPIC_SVR_SOFT_EN | TRAP_LAPIC_SPURIOUS);
+ lapic_write(&lapic_map->svr, LAPIC_SVR_SOFT_EN | CPU_EXC_LAPIC_SPURIOUS);
lapic_write(&lapic_map->timer_dcr, LAPIC_TIMER_DCR_DIV1);
/* The APIC timer counter should never wrap around here */
@@ -222,6 +221,52 @@ lapic_eoi(void)
lapic_write(&lapic_map->eoi, 0);
}
+#ifdef CONFIG_PERFMON
+static void
+lapic_pmc_overflow_intr(unsigned int vector)
+{
+ (void)vector;
+
+ lapic_eoi();
+
+ /* Reset the LVT entry as it is automatically cleared when triggered */
+ lapic_write(&lapic_map->lvt_pmc, CPU_EXC_LAPIC_PMC_OF);
+
+ perfmon_overflow_intr();
+}
+#endif /* CONFIG_PERFMON */
+
+static void
+lapic_timer_intr(unsigned int vector)
+{
+ (void)vector;
+
+ lapic_eoi();
+ clock_tick_intr();
+}
+
+static void
+lapic_error_intr(unsigned int vector)
+{
+ uint32_t esr;
+
+ (void)vector;
+
+ esr = lapic_read(&lapic_map->esr);
+ log_err("lapic: error on cpu%u: esr:%08x", cpu_id(), esr);
+ lapic_write(&lapic_map->esr, 0);
+ lapic_eoi();
+}
+
+static void
+lapic_spurious_intr(unsigned int vector)
+{
+ (void)vector;
+ log_warning("lapic: spurious interrupt");
+
+ /* No EOI for this interrupt */
+}
+
static void __init
lapic_setup_registers(void)
{
@@ -229,18 +274,18 @@ lapic_setup_registers(void)
* LVT mask bits can only be cleared when the local APIC is enabled.
* They are kept disabled while the local APIC is disabled.
*/
- lapic_write(&lapic_map->svr, LAPIC_SVR_SOFT_EN | TRAP_LAPIC_SPURIOUS);
+ lapic_write(&lapic_map->svr, LAPIC_SVR_SOFT_EN | CPU_EXC_LAPIC_SPURIOUS);
lapic_write(&lapic_map->tpr, 0);
lapic_write(&lapic_map->eoi, 0);
lapic_write(&lapic_map->esr, 0);
lapic_write(&lapic_map->lvt_timer, LAPIC_LVT_TIMER_PERIODIC
- | TRAP_LAPIC_TIMER);
+ | CPU_EXC_LAPIC_TIMER);
lapic_write(&lapic_map->lvt_lint0, LAPIC_LVT_MASK_INTR);
lapic_write(&lapic_map->lvt_lint1, LAPIC_LVT_MASK_INTR);
- lapic_write(&lapic_map->lvt_error, TRAP_LAPIC_ERROR);
+ lapic_write(&lapic_map->lvt_error, CPU_EXC_LAPIC_ERROR);
lapic_write(&lapic_map->timer_dcr, LAPIC_TIMER_DCR_DIV1);
lapic_write(&lapic_map->timer_icr, lapic_bus_freq / CLOCK_FREQ);
- lapic_write(&lapic_map->lvt_pmc, TRAP_LAPIC_PMC_OF);
+ lapic_write(&lapic_map->lvt_pmc, CPU_EXC_LAPIC_PMC_OF);
}
void __init
@@ -248,6 +293,13 @@ lapic_setup(uint32_t map_addr)
{
uint32_t value;
+#ifdef CONFIG_PERFMON
+ cpu_register_intr(CPU_EXC_LAPIC_PMC_OF, lapic_pmc_overflow_intr);
+#endif
+ cpu_register_intr(CPU_EXC_LAPIC_TIMER, lapic_timer_intr);
+ cpu_register_intr(CPU_EXC_LAPIC_ERROR, lapic_error_intr);
+ cpu_register_intr(CPU_EXC_LAPIC_SPURIOUS, lapic_spurious_intr);
+
lapic_map = vm_kmem_map_pa(map_addr, sizeof(*lapic_map), NULL, NULL);
if (lapic_map == NULL) {
@@ -334,48 +386,3 @@ lapic_ipi_broadcast(uint32_t vector)
lapic_ipi(0, LAPIC_ICR_DEST_ALL_EXCEPT_SELF
| (vector & LAPIC_ICR_VECTOR_MASK));
}
-
-#ifdef CONFIG_PERFMON
-void
-lapic_pmc_overflow_intr(struct trap_frame *frame)
-{
- (void)frame;
-
- lapic_eoi();
-
- /* Reset the LVT entry as it is automatically cleared when triggered */
- lapic_write(&lapic_map->lvt_pmc, TRAP_LAPIC_PMC_OF);
-
- perfmon_overflow_intr();
-}
-#endif /* CONFIG_PERFMON */
-
-void
-lapic_timer_intr(struct trap_frame *frame)
-{
- (void)frame;
-
- lapic_eoi();
- clock_tick_intr();
-}
-
-void
-lapic_error_intr(struct trap_frame *frame)
-{
- uint32_t esr;
-
- (void)frame;
- esr = lapic_read(&lapic_map->esr);
- log_err("lapic: error on cpu%u: esr:%08x", cpu_id(), esr);
- lapic_write(&lapic_map->esr, 0);
- lapic_eoi();
-}
-
-void
-lapic_spurious_intr(struct trap_frame *frame)
-{
- (void)frame;
- log_warning("lapic: spurious interrupt");
-
- /* No EOI for this interrupt */
-}
diff --git a/arch/x86/machine/lapic.h b/arch/x86/machine/lapic.h
index eac225d7..259bee51 100644
--- a/arch/x86/machine/lapic.h
+++ b/arch/x86/machine/lapic.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 Richard Braun.
+ * Copyright (c) 2011-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,8 +21,6 @@
#include <stdbool.h>
#include <stdint.h>
-#include <machine/trap.h>
-
/*
* Send an end-of-interrupt message to the local APIC.
*/
@@ -51,12 +49,4 @@ void lapic_ipi_startup(uint32_t apic_id, uint32_t vector);
void lapic_ipi_send(uint32_t apic_id, uint32_t vector);
void lapic_ipi_broadcast(uint32_t vector);
-/*
- * Interrupt handlers.
- */
-void lapic_pmc_overflow_intr(struct trap_frame *frame);
-void lapic_timer_intr(struct trap_frame *frame);
-void lapic_error_intr(struct trap_frame *frame);
-void lapic_spurious_intr(struct trap_frame *frame);
-
#endif /* X86_LAPIC_H */
diff --git a/arch/x86/machine/pic.c b/arch/x86/machine/pic.c
index 3c7e7623..6240e4f8 100644
--- a/arch/x86/machine/pic.c
+++ b/arch/x86/machine/pic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 Richard Braun.
+ * Copyright (c) 2012-2018 Richard Braun.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +28,6 @@
#include <machine/io.h>
#include <machine/lapic.h>
#include <machine/pic.h>
-#include <machine/trap.h>
/*
* I/O ports.
@@ -155,9 +154,9 @@ static const struct intr_ops pic_ops = {
};
static void
-pic_intr(struct trap_frame *frame)
+pic_intr(unsigned int vector)
{
- intr_handle(frame->vector - TRAP_INTR_FIRST);
+ intr_handle(vector - CPU_EXC_INTR_FIRST);
}
static void __init
@@ -168,7 +167,7 @@ pic_register(void)
intr_register_ctl(&pic_ops, NULL, 0, PIC_MAX_INTR);
for (intr = 0; intr <= PIC_MAX_INTR; intr++) {
- trap_register(TRAP_INTR_FIRST + intr, pic_intr);
+ cpu_register_intr(CPU_EXC_INTR_FIRST + intr, pic_intr);
}
}
@@ -212,8 +211,8 @@ pic_setup_common(bool register_ctl)
io_write_byte(PIC_SLAVE_CMD, PIC_ICW1_INIT | PIC_ICW1_IC4);
/* ICW 2 */
- io_write_byte(PIC_MASTER_IMR, TRAP_INTR_FIRST);
- io_write_byte(PIC_SLAVE_IMR, TRAP_INTR_FIRST + PIC_NR_INTRS);
+ io_write_byte(PIC_MASTER_IMR, CPU_EXC_INTR_FIRST);
+ io_write_byte(PIC_SLAVE_IMR, CPU_EXC_INTR_FIRST + PIC_NR_INTRS);
/* ICW 3 - Set up cascading */
io_write_byte(PIC_MASTER_IMR, 1 << PIC_SLAVE_INTR);
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 182c765c..c8f0437d 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -23,6 +23,7 @@
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
+#include <stdio.h>
#include <string.h>
#include <kern/cpumap.h>
@@ -43,7 +44,6 @@
#include <machine/page.h>
#include <machine/pmap.h>
#include <machine/tcb.h>
-#include <machine/trap.h>
#include <machine/types.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
deleted file mode 100644
index 90c8bf66..00000000
--- a/arch/x86/machine/trap.c
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Copyright (c) 2012-2014 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * XXX Many traps have not been tested. Some, such as NMIs, are known to need
- * additional configuration and resources to be properly handled.
- */
-
-#include <assert.h>
-#include <stdalign.h>
-#include <stdint.h>
-#include <stdio.h>
-
-#include <kern/atomic.h>
-#include <kern/init.h>
-#include <kern/macros.h>
-#include <kern/spinlock.h>
-#include <kern/thread.h>
-#include <machine/cpu.h>
-#include <machine/lapic.h>
-#include <machine/pic.h>
-#include <machine/pmap.h>
-#include <machine/strace.h>
-#include <machine/trap.h>
-
-struct trap_cpu_data {
- alignas(CPU_DATA_ALIGN) unsigned char intr_stack[TRAP_STACK_SIZE];
-};
-
-static struct trap_cpu_data trap_cpu_data __percpu;
-
-/*
- * Type for interrupt service routines and trap handler functions.
- */
-typedef void (*trap_isr_fn_t)(void);
-
-/*
- * Trap handler flags.
- */
-#define TRAP_HF_INTR 0x1 /* Enter interrupt context */
-
-/*
- * Properties of a trap handler.
- */
-struct trap_handler {
- int flags;
- trap_handler_fn_t fn;
-};
-
-/*
- * Table of ISR addresses.
- */
-extern trap_isr_fn_t trap_isr_table[CPU_IDT_SIZE];
-
-/*
- * Array of trap handlers.
- */
-static struct trap_handler trap_handlers[CPU_IDT_SIZE] __read_mostly;
-
-/*
- * Global trap lock.
- *
- * This lock is currently only used to serialize concurrent trap handler
- * updates.
- *
- * Interrupts must be disabled when holding this lock.
- */
-static struct spinlock trap_lock;
-
-static struct trap_handler *
-trap_handler_get(unsigned int vector)
-{
- assert(vector < ARRAY_SIZE(trap_handlers));
- return &trap_handlers[vector];
-}
-
-static void __init
-trap_handler_init(struct trap_handler *handler, int flags, trap_handler_fn_t fn)
-{
- handler->flags = flags;
- handler->fn = fn;
-}
-
-static void __init
-trap_install(unsigned int vector, int flags, trap_handler_fn_t fn)
-{
- assert(vector < ARRAY_SIZE(trap_handlers));
- trap_handler_init(trap_handler_get(vector), flags, fn);
-}
-
-static void
-trap_show_thread(void)
-{
- struct thread *thread;
-
- thread = thread_self();
- printf("trap: interrupted thread: %p (%s)\n", thread, thread->name);
-}
-
-static void
-trap_double_fault(struct trap_frame *frame)
-{
- cpu_halt_broadcast();
-
-#ifndef __LP64__
- struct trap_frame frame_store;
- struct cpu *cpu;
-
- /*
- * Double faults are catched through a task gate, which makes the given
- * frame useless. The interrupted state is automatically saved in the
- * main TSS by the processor. Build a proper trap frame from there.
- */
- frame = &frame_store;
- cpu = cpu_current();
- frame->eax = cpu->tss.eax;
- frame->ebx = cpu->tss.ebx;
- frame->ecx = cpu->tss.ecx;
- frame->edx = cpu->tss.edx;
- frame->ebp = cpu->tss.ebp;
- frame->esi = cpu->tss.esi;
- frame->edi = cpu->tss.edi;
- frame->ds = cpu->tss.ds;
- frame->es = cpu->tss.es;
- frame->fs = cpu->tss.fs;
- frame->gs = cpu->tss.gs;
- frame->vector = TRAP_DF;
- frame->error = 0;
- frame->eip = cpu->tss.eip;
- frame->cs = cpu->tss.cs;
- frame->eflags = cpu->tss.eflags;
- frame->esp = cpu->tss.esp;
- frame->ss = cpu->tss.ss;
-#endif /* __LP64__ */
-
- printf("trap: double fault (cpu%u):\n", cpu_id());
- trap_show_thread();
- trap_frame_show(frame);
- trap_stack_show(frame);
- cpu_halt();
-}
-
-static void __init
-trap_install_double_fault(void)
-{
- trap_install(TRAP_DF, TRAP_HF_INTR, trap_double_fault);
- cpu_idt_set_double_fault(trap_isr_table[TRAP_DF]);
-}
-
-static void
-trap_default(struct trap_frame *frame)
-{
- cpu_halt_broadcast();
- printf("trap: unhandled interrupt or exception (cpu%u):\n", cpu_id());
- trap_show_thread();
- trap_frame_show(frame);
- trap_stack_show(frame);
- cpu_halt();
-}
-
-static int __init
-trap_setup(void)
-{
- size_t i;
-
- spinlock_init(&trap_lock);
-
- for (i = 0; i < ARRAY_SIZE(trap_isr_table); i++) {
- cpu_idt_set_gate(i, trap_isr_table[i]);
- }
-
- for (i = 0; i < ARRAY_SIZE(trap_handlers); i++) {
- trap_install(i, TRAP_HF_INTR, trap_default);
- }
-
- /* Architecture defined traps */
- trap_install(TRAP_DE, 0, trap_default);
- trap_install(TRAP_DB, 0, trap_default);
- trap_install(TRAP_NMI, TRAP_HF_INTR, trap_default);
- trap_install(TRAP_BP, 0, trap_default);
- trap_install(TRAP_OF, 0, trap_default);
- trap_install(TRAP_BR, 0, trap_default);
- trap_install(TRAP_UD, 0, trap_default);
- trap_install(TRAP_NM, 0, trap_default);
- trap_install_double_fault();
- trap_install(TRAP_TS, 0, trap_default);
- trap_install(TRAP_NP, 0, trap_default);
- trap_install(TRAP_SS, 0, trap_default);
- trap_install(TRAP_GP, 0, trap_default);
- trap_install(TRAP_PF, 0, trap_default);
- trap_install(TRAP_MF, 0, trap_default);
- trap_install(TRAP_AC, 0, trap_default);
- trap_install(TRAP_MC, TRAP_HF_INTR, trap_default);
- trap_install(TRAP_XM, 0, trap_default);
-
- /* System defined traps */
- trap_install(TRAP_XCALL, TRAP_HF_INTR, cpu_xcall_intr);
- trap_install(TRAP_THREAD_SCHEDULE, TRAP_HF_INTR, cpu_thread_schedule_intr);
- trap_install(TRAP_CPU_HALT, TRAP_HF_INTR, cpu_halt_intr);
-#ifdef CONFIG_PERFMON
- trap_install(TRAP_LAPIC_PMC_OF, TRAP_HF_INTR, lapic_pmc_overflow_intr);
-#endif
- trap_install(TRAP_LAPIC_TIMER, TRAP_HF_INTR, lapic_timer_intr);
- trap_install(TRAP_LAPIC_ERROR, TRAP_HF_INTR, lapic_error_intr);
- trap_install(TRAP_LAPIC_SPURIOUS, TRAP_HF_INTR, lapic_spurious_intr);
-
- return 0;
-}
-
-INIT_OP_DEFINE(trap_setup);
-
-void
-trap_main(struct trap_frame *frame)
-{
- struct trap_handler *handler;
- trap_handler_fn_t fn;
-
- assert(!cpu_intr_enabled());
-
- handler = trap_handler_get(frame->vector);
-
- if (handler->flags & TRAP_HF_INTR) {
- thread_intr_enter();
- }
-
- fn = atomic_load(&handler->fn, ATOMIC_RELAXED);
- fn(frame);
-
- if (handler->flags & TRAP_HF_INTR) {
- thread_intr_leave();
- }
-
- assert(!cpu_intr_enabled());
-}
-
-void
-trap_register(unsigned int vector, trap_handler_fn_t handler_fn)
-{
- unsigned long flags;
-
- spinlock_lock_intr_save(&trap_lock, &flags);
- trap_install(vector, TRAP_HF_INTR, handler_fn);
- spinlock_unlock_intr_restore(&trap_lock, flags);
-}
-
-#ifdef __LP64__
-
-void
-trap_frame_show(struct trap_frame *frame)
-{
- printf("trap: rax: %016lx rbx: %016lx rcx: %016lx\n"
- "trap: rdx: %016lx rbp: %016lx rsi: %016lx\n"
- "trap: rdi: %016lx r8: %016lx r9: %016lx\n"
- "trap: r10: %016lx r11: %016lx r12: %016lx\n"
- "trap: r13: %016lx r14: %016lx r15: %016lx\n"
- "trap: vector: %lu error: %08lx\n"
- "trap: rip: %016lx cs: %lu rflags: %016lx\n"
- "trap: rsp: %016lx ss: %lu\n",
- (unsigned long)frame->rax, (unsigned long)frame->rbx,
- (unsigned long)frame->rcx, (unsigned long)frame->rdx,
- (unsigned long)frame->rbp, (unsigned long)frame->rsi,
- (unsigned long)frame->rdi, (unsigned long)frame->r8,
- (unsigned long)frame->r9, (unsigned long)frame->r10,
- (unsigned long)frame->r11, (unsigned long)frame->r12,
- (unsigned long)frame->r13, (unsigned long)frame->r14,
- (unsigned long)frame->r15, (unsigned long)frame->vector,
- (unsigned long)frame->error, (unsigned long)frame->rip,
- (unsigned long)frame->cs, (unsigned long)frame->rflags,
- (unsigned long)frame->rsp, (unsigned long)frame->ss);
-
- /* XXX Until the page fault handler is written */
- if (frame->vector == 14) {
- printf("trap: cr2: %016lx\n", (unsigned long)cpu_get_cr2());
- }
-}
-
-#else /* __LP64__ */
-
-void
-trap_frame_show(struct trap_frame *frame)
-{
- unsigned long esp, ss;
-
- if ((frame->cs & CPU_PL_USER) || (frame->vector == TRAP_DF)) {
- esp = frame->esp;
- ss = frame->ss;
- } else {
- esp = 0;
- ss = 0;
- }
-
- printf("trap: eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n"
- "trap: ebp: %08lx esi: %08lx edi: %08lx\n"
- "trap: ds: %hu es: %hu fs: %hu gs: %hu\n"
- "trap: vector: %lu error: %08lx\n"
- "trap: eip: %08lx cs: %lu eflags: %08lx\n"
- "trap: esp: %08lx ss: %lu\n",
- (unsigned long)frame->eax, (unsigned long)frame->ebx,
- (unsigned long)frame->ecx, (unsigned long)frame->edx,
- (unsigned long)frame->ebp, (unsigned long)frame->esi,
- (unsigned long)frame->edi, (unsigned short)frame->ds,
- (unsigned short)frame->es, (unsigned short)frame->fs,
- (unsigned short)frame->gs, (unsigned long)frame->vector,
- (unsigned long)frame->error, (unsigned long)frame->eip,
- (unsigned long)frame->cs, (unsigned long)frame->eflags,
- (unsigned long)esp, (unsigned long)ss);
-
-
- /* XXX Until the page fault handler is written */
- if (frame->vector == 14) {
- printf("trap: cr2: %08lx\n", (unsigned long)cpu_get_cr2());
- }
-}
-
-#endif /* __LP64__ */
-
-void
-trap_stack_show(struct trap_frame *frame)
-{
-#ifdef __LP64__
- strace_show(frame->rip, frame->rbp);
-#else /* __LP64__ */
- strace_show(frame->eip, frame->ebp);
-#endif /* __LP64__ */
-}
-
-void *
-trap_get_interrupt_stack(const struct trap_frame *frame)
-{
- struct trap_cpu_data *cpu_data;
- struct trap_handler *handler;
-
- handler = trap_handler_get(frame->vector);
-
- if ((handler->flags & TRAP_HF_INTR) && !thread_interrupted()) {
- cpu_data = cpu_local_ptr(trap_cpu_data);
- return cpu_data->intr_stack + sizeof(cpu_data->intr_stack);
- } else {
- return NULL;
- }
-}
diff --git a/arch/x86/machine/trap.h b/arch/x86/machine/trap.h
deleted file mode 100644
index c5bdc1f2..00000000
--- a/arch/x86/machine/trap.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (c) 2011-2014 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * Trap (interrupt and exception) handling.
- *
- * This file is a top header in the inclusion hierarchy, and shouldn't include
- * other headers that may cause circular dependencies.
- */
-
-#ifndef X86_TRAP_H
-#define X86_TRAP_H
-
-#include <machine/page.h>
-
-/*
- * Architecture defined traps.
- */
-#define TRAP_DE 0 /* Divide Error */
-#define TRAP_DB 1 /* Debug */
-#define TRAP_NMI 2 /* NMI Interrupt */
-#define TRAP_BP 3 /* Breakpoint */
-#define TRAP_OF 4 /* Overflow */
-#define TRAP_BR 5 /* BOUND Range Exceeded */
-#define TRAP_UD 6 /* Invalid Opcode (Undefined Opcode) */
-#define TRAP_NM 7 /* Device Not Available (No Math Coprocessor) */
-#define TRAP_DF 8 /* Double Fault */
-#define TRAP_TS 10 /* Invalid TSS */
-#define TRAP_NP 11 /* Segment Not Present */
-#define TRAP_SS 12 /* Stack-Segment Fault */
-#define TRAP_GP 13 /* General Protection */
-#define TRAP_PF 14 /* Page Fault */
-#define TRAP_MF 16 /* x87 FPU Floating-Point Error (Math Fault) */
-#define TRAP_AC 17 /* Alignment Check */
-#define TRAP_MC 18 /* Machine Check */
-#define TRAP_XM 19 /* SIMD Floating-Point Exception */
-
-/*
- * Traps used for handling external interrupts.
- */
-#define TRAP_INTR_FIRST 32
-#define TRAP_INTR_LAST 223
-
-/*
- * System defined traps.
- *
- * The local APIC assigns one priority every 16 vectors.
- */
-#define TRAP_XCALL 238
-#define TRAP_THREAD_SCHEDULE 239
-#define TRAP_CPU_HALT 240
-#define TRAP_LAPIC_PMC_OF 252
-#define TRAP_LAPIC_TIMER 253
-#define TRAP_LAPIC_ERROR 254
-#define TRAP_LAPIC_SPURIOUS 255
-
-#define TRAP_NR_VECTORS 256
-
-#define TRAP_INTR_TABLE_SIZE 256
-
-#define TRAP_STACK_SIZE PAGE_SIZE
-
-#ifndef __ASSEMBLER__
-
-#include <stdint.h>
-#include <stdio.h>
-
-#include <kern/init.h>
-#include <kern/macros.h>
-
-#ifdef __LP64__
-
-struct trap_frame {
- uint64_t rax;
- uint64_t rbx;
- uint64_t rcx;
- uint64_t rdx;
- uint64_t rbp;
- uint64_t rsi;
- uint64_t rdi;
- uint64_t r8;
- uint64_t r9;
- uint64_t r10;
- uint64_t r11;
- uint64_t r12;
- uint64_t r13;
- uint64_t r14;
- uint64_t r15;
- uint64_t vector;
- uint64_t error;
- uint64_t rip;
- uint64_t cs;
- uint64_t rflags;
- uint64_t rsp;
- uint64_t ss;
-} __packed;
-
-#else /* __LP64__ */
-
-struct trap_frame {
- uint32_t eax;
- uint32_t ebx;
- uint32_t ecx;
- uint32_t edx;
- uint32_t ebp;
- uint32_t esi;
- uint32_t edi;
- uint16_t ds;
- uint16_t es;
- uint16_t fs;
- uint16_t gs;
- uint32_t vector;
- uint32_t error;
- uint32_t eip;
- uint32_t cs;
- uint32_t eflags;
- uint32_t esp; /* esp and ss are undefined if trapped in kernel */
- uint32_t ss;
-} __packed;
-
-#endif /* __LP64__ */
-
-/*
- * Type for trap handler functions.
- */
-typedef void (*trap_handler_fn_t)(struct trap_frame *);
-
-static inline void
-trap_trigger_double_fault(void)
-{
- printf("trap: double fault test\n");
- asm volatile("movl $0xdead, %esp; push $0");
-}
-
-/*
- * Unified trap entry point.
- */
-void trap_main(struct trap_frame *frame);
-
-/*
- * Register a trap handler.
- */
-void trap_register(unsigned int vector, trap_handler_fn_t handler_fn);
-
-/*
- * Display the content of a trap frame.
- */
-void trap_frame_show(struct trap_frame *frame);
-
-/*
- * Display the call trace interrupted by the trap of the given frame.
- */
-void trap_stack_show(struct trap_frame *frame);
-
-/*
- * Return a pointer to the local interrupt stack.
- *
- * This function is called by the low level trap handling code.
- *
- * Return NULL if no stack switching is required.
- */
-void * trap_get_interrupt_stack(const struct trap_frame *frame);
-
-/*
- * This init operation provides :
- * - initialization of all IDT entries and trap handlers
- * - double fault exception support
- */
-INIT_OP_DECLARE(trap_setup);
-
-#endif /* __ASSEMBLER__ */
-
-#endif /* X86_TRAP_H */
diff --git a/arch/x86/machine/trap_asm.S b/arch/x86/machine/trap_asm.S
deleted file mode 100644
index 2f683bd7..00000000
--- a/arch/x86/machine/trap_asm.S
+++ /dev/null
@@ -1,693 +0,0 @@
-/*
- * Copyright (c) 2012-2017 Richard Braun.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <kern/init.h>
-#include <machine/asm.h>
-#include <machine/boot.h>
-#include <machine/trap.h>
-
-.text
-
-#ifdef __LP64__
-
-.macro TRAP_STORE_REGISTERS
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %r11
- pushq %r10
- pushq %r9
- pushq %r8
- pushq %rdi
- pushq %rsi
- pushq %rbp
- pushq %rdx
- pushq %rcx
- pushq %rbx
- pushq %rax
-.endm
-
-.macro TRAP_LOAD_REGISTERS
- popq %rax
- popq %rbx
- popq %rcx
- popq %rdx
- popq %rbp
- popq %rsi
- popq %rdi
- popq %r8
- popq %r9
- popq %r10
- popq %r11
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- addq $16, %rsp /* skip vector and error */
-.endm
-
-#define TRAP(vector, name) \
-ASM_ENTRY(name) \
- pushq $0; \
- pushq $(vector); \
- jmp trap_common; \
-ASM_END(name)
-
-#define TRAP_ERROR(vector, name) \
-ASM_ENTRY(name) \
- pushq $(vector); \
- jmp trap_common; \
-ASM_END(name)
-
-ASM_ENTRY(trap_common)
- TRAP_STORE_REGISTERS
- movq %rsp, %rbx /* save frame */
- movq %rbx, %rdi
- call trap_get_interrupt_stack
- testq %rax, %rax /* switch stack ? */
- jz 1f
- movq %rax, %rsp /* switch to interrupt stack ? */
-1:
- xorq %rbp, %rbp /* block stack tracing */
- movq %rbx, %rdi
- call trap_main
- movq %rbx, %rsp /* restore stack */
- call thread_schedule /* schedule threads */
- TRAP_LOAD_REGISTERS
- iretq
-ASM_END(trap_common)
-
-#else /* __LP64__ */
-
-.macro TRAP_STORE_REGISTERS
- pushw %gs
- pushw %fs
- pushw %es
- pushw %ds
- pushl %edi
- pushl %esi
- pushl %ebp
- pushl %edx
- pushl %ecx
- pushl %ebx
- pushl %eax
-.endm
-
-/* XXX Don't load segment registers for now */
-.macro TRAP_LOAD_REGISTERS
- popl %eax
- popl %ebx
- popl %ecx
- popl %edx
- popl %ebp
- popl %esi
- popl %edi
- addl $16, %esp /* skip segment registers, vector and error */
-.endm
-
-#define TRAP(vector, name) \
-ASM_ENTRY(name) \
- pushl $0; \
- pushl $(vector); \
- jmp trap_common; \
-ASM_END(name)
-
-#define TRAP_ERROR(vector, name) \
-ASM_ENTRY(name) \
- pushl $(vector); \
- jmp trap_common; \
-ASM_END(name)
-
-ASM_ENTRY(trap_common)
- TRAP_STORE_REGISTERS
- movl %esp, %ebx /* save frame */
- pushl %ebx
- call trap_get_interrupt_stack
- addl $4, %esp
- testl %eax, %eax /* switch stack ? */
- jz 1f
- movl %eax, %esp /* switch to interrupt stack */
-1:
- xorl %ebp, %ebp /* block stack tracing */
- pushl %ebx
- call trap_main
- movl %ebx, %esp /* restore stack */
- call thread_schedule /* schedule threads */
- TRAP_LOAD_REGISTERS
- iret
-ASM_END(trap_common)
-
-#endif /* __LP64__ */
-
-/* Architecture defined traps */
-TRAP(TRAP_DE, trap_isr_divide_error)
-TRAP(TRAP_DB, trap_isr_debug)
-TRAP(TRAP_NMI, trap_isr_nmi)
-TRAP(TRAP_BP, trap_isr_breakpoint)
-TRAP(TRAP_OF, trap_isr_overflow)
-TRAP(TRAP_BR, trap_isr_bound_range)
-TRAP(TRAP_UD, trap_isr_invalid_opcode)
-TRAP(TRAP_NM, trap_isr_device_not_available)
-TRAP_ERROR(TRAP_DF, trap_isr_double_fault)
-TRAP_ERROR(TRAP_TS, trap_isr_invalid_tss)
-TRAP_ERROR(TRAP_NP, trap_isr_segment_not_present)
-TRAP_ERROR(TRAP_SS, trap_isr_stack_segment_fault)
-TRAP_ERROR(TRAP_GP, trap_isr_general_protection)
-TRAP_ERROR(TRAP_PF, trap_isr_page_fault)
-TRAP(TRAP_MF, trap_isr_math_fault)
-TRAP_ERROR(TRAP_AC, trap_isr_alignment_check)
-TRAP(TRAP_MC, trap_isr_machine_check)
-TRAP(TRAP_XM, trap_isr_simd_fp_exception)
-
-/*
- * Handlers for reserved exceptions.
- *
- * These exceptions should normally never occur, but have handlers ready just
- * in case.
- */
-TRAP(9, trap_isr_9)
-TRAP(15, trap_isr_15)
-TRAP(20, trap_isr_20)
-TRAP(21, trap_isr_21)
-TRAP(22, trap_isr_22)
-TRAP(23, trap_isr_23)
-TRAP(24, trap_isr_24)
-TRAP(25, trap_isr_25)
-TRAP(26, trap_isr_26)
-TRAP(27, trap_isr_27)
-TRAP(28, trap_isr_28)
-TRAP(29, trap_isr_29)
-TRAP(31, trap_isr_31)
-
-/* Generic trap handlers */
-TRAP(30, trap_isr_30)
-TRAP(32, trap_isr_32)
-TRAP(33, trap_isr_33)
-TRAP(34, trap_isr_34)
-TRAP(35, trap_isr_35)
-TRAP(36, trap_isr_36)
-TRAP(37, trap_isr_37)
-TRAP(38, trap_isr_38)
-TRAP(39, trap_isr_39)
-TRAP(40, trap_isr_40)
-TRAP(41, trap_isr_41)
-TRAP(42, trap_isr_42)
-TRAP(43, trap_isr_43)
-TRAP(44, trap_isr_44)
-TRAP(45, trap_isr_45)
-TRAP(46, trap_isr_46)
-TRAP(47, trap_isr_47)
-TRAP(48, trap_isr_48)
-TRAP(49, trap_isr_49)
-TRAP(50, trap_isr_50)
-TRAP(51, trap_isr_51)
-TRAP(52, trap_isr_52)
-TRAP(53, trap_isr_53)
-TRAP(54, trap_isr_54)
-TRAP(55, trap_isr_55)
-TRAP(56, trap_isr_56)
-TRAP(57, trap_isr_57)
-TRAP(58, trap_isr_58)
-TRAP(59, trap_isr_59)
-TRAP(60, trap_isr_60)
-TRAP(61, trap_isr_61)
-TRAP(62, trap_isr_62)
-TRAP(63, trap_isr_63)
-TRAP(64, trap_isr_64)
-TRAP(65, trap_isr_65)
-TRAP(66, trap_isr_66)
-TRAP(67, trap_isr_67)
-TRAP(68, trap_isr_68)
-TRAP(69, trap_isr_69)
-TRAP(70, trap_isr_70)
-TRAP(71, trap_isr_71)
-TRAP(72, trap_isr_72)
-TRAP(73, trap_isr_73)
-TRAP(74, trap_isr_74)
-TRAP(75, trap_isr_75)
-TRAP(76, trap_isr_76)
-TRAP(77, trap_isr_77)
-TRAP(78, trap_isr_78)
-TRAP(79, trap_isr_79)
-TRAP(80, trap_isr_80)
-TRAP(81, trap_isr_81)
-TRAP(82, trap_isr_82)
-TRAP(83, trap_isr_83)
-TRAP(84, trap_isr_84)
-TRAP(85, trap_isr_85)
-TRAP(86, trap_isr_86)
-TRAP(87, trap_isr_87)
-TRAP(88, trap_isr_88)
-TRAP(89, trap_isr_89)
-TRAP(90, trap_isr_90)
-TRAP(91, trap_isr_91)
-TRAP(92, trap_isr_92)
-TRAP(93, trap_isr_93)
-TRAP(94, trap_isr_94)
-TRAP(95, trap_isr_95)
-TRAP(96, trap_isr_96)
-TRAP(97, trap_isr_97)
-TRAP(98, trap_isr_98)
-TRAP(99, trap_isr_99)
-TRAP(100, trap_isr_100)
-TRAP(101, trap_isr_101)
-TRAP(102, trap_isr_102)
-TRAP(103, trap_isr_103)
-TRAP(104, trap_isr_104)
-TRAP(105, trap_isr_105)
-TRAP(106, trap_isr_106)
-TRAP(107, trap_isr_107)
-TRAP(108, trap_isr_108)
-TRAP(109, trap_isr_109)
-TRAP(110, trap_isr_110)
-TRAP(111, trap_isr_111)
-TRAP(112, trap_isr_112)
-TRAP(113, trap_isr_113)
-TRAP(114, trap_isr_114)
-TRAP(115, trap_isr_115)
-TRAP(116, trap_isr_116)
-TRAP(117, trap_isr_117)
-TRAP(118, trap_isr_118)
-TRAP(119, trap_isr_119)
-TRAP(120, trap_isr_120)
-TRAP(121, trap_isr_121)
-TRAP(122, trap_isr_122)
-TRAP(123, trap_isr_123)
-TRAP(124, trap_isr_124)
-TRAP(125, trap_isr_125)
-TRAP(126, trap_isr_126)
-TRAP(127, trap_isr_127)
-TRAP(128, trap_isr_128)
-TRAP(129, trap_isr_129)
-TRAP(130, trap_isr_130)
-TRAP(131, trap_isr_131)
-TRAP(132, trap_isr_132)
-TRAP(133, trap_isr_133)
-TRAP(134, trap_isr_134)
-TRAP(135, trap_isr_135)
-TRAP(136, trap_isr_136)
-TRAP(137, trap_isr_137)
-TRAP(138, trap_isr_138)
-TRAP(139, trap_isr_139)
-TRAP(140, trap_isr_140)
-TRAP(141, trap_isr_141)
-TRAP(142, trap_isr_142)
-TRAP(143, trap_isr_143)
-TRAP(144, trap_isr_144)
-TRAP(145, trap_isr_145)
-TRAP(146, trap_isr_146)
-TRAP(147, trap_isr_147)
-TRAP(148, trap_isr_148)
-TRAP(149, trap_isr_149)
-TRAP(150, trap_isr_150)
-TRAP(151, trap_isr_151)
-TRAP(152, trap_isr_152)
-TRAP(153, trap_isr_153)
-TRAP(154, trap_isr_154)
-TRAP(155, trap_isr_155)
-TRAP(156, trap_isr_156)
-TRAP(157, trap_isr_157)
-TRAP(158, trap_isr_158)
-TRAP(159, trap_isr_159)
-TRAP(160, trap_isr_160)
-TRAP(161, trap_isr_161)
-TRAP(162, trap_isr_162)
-TRAP(163, trap_isr_163)
-TRAP(164, trap_isr_164)
-TRAP(165, trap_isr_165)
-TRAP(166, trap_isr_166)
-TRAP(167, trap_isr_167)
-TRAP(168, trap_isr_168)
-TRAP(169, trap_isr_169)
-TRAP(170, trap_isr_170)
-TRAP(171, trap_isr_171)
-TRAP(172, trap_isr_172)
-TRAP(173, trap_isr_173)
-TRAP(174, trap_isr_174)
-TRAP(175, trap_isr_175)
-TRAP(176, trap_isr_176)
-TRAP(177, trap_isr_177)
-TRAP(178, trap_isr_178)
-TRAP(179, trap_isr_179)
-TRAP(180, trap_isr_180)
-TRAP(181, trap_isr_181)
-TRAP(182, trap_isr_182)
-TRAP(183, trap_isr_183)
-TRAP(184, trap_isr_184)
-TRAP(185, trap_isr_185)
-TRAP(186, trap_isr_186)
-TRAP(187, trap_isr_187)
-TRAP(188, trap_isr_188)
-TRAP(189, trap_isr_189)
-TRAP(190, trap_isr_190)
-TRAP(191, trap_isr_191)
-TRAP(192, trap_isr_192)
-TRAP(193, trap_isr_193)
-TRAP(194, trap_isr_194)
-TRAP(195, trap_isr_195)
-TRAP(196, trap_isr_196)
-TRAP(197, trap_isr_197)
-TRAP(198, trap_isr_198)
-TRAP(199, trap_isr_199)
-TRAP(200, trap_isr_200)
-TRAP(201, trap_isr_201)
-TRAP(202, trap_isr_202)
-TRAP(203, trap_isr_203)
-TRAP(204, trap_isr_204)
-TRAP(205, trap_isr_205)
-TRAP(206, trap_isr_206)
-TRAP(207, trap_isr_207)
-TRAP(208, trap_isr_208)
-TRAP(209, trap_isr_209)
-TRAP(210, trap_isr_210)
-TRAP(211, trap_isr_211)
-TRAP(212, trap_isr_212)
-TRAP(213, trap_isr_213)
-TRAP(214, trap_isr_214)
-TRAP(215, trap_isr_215)
-TRAP(216, trap_isr_216)
-TRAP(217, trap_isr_217)
-TRAP(218, trap_isr_218)
-TRAP(219, trap_isr_219)
-TRAP(220, trap_isr_220)
-TRAP(221, trap_isr_221)
-TRAP(222, trap_isr_222)
-TRAP(223, trap_isr_223)
-TRAP(224, trap_isr_224)
-TRAP(225, trap_isr_225)
-TRAP(226, trap_isr_226)
-TRAP(227, trap_isr_227)
-TRAP(228, trap_isr_228)
-TRAP(229, trap_isr_229)
-TRAP(230, trap_isr_230)
-TRAP(231, trap_isr_231)
-TRAP(232, trap_isr_232)
-TRAP(233, trap_isr_233)
-TRAP(234, trap_isr_234)
-TRAP(235, trap_isr_235)
-TRAP(236, trap_isr_236)
-TRAP(237, trap_isr_237)
-TRAP(238, trap_isr_238)
-TRAP(239, trap_isr_239)
-TRAP(240, trap_isr_240)
-TRAP(241, trap_isr_241)
-TRAP(242, trap_isr_242)
-TRAP(243, trap_isr_243)
-TRAP(244, trap_isr_244)
-TRAP(245, trap_isr_245)
-TRAP(246, trap_isr_246)
-TRAP(247, trap_isr_247)
-TRAP(248, trap_isr_248)
-TRAP(249, trap_isr_249)
-TRAP(250, trap_isr_250)
-TRAP(251, trap_isr_251)
-TRAP(252, trap_isr_252)
-TRAP(253, trap_isr_253)
-TRAP(254, trap_isr_254)
-TRAP(255, trap_isr_255)
-
-#ifdef __LP64__
-#define TRAP_TABLE_ENTRY(name) .quad name
-#else /* __LP64__ */
-#define TRAP_TABLE_ENTRY(name) .long name
-#endif /* __LP64__ */
-
-.section INIT_DATA_SECTION
-
-/*
- * This table lists all interrupt service routines as installed in the
- * IDT.
- */
-ASM_DATA(trap_isr_table)
-TRAP_TABLE_ENTRY(trap_isr_divide_error)
-TRAP_TABLE_ENTRY(trap_isr_debug)
-TRAP_TABLE_ENTRY(trap_isr_nmi)
-TRAP_TABLE_ENTRY(trap_isr_breakpoint)
-TRAP_TABLE_ENTRY(trap_isr_overflow)
-TRAP_TABLE_ENTRY(trap_isr_bound_range)
-TRAP_TABLE_ENTRY(trap_isr_invalid_opcode)
-TRAP_TABLE_ENTRY(trap_isr_device_not_available)
-TRAP_TABLE_ENTRY(trap_isr_double_fault)
-TRAP_TABLE_ENTRY(trap_isr_9)
-TRAP_TABLE_ENTRY(trap_isr_invalid_tss)
-TRAP_TABLE_ENTRY(trap_isr_segment_not_present)
-TRAP_TABLE_ENTRY(trap_isr_stack_segment_fault)
-TRAP_TABLE_ENTRY(trap_isr_general_protection)
-TRAP_TABLE_ENTRY(trap_isr_page_fault)
-TRAP_TABLE_ENTRY(trap_isr_15)
-TRAP_TABLE_ENTRY(trap_isr_math_fault)
-TRAP_TABLE_ENTRY(trap_isr_alignment_check)
-TRAP_TABLE_ENTRY(trap_isr_machine_check)
-TRAP_TABLE_ENTRY(trap_isr_simd_fp_exception)
-TRAP_TABLE_ENTRY(trap_isr_20)
-TRAP_TABLE_ENTRY(trap_isr_21)
-TRAP_TABLE_ENTRY(trap_isr_22)
-TRAP_TABLE_ENTRY(trap_isr_23)
-TRAP_TABLE_ENTRY(trap_isr_24)
-TRAP_TABLE_ENTRY(trap_isr_25)
-TRAP_TABLE_ENTRY(trap_isr_26)
-TRAP_TABLE_ENTRY(trap_isr_27)
-TRAP_TABLE_ENTRY(trap_isr_28)
-TRAP_TABLE_ENTRY(trap_isr_29)
-TRAP_TABLE_ENTRY(trap_isr_30)
-TRAP_TABLE_ENTRY(trap_isr_31)
-TRAP_TABLE_ENTRY(trap_isr_32)
-TRAP_TABLE_ENTRY(trap_isr_33)
-TRAP_TABLE_ENTRY(trap_isr_34)
-TRAP_TABLE_ENTRY(trap_isr_35)
-TRAP_TABLE_ENTRY(trap_isr_36)
-TRAP_TABLE_ENTRY(trap_isr_37)
-TRAP_TABLE_ENTRY(trap_isr_38)
-TRAP_TABLE_ENTRY(trap_isr_39)
-TRAP_TABLE_ENTRY(trap_isr_40)
-TRAP_TABLE_ENTRY(trap_isr_41)
-TRAP_TABLE_ENTRY(trap_isr_42)
-TRAP_TABLE_ENTRY(trap_isr_43)
-TRAP_TABLE_ENTRY(trap_isr_44)
-TRAP_TABLE_ENTRY(trap_isr_45)
-TRAP_TABLE_ENTRY(trap_isr_46)
-TRAP_TABLE_ENTRY(trap_isr_47)
-TRAP_TABLE_ENTRY(trap_isr_48)
-TRAP_TABLE_ENTRY(trap_isr_49)
-TRAP_TABLE_ENTRY(trap_isr_50)
-TRAP_TABLE_ENTRY(trap_isr_51)
-TRAP_TABLE_ENTRY(trap_isr_52)
-TRAP_TABLE_ENTRY(trap_isr_53)
-TRAP_TABLE_ENTRY(trap_isr_54)
-TRAP_TABLE_ENTRY(trap_isr_55)
-TRAP_TABLE_ENTRY(trap_isr_56)
-TRAP_TABLE_ENTRY(trap_isr_57)
-TRAP_TABLE_ENTRY(trap_isr_58)
-TRAP_TABLE_ENTRY(trap_isr_59)
-TRAP_TABLE_ENTRY(trap_isr_60)
-TRAP_TABLE_ENTRY(trap_isr_61)
-TRAP_TABLE_ENTRY(trap_isr_62)
-TRAP_TABLE_ENTRY(trap_isr_63)
-TRAP_TABLE_ENTRY(trap_isr_64)
-TRAP_TABLE_ENTRY(trap_isr_65)
-TRAP_TABLE_ENTRY(trap_isr_66)
-TRAP_TABLE_ENTRY(trap_isr_67)
-TRAP_TABLE_ENTRY(trap_isr_68)
-TRAP_TABLE_ENTRY(trap_isr_69)
-TRAP_TABLE_ENTRY(trap_isr_70)
-TRAP_TABLE_ENTRY(trap_isr_71)
-TRAP_TABLE_ENTRY(trap_isr_72)
-TRAP_TABLE_ENTRY(trap_isr_73)
-TRAP_TABLE_ENTRY(trap_isr_74)
-TRAP_TABLE_ENTRY(trap_isr_75)
-TRAP_TABLE_ENTRY(trap_isr_76)
-TRAP_TABLE_ENTRY(trap_isr_77)
-TRAP_TABLE_ENTRY(trap_isr_78)
-TRAP_TABLE_ENTRY(trap_isr_79)
-TRAP_TABLE_ENTRY(trap_isr_80)
-TRAP_TABLE_ENTRY(trap_isr_81)
-TRAP_TABLE_ENTRY(trap_isr_82)
-TRAP_TABLE_ENTRY(trap_isr_83)
-TRAP_TABLE_ENTRY(trap_isr_84)
-TRAP_TABLE_ENTRY(trap_isr_85)
-TRAP_TABLE_ENTRY(trap_isr_86)
-TRAP_TABLE_ENTRY(trap_isr_87)
-TRAP_TABLE_ENTRY(trap_isr_88)
-TRAP_TABLE_ENTRY(trap_isr_89)
-TRAP_TABLE_ENTRY(trap_isr_90)
-TRAP_TABLE_ENTRY(trap_isr_91)
-TRAP_TABLE_ENTRY(trap_isr_92)
-TRAP_TABLE_ENTRY(trap_isr_93)
-TRAP_TABLE_ENTRY(trap_isr_94)
-TRAP_TABLE_ENTRY(trap_isr_95)
-TRAP_TABLE_ENTRY(trap_isr_96)
-TRAP_TABLE_ENTRY(trap_isr_97)
-TRAP_TABLE_ENTRY(trap_isr_98)
-TRAP_TABLE_ENTRY(trap_isr_99)
-TRAP_TABLE_ENTRY(trap_isr_100)
-TRAP_TABLE_ENTRY(trap_isr_101)
-TRAP_TABLE_ENTRY(trap_isr_102)
-TRAP_TABLE_ENTRY(trap_isr_103)
-TRAP_TABLE_ENTRY(trap_isr_104)
-TRAP_TABLE_ENTRY(trap_isr_105)
-TRAP_TABLE_ENTRY(trap_isr_106)
-TRAP_TABLE_ENTRY(trap_isr_107)
-TRAP_TABLE_ENTRY(trap_isr_108)
-TRAP_TABLE_ENTRY(trap_isr_109)
-TRAP_TABLE_ENTRY(trap_isr_110)
-TRAP_TABLE_ENTRY(trap_isr_111)
-TRAP_TABLE_ENTRY(trap_isr_112)
-TRAP_TABLE_ENTRY(trap_isr_113)
-TRAP_TABLE_ENTRY(trap_isr_114)
-TRAP_TABLE_ENTRY(trap_isr_115)
-TRAP_TABLE_ENTRY(trap_isr_116)
-TRAP_TABLE_ENTRY(trap_isr_117)
-TRAP_TABLE_ENTRY(trap_isr_118)
-TRAP_TABLE_ENTRY(trap_isr_119)
-TRAP_TABLE_ENTRY(trap_isr_120)
-TRAP_TABLE_ENTRY(trap_isr_121)
-TRAP_TABLE_ENTRY(trap_isr_122)
-TRAP_TABLE_ENTRY(trap_isr_123)
-TRAP_TABLE_ENTRY(trap_isr_124)
-TRAP_TABLE_ENTRY(trap_isr_125)
-TRAP_TABLE_ENTRY(trap_isr_126)
-TRAP_TABLE_ENTRY(trap_isr_127)
-TRAP_TABLE_ENTRY(trap_isr_128)
-TRAP_TABLE_ENTRY(trap_isr_129)
-TRAP_TABLE_ENTRY(trap_isr_130)
-TRAP_TABLE_ENTRY(trap_isr_131)
-TRAP_TABLE_ENTRY(trap_isr_132)
-TRAP_TABLE_ENTRY(trap_isr_133)
-TRAP_TABLE_ENTRY(trap_isr_134)
-TRAP_TABLE_ENTRY(trap_isr_135)
-TRAP_TABLE_ENTRY(trap_isr_136)
-TRAP_TABLE_ENTRY(trap_isr_137)
-TRAP_TABLE_ENTRY(trap_isr_138)
-TRAP_TABLE_ENTRY(trap_isr_139)
-TRAP_TABLE_ENTRY(trap_isr_140)
-TRAP_TABLE_ENTRY(trap_isr_141)
-TRAP_TABLE_ENTRY(trap_isr_142)
-TRAP_TABLE_ENTRY(trap_isr_143)
-TRAP_TABLE_ENTRY(trap_isr_144)
-TRAP_TABLE_ENTRY(trap_isr_145)
-TRAP_TABLE_ENTRY(trap_isr_146)
-TRAP_TABLE_ENTRY(trap_isr_147)
-TRAP_TABLE_ENTRY(trap_isr_148)
-TRAP_TABLE_ENTRY(trap_isr_149)
-TRAP_TABLE_ENTRY(trap_isr_150)
-TRAP_TABLE_ENTRY(trap_isr_151)
-TRAP_TABLE_ENTRY(trap_isr_152)
-TRAP_TABLE_ENTRY(trap_isr_153)
-TRAP_TABLE_ENTRY(trap_isr_154)
-TRAP_TABLE_ENTRY(trap_isr_155)
-TRAP_TABLE_ENTRY(trap_isr_156)
-TRAP_TABLE_ENTRY(trap_isr_157)
-TRAP_TABLE_ENTRY(trap_isr_158)
-TRAP_TABLE_ENTRY(trap_isr_159)
-TRAP_TABLE_ENTRY(trap_isr_160)
-TRAP_TABLE_ENTRY(trap_isr_161)
-TRAP_TABLE_ENTRY(trap_isr_162)
-TRAP_TABLE_ENTRY(trap_isr_163)
-TRAP_TABLE_ENTRY(trap_isr_164)
-TRAP_TABLE_ENTRY(trap_isr_165)
-TRAP_TABLE_ENTRY(trap_isr_166)
-TRAP_TABLE_ENTRY(trap_isr_167)
-TRAP_TABLE_ENTRY(trap_isr_168)
-TRAP_TABLE_ENTRY(trap_isr_169)
-TRAP_TABLE_ENTRY(trap_isr_170)
-TRAP_TABLE_ENTRY(trap_isr_171)
-TRAP_TABLE_ENTRY(trap_isr_172)
-TRAP_TABLE_ENTRY(trap_isr_173)
-TRAP_TABLE_ENTRY(trap_isr_174)
-TRAP_TABLE_ENTRY(trap_isr_175)
-TRAP_TABLE_ENTRY(trap_isr_176)
-TRAP_TABLE_ENTRY(trap_isr_177)
-TRAP_TABLE_ENTRY(trap_isr_178)
-TRAP_TABLE_ENTRY(trap_isr_179)
-TRAP_TABLE_ENTRY(trap_isr_180)
-TRAP_TABLE_ENTRY(trap_isr_181)
-TRAP_TABLE_ENTRY(trap_isr_182)
-TRAP_TABLE_ENTRY(trap_isr_183)
-TRAP_TABLE_ENTRY(trap_isr_184)
-TRAP_TABLE_ENTRY(trap_isr_185)
-TRAP_TABLE_ENTRY(trap_isr_186)
-TRAP_TABLE_ENTRY(trap_isr_187)
-TRAP_TABLE_ENTRY(trap_isr_188)
-TRAP_TABLE_ENTRY(trap_isr_189)
-TRAP_TABLE_ENTRY(trap_isr_190)
-TRAP_TABLE_ENTRY(trap_isr_191)
-TRAP_TABLE_ENTRY(trap_isr_192)
-TRAP_TABLE_ENTRY(trap_isr_193)
-TRAP_TABLE_ENTRY(trap_isr_194)
-TRAP_TABLE_ENTRY(trap_isr_195)
-TRAP_TABLE_ENTRY(trap_isr_196)
-TRAP_TABLE_ENTRY(trap_isr_197)
-TRAP_TABLE_ENTRY(trap_isr_198)
-TRAP_TABLE_ENTRY(trap_isr_199)
-TRAP_TABLE_ENTRY(trap_isr_200)
-TRAP_TABLE_ENTRY(trap_isr_201)
-TRAP_TABLE_ENTRY(trap_isr_202)
-TRAP_TABLE_ENTRY(trap_isr_203)
-TRAP_TABLE_ENTRY(trap_isr_204)
-TRAP_TABLE_ENTRY(trap_isr_205)
-TRAP_TABLE_ENTRY(trap_isr_206)
-TRAP_TABLE_ENTRY(trap_isr_207)
-TRAP_TABLE_ENTRY(trap_isr_208)
-TRAP_TABLE_ENTRY(trap_isr_209)
-TRAP_TABLE_ENTRY(trap_isr_210)
-TRAP_TABLE_ENTRY(trap_isr_211)
-TRAP_TABLE_ENTRY(trap_isr_212)
-TRAP_TABLE_ENTRY(trap_isr_213)
-TRAP_TABLE_ENTRY(trap_isr_214)
-TRAP_TABLE_ENTRY(trap_isr_215)
-TRAP_TABLE_ENTRY(trap_isr_216)
-TRAP_TABLE_ENTRY(trap_isr_217)
-TRAP_TABLE_ENTRY(trap_isr_218)
-TRAP_TABLE_ENTRY(trap_isr_219)
-TRAP_TABLE_ENTRY(trap_isr_220)
-TRAP_TABLE_ENTRY(trap_isr_221)
-TRAP_TABLE_ENTRY(trap_isr_222)
-TRAP_TABLE_ENTRY(trap_isr_223)
-TRAP_TABLE_ENTRY(trap_isr_224)
-TRAP_TABLE_ENTRY(trap_isr_225)
-TRAP_TABLE_ENTRY(trap_isr_226)
-TRAP_TABLE_ENTRY(trap_isr_227)
-TRAP_TABLE_ENTRY(trap_isr_228)
-TRAP_TABLE_ENTRY(trap_isr_229)
-TRAP_TABLE_ENTRY(trap_isr_230)
-TRAP_TABLE_ENTRY(trap_isr_231)
-TRAP_TABLE_ENTRY(trap_isr_232)
-TRAP_TABLE_ENTRY(trap_isr_233)
-TRAP_TABLE_ENTRY(trap_isr_234)
-TRAP_TABLE_ENTRY(trap_isr_235)
-TRAP_TABLE_ENTRY(trap_isr_236)
-TRAP_TABLE_ENTRY(trap_isr_237)
-TRAP_TABLE_ENTRY(trap_isr_238)
-TRAP_TABLE_ENTRY(trap_isr_239)
-TRAP_TABLE_ENTRY(trap_isr_240)
-TRAP_TABLE_ENTRY(trap_isr_241)
-TRAP_TABLE_ENTRY(trap_isr_242)
-TRAP_TABLE_ENTRY(trap_isr_243)
-TRAP_TABLE_ENTRY(trap_isr_244)
-TRAP_TABLE_ENTRY(trap_isr_245)
-TRAP_TABLE_ENTRY(trap_isr_246)
-TRAP_TABLE_ENTRY(trap_isr_247)
-TRAP_TABLE_ENTRY(trap_isr_248)
-TRAP_TABLE_ENTRY(trap_isr_249)
-TRAP_TABLE_ENTRY(trap_isr_250)
-TRAP_TABLE_ENTRY(trap_isr_251)
-TRAP_TABLE_ENTRY(trap_isr_252)
-TRAP_TABLE_ENTRY(trap_isr_253)
-TRAP_TABLE_ENTRY(trap_isr_254)
-TRAP_TABLE_ENTRY(trap_isr_255)
-ASM_END(trap_isr_table)
diff --git a/arch/x86/machine/uart.c b/arch/x86/machine/uart.c
index be9ad725..b60c128b 100644
--- a/arch/x86/machine/uart.c
+++ b/arch/x86/machine/uart.c
@@ -18,6 +18,7 @@
#include <assert.h>
#include <errno.h>
#include <stdint.h>
+#include <stdio.h>
#include <kern/arg.h>
#include <kern/console.h>
diff --git a/kern/intr.c b/kern/intr.c
index 541dc5cb..ac2cea6f 100644
--- a/kern/intr.c
+++ b/kern/intr.c
@@ -39,7 +39,6 @@
#include <kern/thread.h>
#include <machine/boot.h>
#include <machine/cpu.h>
-#include <machine/trap.h>
struct intr_handler {
alignas(CPU_L1_SIZE) struct list node;
@@ -79,7 +78,7 @@ struct intr_entry {
/*
* Interrupt table.
*/
-static struct intr_entry intr_table[TRAP_INTR_TABLE_SIZE];
+static struct intr_entry intr_table[CPU_INTR_TABLE_SIZE];
/*
* List of registered controllers.