summary refs log tree commit diff
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2013-06-15 10:18:07 +0200
committerRichard Braun <rbraun@sceen.net>2013-07-06 19:02:10 +0200
commit75a1e4532166e7aeb2c0c3109851d4dd48c25183 (patch)
tree2fa5c822579694dbc7cd407108a65115c8c91a99
parent738d08d5e306e67ba443763fd648a393a48c55d4 (diff)
x86 page fault handling
-rw-r--r--arch/x86/machine/trap.c57
-rw-r--r--kern/kernel.c46
-rw-r--r--vm/vm_anon.c9
-rw-r--r--vm/vm_map.c10
-rw-r--r--vm/vm_map.h7
-rw-r--r--vm/vm_object.h5
6 files changed, 127 insertions, 7 deletions
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
index 36585c25..9ef26a27 100644
--- a/arch/x86/machine/trap.c
+++ b/arch/x86/machine/trap.c
@@ -23,7 +23,9 @@
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/panic.h>
+#include <kern/param.h>
#include <kern/printk.h>
+#include <kern/task.h>
#include <kern/thread.h>
#include <machine/cpu.h>
#include <machine/lapic.h>
@@ -31,6 +33,18 @@
#include <machine/pmap.h>
#include <machine/strace.h>
#include <machine/trap.h>
+#include <vm/vm_kmem.h>
+#include <vm/vm_map.h>
+#include <vm/vm_prot.h>
+
+/*
+ * Page fault error codes.
+ */
+#define TRAP_ERROR_PF_PROT 0x01 /* Protection violation */
+#define TRAP_ERROR_PF_WRITE 0x02 /* Write access */
+#define TRAP_ERROR_PF_USER 0x04 /* User mode access */
+#define TRAP_ERROR_PF_RESERVED 0x08 /* Invalid PTE (reserved bit set) */
+#define TRAP_ERROR_PF_EXEC 0x10 /* Instruction fetch */
/*
* Type for interrupt service routines and trap handler functions.
@@ -158,6 +172,47 @@ trap_install_double_fault(void)
}
static void
+trap_page_fault(struct trap_frame *frame)
+{
+ struct thread *thread;
+ struct vm_map *map;
+ unsigned long addr;
+ int error, access;
+
+ /*
+ * TODO Page faults can currently only be handled when they are accesses
+ * from kernel space to valid mapped objects. Complete according to the
+ * VM system capabilities.
+ */
+ assert(!(frame->error & TRAP_ERROR_PF_PROT));
+ assert(!(frame->error & TRAP_ERROR_PF_USER));
+ assert(!(frame->error & TRAP_ERROR_PF_RESERVED));
+ assert(!(frame->error & TRAP_ERROR_PF_EXEC));
+
+ /*
+ * Reading CR2 is safe because interrupts are disabled and kernel code
+ * can't cause another page fault while handling a page fault.
+ */
+ addr = cpu_get_cr2();
+ access = (frame->error & TRAP_ERROR_PF_WRITE)
+ ? VM_PROT_WRITE
+ : VM_PROT_READ;
+ thread = thread_self();
+ map = (addr >= VM_MIN_KERNEL_ADDRESS) ? kernel_map : thread->task->map;
+
+ error = vm_map_fault(map, addr, access);
+
+ if (error) {
+ cpu_halt_broadcast();
+ printk("trap: page fault error: %d, code %#lx at %#lx in task %s\n",
+ error, frame->error, addr, thread->task->name);
+ trap_frame_show(frame);
+ trap_stack_show(frame);
+ cpu_halt();
+ }
+}
+
+static void
trap_default(struct trap_frame *frame)
{
cpu_halt_broadcast();
@@ -189,7 +244,7 @@ trap_setup(void)
trap_install(TRAP_NP, 0, trap_isr_segment_not_present, trap_default);
trap_install(TRAP_SS, 0, trap_isr_stack_segment_fault, trap_default);
trap_install(TRAP_GP, 0, trap_isr_general_protection, trap_default);
- trap_install(TRAP_PF, 0, trap_isr_page_fault, trap_default);
+ trap_install(TRAP_PF, 0, trap_isr_page_fault, trap_page_fault);
trap_install(TRAP_MF, 0, trap_isr_math_fault, trap_default);
trap_install(TRAP_AC, 0, trap_isr_alignment_check, trap_default);
trap_install(TRAP_MC, TRAP_HF_NOPREEMPT,
diff --git a/kern/kernel.c b/kern/kernel.c
index d636d54a..126b8a33 100644
--- a/kern/kernel.c
+++ b/kern/kernel.c
@@ -26,6 +26,50 @@
#include <kern/work.h>
#include <machine/cpu.h>
+#include <vm/vm_anon.h>
+#include <vm/vm_kmem.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+
+#define OBJ_SIZE (PAGE_SIZE * 10)
+
+static void
+kernel_test(void *arg)
+{
+ struct vm_object *object;
+ unsigned long addr;
+ int error, flags;
+
+ (void)arg;
+
+ object = vm_anon_create(OBJ_SIZE);
+ assert(object != NULL);
+ addr = 0;
+ flags = VM_MAP_PROT_ALL | VM_MAP_MAX_PROT_ALL | VM_MAP_INHERIT_NONE
+ | VM_MAP_ADV_NORMAL;
+ error = vm_map_enter(kernel_map, object, 0, &addr, OBJ_SIZE, 0, flags);
+ assert(!error);
+ printk("anonymous object mapped at %#lx\n", addr);
+ vm_map_info(kernel_map);
+ memset((void *)addr, '\0', OBJ_SIZE);
+}
+
+static void
+start_test(void)
+{
+ struct thread_attr attr;
+ struct thread *thread;
+ int error;
+
+ attr.name = "test";
+ attr.cpumap = NULL;
+ attr.task = NULL;
+ attr.policy = THREAD_SCHED_POLICY_TS;
+ attr.priority = THREAD_SCHED_TS_PRIO_DEFAULT;
+ error = thread_create(&thread, &attr, kernel_test, NULL);
+ assert(!error);
+}
+
void __init
kernel_main(void)
{
@@ -42,6 +86,8 @@ kernel_main(void)
work_setup();
llsync_setup();
+ start_test();
+
/* Rendezvous with APs */
cpu_mp_sync();
diff --git a/vm/vm_anon.c b/vm/vm_anon.c
index fac7528c..e923ff31 100644
--- a/vm/vm_anon.c
+++ b/vm/vm_anon.c
@@ -26,6 +26,7 @@
#include <kern/stdint.h>
#include <vm/vm_anon.h>
#include <vm/vm_object.h>
+#include <vm/vm_page.h>
/*
* Anonymous memory container.
@@ -42,7 +43,7 @@ struct vm_anon {
static void vm_anon_ref(struct vm_object *object);
static void vm_anon_unref(struct vm_object *object);
static int vm_anon_get(struct vm_object *object, uint64_t offset,
- struct list *pages, int access_prot, int advice);
+ struct vm_page **pagep, int access_prot, int advice);
static struct vm_object_pager vm_anon_pager = {
.ref = vm_anon_ref,
@@ -106,12 +107,12 @@ vm_anon_unref(struct vm_object *object)
}
static int
-vm_anon_get(struct vm_object *object, uint64_t offset, struct list *pages,
- int access_prot, int advice)
+vm_anon_get(struct vm_object *object, uint64_t offset,
+ struct vm_page **pagep, int access_prot, int advice)
{
(void)object;
(void)offset;
- (void)pages;
+ (void)pagep;
(void)access_prot;
(void)advice;
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 4ff86904..0a94b351 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -900,6 +900,16 @@ out:
mutex_unlock(&map->lock);
}
+int
+vm_map_fault(struct vm_map *map, unsigned long addr, int access)
+{
+ (void)map;
+ (void)addr;
+ (void)access;
+
+ return ERROR_AGAIN;
+}
+
static void
vm_map_init(struct vm_map *map, struct pmap *pmap, unsigned long start,
unsigned long end)
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 7a2bc44c..d686cb93 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -103,6 +103,13 @@ int vm_map_enter(struct vm_map *map, struct vm_object *object, uint64_t offset,
void vm_map_remove(struct vm_map *map, unsigned long start, unsigned long end);
/*
+ * Page fault handling.
+ *
+ * Access is one of VM_PROT_READ, VM_PROT_WRITE or VM_PROT_EXECUTE.
+ */
+int vm_map_fault(struct vm_map *map, unsigned long addr, int access);
+
+/*
* Set up the vm_map module.
*/
void vm_map_setup(void);
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 5de636ab..86bd12f5 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -31,6 +31,7 @@
#include <kern/mutex.h>
#include <kern/rdxtree.h>
#include <kern/stdint.h>
+#include <vm/vm_page.h>
struct vm_object_pager;
@@ -50,8 +51,8 @@ struct vm_object {
struct vm_object_pager {
void (*ref)(struct vm_object *object);
void (*unref)(struct vm_object *object);
- int (*get)(struct vm_object *object, uint64_t offset, struct list *pages,
- int access_prot, int advice);
+ int (*get)(struct vm_object *object, uint64_t offset,
+ struct vm_page **pagep, int access_type, int advice);
};
static inline void