summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2017-10-14 23:45:04 +0200
committerRichard Braun <rbraun@sceen.net>2018-01-04 01:57:38 +0100
commit9437f135da9fab16180fc64cdd64e2a3bb3d5b7a (patch)
tree8cd3d9e769c2af24463d58e8ba416aae9de9ce7b /src
Initial commit
Diffstat (limited to 'src')
-rw-r--r--src/boot.c43
-rw-r--r--src/boot.h34
-rw-r--r--src/boot_asm.S96
-rw-r--r--src/condvar.c180
-rw-r--r--src/condvar.h149
-rw-r--r--src/cpu.c493
-rw-r--r--src/cpu.h131
-rw-r--r--src/cpu_asm.S163
-rw-r--r--src/error.c67
-rw-r--r--src/error.h50
-rw-r--r--src/i8254.c72
-rw-r--r--src/i8254.h34
-rw-r--r--src/i8259.c257
-rw-r--r--src/i8259.h57
-rw-r--r--src/io.h47
-rw-r--r--src/io_asm.S37
-rw-r--r--src/kernel.lds113
-rw-r--r--src/main.c69
-rw-r--r--src/mem.c692
-rw-r--r--src/mem.h92
-rw-r--r--src/mutex.c151
-rw-r--r--src/mutex.h165
-rw-r--r--src/panic.c42
-rw-r--r--src/panic.h32
-rw-r--r--src/stdio.c87
-rw-r--r--src/string.c143
-rw-r--r--src/sw.c295
-rw-r--r--src/sw.h34
-rw-r--r--src/thread.c823
-rw-r--r--src/thread.h326
-rw-r--r--src/thread_asm.S51
-rw-r--r--src/timer.c321
-rw-r--r--src/timer.h136
-rw-r--r--src/uart.c190
-rw-r--r--src/uart.h61
35 files changed, 5733 insertions, 0 deletions
diff --git a/src/boot.c b/src/boot.c
new file mode 100644
index 0000000..ac08818
--- /dev/null
+++ b/src/boot.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include <lib/macros.h>
+
+#include "boot.h"
+
+/*
+ * This is the boot stack, used by the boot code to set the value of
+ * the ESP register very early once control is passed to the kernel.
+ *
+ * It is aligned to 4 bytes to comply with the System V Intel 386 ABI [1].
+ * While not strictly required since x86 supports unaligned accesses,
+ * aligned accesses are faster, and the compiler generates instructions
+ * accessing the stack that assume it's aligned.
+ *
+ * See the assembly code at the boot_start label in boot_asm.S.
+ *
+ * [1] http://www.sco.com/developers/devspecs/abi386-4.pdf
+ */
+uint8_t boot_stack[BOOT_STACK_SIZE] __aligned(4);
diff --git a/src/boot.h b/src/boot.h
new file mode 100644
index 0000000..d813605
--- /dev/null
+++ b/src/boot.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _BOOT_H
+#define _BOOT_H
+
+/*
+ * The size of the boot stack.
+ *
+ * See the boot_stack variable in boot.c.
+ */
+#define BOOT_STACK_SIZE 4096
+
+#endif /* _BOOT_H */
diff --git a/src/boot_asm.S b/src/boot_asm.S
new file mode 100644
index 0000000..018f096
--- /dev/null
+++ b/src/boot_asm.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "boot.h"
+
+/*
+ * These are values used in the OS image header, as defined by the multiboot
+ * specification.
+ *
+ * See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html.
+ */
+#define BOOT_HDR_MAGIC 0x1BADB002
+#define BOOT_HDR_CHECK 0x2BADB002
+#define BOOT_HDR_FLAGS 0x0
+
+/*
+ * The .section directive tells the assembler which section the following
+ * instructions should go into.
+ *
+ * The "a" flag makes the section allocatable, meaning memory will be
+ * allocated for that section at load time.
+ *
+ * See https://sourceware.org/binutils/docs-2.29/as/Section.html#Section.
+ */
+.section .hdr, "a"
+
+/* Generate code for i386 */
+.code32
+
+/*
+ * The .int directive is used to emit verbatim machine words. Here, the
+ * third word is the checksum of the first two, defined as "a 32-bit
+ * unsigned value which, when added to the other magic fields (i.e.
+ * ‘magic’ and ‘flags’), must have a 32-bit unsigned sum of zero".
+ * Intuitively, adding the two first words and making the result negative
+ * gives a value that, when added to the other fields, gives 0, despite
+ * the word being unsigned. This trick works because values use two's
+ * complement representation.
+ *
+ * See https://en.wikipedia.org/wiki/Two%27s_complement.
+ */
+.int BOOT_HDR_MAGIC
+.int BOOT_HDR_FLAGS
+.int -(BOOT_HDR_FLAGS + BOOT_HDR_MAGIC)
+
+/*
+ * Put the following instructions into the .text section, which is
+ * allocatable and executable.
+ */
+.section .text, "ax"
+
+/*
+ * This symbol is the entry point, i.e. the first instruction that should
+ * be run when control is passed to the kernel. The address of this symbol
+ * is what the following command returns :
+ * readelf -aW x1 | grep "Entry point"
+ *
+ * The .global directive tells the assembler to make the symbol global,
+ * i.e. to make it visible to other compilation units.
+ *
+ * When this code is run, the machine state should comply with what the
+ * multiboot specification defines.
+ */
+.global boot_start
+boot_start:
+ cmp $BOOT_HDR_CHECK, %eax /* Compare EAX against the expected value */
+ jne . /* If not equal, jump to the current address.
+ This is an infinite loop. */
+ mov $boot_stack, %esp /* Set up a stack */
+ add $BOOT_STACK_SIZE, %esp /* On x86, stacks grow downwards, so start
+ at the top */
+ jmp main /* Jump to the C main function */
+
+loop:
+ hlt /* Never reached, for safety */
+ jmp loop
diff --git a/src/condvar.c b/src/condvar.c
new file mode 100644
index 0000000..2424e7d
--- /dev/null
+++ b/src/condvar.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+
+#include <lib/list.h>
+
+#include "condvar.h"
+#include "mutex.h"
+#include "thread.h"
+
+/*
+ * Structure used to bind a waiting thread and a condition variable.
+ *
+ * The purpose of this structure is to avoid adding the node to the thread
+ * structure. Instead, it's allocated from the stack and only exists while
+ * the thread is waiting for the condition variable to be signalled.
+ *
+ * When another thread signals the condition variable, it finds threads
+ * to wake up by accessing the condition variable list of waiters.
+ *
+ * The awaken member records whether the waiting thread has actually been
+ * awaken, to guard against spurious wake-ups.
+ *
+ * Preemption must be disabled when accessing a waiter.
+ */
+struct condvar_waiter {
+ struct list node;
+ struct thread *thread;
+ bool awaken;
+};
+
+static void
+condvar_waiter_init(struct condvar_waiter *waiter, struct thread *thread)
+{
+ waiter->thread = thread;
+ waiter->awaken = false;
+}
+
+static bool
+condvar_waiter_awaken(const struct condvar_waiter *waiter)
+{
+ return waiter->awaken;
+}
+
+static bool
+condvar_waiter_wakeup(struct condvar_waiter *waiter)
+{
+ if (condvar_waiter_awaken(waiter)) {
+ return false;
+ }
+
+ thread_wakeup(waiter->thread);
+ waiter->awaken = true;
+ return true;
+}
+
+void
+condvar_init(struct condvar *condvar)
+{
+ list_init(&condvar->waiters);
+}
+
+void
+condvar_signal(struct condvar *condvar)
+{
+ struct condvar_waiter *waiter;
+ bool awaken;
+
+ thread_preempt_disable();
+
+ list_for_each_entry(&condvar->waiters, waiter, node) {
+ awaken = condvar_waiter_wakeup(waiter);
+
+ if (awaken) {
+ break;
+ }
+ }
+
+ thread_preempt_enable();
+}
+
+void
+condvar_broadcast(struct condvar *condvar)
+{
+ struct condvar_waiter *waiter;
+
+ /*
+ * Note that this broadcast implementation, a very simple and naive one,
+ * allows a situation known as the "thundering herd problem" [1].
+ *
+ * Remember that a condition variable is always associated with a mutex
+ * when waiting on it. This means that, when broadcasting a condition
+ * variable on which many threads are waiting, they will all be awaken,
+ * but only one of them acquires the associated mutex. All the others
+ * will sleep, waiting for the mutex to be unlocked. This unnecessary
+ * round of wake-ups closely followed by sleeps may be very expensive
+ * compared to the cost of the critical sections around the wait, and
+ * that cost increases linearly with the number of waiting threads.
+ *
+ * Smarter but more complicated implementations can avoid this problem,
+ * e.g. by directly queuing the current waiters on the associated mutex.
+ *
+ * [1] https://en.wikipedia.org/wiki/Thundering_herd_problem
+ */
+
+ thread_preempt_disable();
+
+ list_for_each_entry(&condvar->waiters, waiter, node) {
+ condvar_waiter_wakeup(waiter);
+ }
+
+ thread_preempt_enable();
+}
+
+void
+condvar_wait(struct condvar *condvar, struct mutex *mutex)
+{
+ struct condvar_waiter waiter;
+ struct thread *thread;
+
+ thread = thread_self();
+ condvar_waiter_init(&waiter, thread);
+
+ thread_preempt_disable();
+
+ /*
+ * Unlocking the mutex associated with the condition variable after
+ * acquiring the condition variable (done here by disabling preemption)
+ * is what makes waiting "atomic". Note that atomicity isn't absolute.
+ * Here, the wait is atomic with respect to concurrent signals.
+ *
+ * Signalling a condition variable is always safe in the sense that
+ * it is permitted and won't make the system crash, but signals may be
+ * "missed" if the associated mutex isn't locked when signalling.
+ */
+ mutex_unlock(mutex);
+
+ list_insert_tail(&condvar->waiters, &waiter.node);
+
+ do {
+ thread_sleep();
+ } while (!condvar_waiter_awaken(&waiter));
+
+ list_remove(&waiter.node);
+
+ thread_preempt_enable();
+
+ /*
+ * Unlike releasing the mutex earlier, relocking the mutex may be
+ * done before or after releasing the condition variable. In this
+ * case, it may not be done before because acquiring the condition
+ * variable is achieved by disabling preemption, which forbids
+ * sleeping, and therefore locking a mutex, but another implementation
+ * may use a different synchronization mechanism.
+ *
+ * It's also slightly better to relock outside the previous critical
+ * section in order to make it shorter.
+ */
+ mutex_lock(mutex);
+}
diff --git a/src/condvar.h b/src/condvar.h
new file mode 100644
index 0000000..1448d69
--- /dev/null
+++ b/src/condvar.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Condition variable module.
+ *
+ * A condition variable is a thread synchronization tool used to wait until
+ * a predicate, in the form of data shared between multiple threads, becomes
+ * true. One or more thread may be waiting on a condition variable for the
+ * predicate to become true, and other threads may set the predicate and
+ * signal the condition variable, waking up one or all the waiters.
+ *
+ * This interface of condition variables closely matches the core
+ * requirements of the POSIX specification [1]. In particular, a condition
+ * variable must always be associated with a mutex, and waiting on a
+ * condition variable must always be done in a loop rechecking the
+ * predicate to guard against spurious wake-ups, which may occur for
+ * various reasons, ranging from implementation details to C/Unix signals,
+ * such as SIGINT, sent by a user (this kernel doesn't implement such
+ * signals, they're given as a real life example on modern systems).
+ *
+ * [1] http://pubs.opengroup.org/onlinepubs/9699919799/
+ */
+
+#ifndef _CONDVAR_H
+#define _CONDVAR_H
+
+#include <lib/list.h>
+
+#include "mutex.h"
+
+/*
+ * Condition variable type.
+ *
+ * All members are private.
+ */
+struct condvar {
+ struct list waiters;
+};
+
+/*
+ * Initialize a condition variable.
+ */
+void condvar_init(struct condvar *condvar);
+
+/*
+ * Signal a condition variable.
+ *
+ * At least one thread is awakened if any threads are waiting on the
+ * condition variable.
+ *
+ * Signalling a condition variable is always safe in the sense that
+ * it is permitted and won't make the system crash, but signals may be
+ * "missed" if the mutex associated with the condition variable isn't
+ * locked when signalling.
+ */
+void condvar_signal(struct condvar *condvar);
+
+/*
+ * Broadcast a condition variable.
+ *
+ * Same as signalling except all threads waiting on the given condition
+ * variable are awaken.
+ */
+void condvar_broadcast(struct condvar *condvar);
+
+/*
+ * Wait on a condition variable.
+ *
+ * This function makes the calling thread sleep until the given
+ * condition variable is signalled. A condition variable is always
+ * associated with a mutex when waiting. That mutex is used to
+ * serialize access to the variables shared between the waiting
+ * thread, and the signalling thread, including the predicate the
+ * calling thread is waiting on.
+ *
+ * When calling this function, the mutex must be locked, so that
+ * checking the predicate and waiting on the condition variable is
+ * done atomically with respect to signalling. Obviously, while
+ * waiting, the mutex must be unlocked, to allow another thread to
+ * set the predicate and signal any waiting thread. As a result,
+ * this function unlocks the mutex before sleeping, and relocks
+ * it before returning.
+ *
+ * Note that this function may return for other reasons than the
+ * condition variable being signalled. These wake-ups are called
+ * spurious wake-ups and may be caused by implementation details
+ * as well as manually waking up threads (e.g. with C/Unix signals
+ * such as SIGINT). This is why waiting on a condition variable
+ * should always be enclosed in a loop, rechecking the predicate
+ * on each iteration.
+ *
+ * Here is an example of waiting and signalling :
+ *
+ * static bool predicate;
+ * static struct mutex m;
+ * static struct condvar cv;
+ *
+ * void
+ * init(void)
+ * {
+ * predicate = false;
+ * mutex_init(&m);
+ * condvar_init(&cv);
+ * }
+ *
+ * void
+ * wait(void)
+ * {
+ * mutex_lock(&m);
+ *
+ * while (!predicate) { Checking the predicate and waiting
+ * condvar_wait(&cv, &m); on the condition variable is atomic
+ * } with respect to setting the predicate
+ * and signalling.
+ * mutex_unlock(&m);
+ * }
+ *
+ * void
+ * signal(void)
+ * {
+ * mutex_lock(&m); Because the mutex is locked, setting
+ * predicate = true; the predicate and signalling is
+ * condvar_signal(&cv); atomic with respect to checking the
+ * mutex_unlock(&m); predicate and waiting on the condition
+ * variable.
+ * }
+ */
+void condvar_wait(struct condvar *condvar, struct mutex *mutex);
+
+#endif /* _CONDVAR_H */
diff --git a/src/cpu.c b/src/cpu.c
new file mode 100644
index 0000000..c336c5e
--- /dev/null
+++ b/src/cpu.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * All references to the Intel 64 and IA-32 Architecture Software Developer's
+ * Manual are valid for order number: 325462-061US, December 2016.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <lib/macros.h>
+
+#include "cpu.h"
+#include "error.h"
+#include "i8259.h"
+#include "panic.h"
+#include "thread.h"
+
+/*
+ * Segment flags.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual, Volume 3
+ * System Programming Guide :
+ * - 3.4.5 Segment Descriptors
+ * - 3.5 System Descriptor Types
+ */
+#define CPU_SEG_DATA_RW 0x00000200
+#define CPU_SEG_CODE_RX 0x00000900
+#define CPU_SEG_S 0x00001000
+#define CPU_SEG_P 0x00008000
+#define CPU_SEG_DB 0x00400000
+#define CPU_SEG_G 0x00800000
+
+#define CPU_IDT_SIZE (CPU_IDT_VECT_IRQ_BASE + I8259_NR_IRQ_VECTORS)
+
+/*
+ * Segment descriptor.
+ *
+ * These entries are found in the GDT and IDT tables (described below).
+ * When loading a segment register, the value of the register is a
+ * segment selector, which is an index (in bytes) along with flags.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual,
+ * Volume 3 System Programming Guide :
+ * - 3.4.2 Segment Selectors
+ * - 3.4.5 Segment Descriptors
+ */
+struct cpu_seg_desc {
+ uint32_t low;
+ uint32_t high;
+};
+
+/*
+ * A pseudo descriptor is an operand for the LGDT/LIDT instructions.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual,
+ * Volume 3 System Programming Guide, 3.5.1 Segment Descriptor Tables,
+ * Figure 3-11 Pseudo-Descriptor Formats.
+ *
+ * This structure is packed to prevent any holes between limit and base.
+ */
+struct cpu_pseudo_desc {
+ uint16_t limit;
+ uint32_t base;
+} __packed;
+
+/*
+ * These segment descriptor tables are the Global Descriptor Table (GDT)
+ * and the Interrupt Descriptor Table (IDT) respectively. The GDT was
+ * historically used to create segments. Segmentation could be used to run
+ * multiple instances of the same program at different locations in memory,
+ * by changing the base address of segments. It could implement a simple
+ * form of memory protection by restricting the length of segments. With
+ * modern virtual memory based entirely on paging, segmentation has become
+ * obsolete, and all modern systems use a flat memory model, where all
+ * segments span the entire physical space. Segments may still be used to
+ * provide per-processor or per-thread variables (e.g. this is how TLS,
+ * thread-local storage, is implemented).
+ *
+ * The IDT is used for exception and interrupt handling, collectively known
+ * as interrupts. Here, "exception" refers to interrupts originating from
+ * the CPU such as a division by zero exception, whereas "IRQ" refers to
+ * interrupts raised by external devices. These terms are often used
+ * interchangeably. What's important to keep in mind is that interrupts
+ * divert the flow of execution of the processor. The IDT tells the processor
+ * where to branch when an interrupt occurs.
+ *
+ * The GDT and IDT should be 8-byte aligned for best performance.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual, Volume 3
+ * System Programming Guide :
+ * - 3.5.1 Segment Descriptor Tables (GDT)
+ * - 6.10 Interrupt Descriptor Table (IDT)
+ */
+static struct cpu_seg_desc cpu_gdt[CPU_GDT_SIZE] __aligned(8);
+static struct cpu_seg_desc cpu_idt[CPU_IDT_SIZE] __aligned(8);
+
+/*
+ * Handler for external interrupt requests.
+ */
+struct cpu_irq_handler {
+ cpu_irq_handler_fn_t fn;
+ void *arg;
+};
+
+/*
+ * Array where driver IRQ handlers are registered.
+ *
+ * Interrupts and preemption must be disabled when accessing the handlers.
+ */
+static struct cpu_irq_handler cpu_irq_handlers[I8259_NR_IRQ_VECTORS];
+
+/*
+ * The interrupt frame is the stack content forged by interrupt handlers.
+ * They store the data needed to restore the processor to its state prior
+ * to the interrupt.
+ */
+struct cpu_intr_frame {
+ /* These members are pushed by the low level ISRs */
+ uint32_t edi;
+ uint32_t esi;
+ uint32_t ebp;
+ uint32_t edx;
+ uint32_t ecx;
+ uint32_t ebx;
+ uint32_t eax;
+ uint32_t vector;
+
+ /*
+ * This member may be pushed by either the CPU or the low level ISRs
+ * for exceptions/interrupts that don't emit such an error code.
+ */
+ uint32_t error;
+
+ /* These members are automatically pushed by the CPU */
+ uint32_t eip;
+ uint32_t cs;
+ uint32_t eflags;
+};
+
+/*
+ * Declarations for C/assembly functions that are global so that they can
+ * be shared between cpu.c and cpu_asm.S, but are considered private to
+ * the cpu module.
+ */
+uint32_t cpu_get_eflags(void);
+void cpu_set_eflags(uint32_t eflags);
+void cpu_load_gdt(const struct cpu_pseudo_desc *desc);
+void cpu_load_idt(const struct cpu_pseudo_desc *desc);
+void cpu_intr_main(const struct cpu_intr_frame *frame);
+
+/*
+ * Low level interrupt service routines.
+ *
+ * These are the addresses where the CPU directly branches to when an
+ * interrupt is received.
+ */
+void cpu_isr_divide_error(void);
+void cpu_isr_general_protection(void);
+void cpu_isr_32(void);
+void cpu_isr_33(void);
+void cpu_isr_34(void);
+void cpu_isr_35(void);
+void cpu_isr_36(void);
+void cpu_isr_37(void);
+void cpu_isr_38(void);
+void cpu_isr_39(void);
+void cpu_isr_40(void);
+void cpu_isr_41(void);
+void cpu_isr_42(void);
+void cpu_isr_43(void);
+void cpu_isr_44(void);
+void cpu_isr_45(void);
+void cpu_isr_46(void);
+void cpu_isr_47(void);
+
+uint32_t
+cpu_intr_save(void)
+{
+ uint32_t eflags;
+
+ eflags = cpu_get_eflags();
+ cpu_intr_disable();
+ return eflags;
+}
+
+void
+cpu_intr_restore(uint32_t eflags)
+{
+ cpu_set_eflags(eflags);
+}
+
+bool
+cpu_intr_enabled(void)
+{
+ uint32_t eflags;
+
+ eflags = cpu_get_eflags();
+ return eflags & CPU_EFL_IF;
+}
+
+void
+cpu_halt(void)
+{
+ cpu_intr_disable();
+
+ for (;;) {
+ cpu_idle();
+ }
+}
+
+static void
+cpu_default_intr_handler(void)
+{
+ printf("cpu: error: unhandled interrupt\n");
+ cpu_halt();
+}
+
+static void
+cpu_seg_desc_init_null(struct cpu_seg_desc *desc)
+{
+ desc->low = 0;
+ desc->high = 0;
+}
+
+static void
+cpu_seg_desc_init_code(struct cpu_seg_desc *desc)
+{
+ /*
+ * Base: 0
+ * Limit: 0xffffffff
+ * Privilege level: 0 (most privileged)
+ */
+ desc->low = 0xffff;
+ desc->high = CPU_SEG_G
+ | CPU_SEG_DB
+ | (0xf << 16)
+ | CPU_SEG_P
+ | CPU_SEG_S
+ | CPU_SEG_CODE_RX;
+}
+
+static void
+cpu_seg_desc_init_data(struct cpu_seg_desc *desc)
+{
+ /*
+ * Base: 0
+ * Limit: 0xffffffff
+ * Privilege level: 0 (most privileged)
+ */
+ desc->low = 0xffff;
+ desc->high = CPU_SEG_G
+ | CPU_SEG_DB
+ | (0xf << 16)
+ | CPU_SEG_P
+ | CPU_SEG_S
+ | CPU_SEG_DATA_RW;
+}
+
+static void
+cpu_seg_desc_init_intr_gate(struct cpu_seg_desc *desc,
+ void (*handler)(void))
+{
+ desc->low = (CPU_GDT_SEL_CODE << 16)
+ | (((uint32_t)handler) & 0xffff);
+ desc->high = (((uint32_t)handler) & 0xffff0000)
+ | CPU_SEG_P
+ | 0xe00;
+}
+
+static void
+cpu_pseudo_desc_init(struct cpu_pseudo_desc *desc,
+ const void *addr, size_t size)
+{
+ assert(size <= 0x10000);
+ desc->limit = size - 1;
+ desc->base = (uint32_t)addr;
+}
+
+static struct cpu_seg_desc *
+cpu_get_gdt_entry(size_t selector)
+{
+ size_t index;
+
+ /*
+ * The first 3 bits are the TI and RPL bits
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual,
+ * Volume 3 System Programming Guide, 3.4.2 Segment Selectors.
+ */
+ index = selector >> 3;
+ assert(index < ARRAY_SIZE(cpu_gdt));
+ return &cpu_gdt[index];
+}
+
+static void
+cpu_irq_handler_init(struct cpu_irq_handler *handler)
+{
+ handler->fn = NULL;
+}
+
+static struct cpu_irq_handler *
+cpu_lookup_irq_handler(unsigned int irq)
+{
+ assert(irq < ARRAY_SIZE(cpu_irq_handlers));
+ return &cpu_irq_handlers[irq];
+}
+
+static void
+cpu_irq_handler_set_fn(struct cpu_irq_handler *handler,
+ cpu_irq_handler_fn_t fn, void *arg)
+{
+ assert(handler->fn == NULL);
+ handler->fn = fn;
+ handler->arg = arg;
+}
+
+static void
+cpu_setup_gdt(void)
+{
+ struct cpu_pseudo_desc pseudo_desc;
+
+ cpu_seg_desc_init_null(cpu_get_gdt_entry(CPU_GDT_SEL_NULL));
+ cpu_seg_desc_init_code(cpu_get_gdt_entry(CPU_GDT_SEL_CODE));
+ cpu_seg_desc_init_data(cpu_get_gdt_entry(CPU_GDT_SEL_DATA));
+
+ cpu_pseudo_desc_init(&pseudo_desc, cpu_gdt, sizeof(cpu_gdt));
+ cpu_load_gdt(&pseudo_desc);
+}
+
+static void
+cpu_setup_idt(void)
+{
+ struct cpu_pseudo_desc pseudo_desc;
+
+ for (size_t i = 0; i < ARRAY_SIZE(cpu_irq_handlers); i++) {
+ cpu_irq_handler_init(cpu_lookup_irq_handler(i));
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(cpu_idt); i++) {
+ cpu_seg_desc_init_intr_gate(&cpu_idt[i], cpu_default_intr_handler);
+ }
+
+ cpu_seg_desc_init_intr_gate(&cpu_idt[CPU_IDT_VECT_DIV],
+ cpu_isr_divide_error);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[CPU_IDT_VECT_GP],
+ cpu_isr_general_protection);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[32], cpu_isr_32);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[33], cpu_isr_33);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[34], cpu_isr_34);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[35], cpu_isr_35);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[36], cpu_isr_36);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[37], cpu_isr_37);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[38], cpu_isr_38);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[39], cpu_isr_39);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[40], cpu_isr_40);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[41], cpu_isr_41);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[42], cpu_isr_42);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[43], cpu_isr_43);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[44], cpu_isr_44);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[45], cpu_isr_45);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[46], cpu_isr_46);
+ cpu_seg_desc_init_intr_gate(&cpu_idt[47], cpu_isr_47);
+
+ cpu_pseudo_desc_init(&pseudo_desc, cpu_idt, sizeof(cpu_idt));
+ cpu_load_idt(&pseudo_desc);
+}
+
+static void
+cpu_print_frame(const struct cpu_intr_frame *frame)
+{
+ printf("cpu: vector: %-8x eip: %08x eax: %08x ebx: %08x\n"
+ "cpu: error: %-8x esp: %08x ecx: %08x edx: %08x\n"
+ "cpu: eflags: %08x ebp: %08x esi: %08x edi: %08x\n",
+ (unsigned int)frame->vector, (unsigned int)frame->eip,
+ (unsigned int)frame->eax, (unsigned int)frame->ebx,
+ (unsigned int)frame->error, (unsigned int)(frame + 1),
+ (unsigned int)frame->ecx, (unsigned int)frame->edx,
+ (unsigned int)frame->eflags, (unsigned int)frame->ebp,
+ (unsigned int)frame->esi, (unsigned int)frame->edi);
+}
+
+static void
+cpu_exc_main(const struct cpu_intr_frame *frame)
+{
+ printf("cpu: exception:\n");
+ cpu_print_frame(frame);
+
+ switch (frame->vector)
+ {
+ case CPU_IDT_VECT_DIV:
+ panic("cpu: divide error");
+ case CPU_IDT_VECT_GP:
+ panic("cpu: general protection fault");
+ default:
+ cpu_default_intr_handler();
+ }
+}
+
+void
+cpu_intr_main(const struct cpu_intr_frame *frame)
+{
+ struct cpu_irq_handler *handler;
+ unsigned int irq;
+
+ assert(!cpu_intr_enabled());
+ assert(frame->vector < ARRAY_SIZE(cpu_idt));
+
+ /*
+ * Interrupt handlers may call functions that may in turn yield the
+ * processor. When running in interrupt context, as opposed to thread
+ * context, there is no way to yield the processor, because the context
+ * isn't saved into a scheduled structure, which is what threads are
+ * for. As a result, disable preemption to prevent an invalid context
+ * switch.
+ */
+ thread_preempt_disable();
+
+ if (frame->vector < CPU_IDT_VECT_IRQ_BASE) {
+ cpu_exc_main(frame);
+ } else {
+ irq = frame->vector - CPU_IDT_VECT_IRQ_BASE;
+
+ /*
+ * Acknowledge the IRQ as early as possible to allow another one to
+ * be raised.
+ */
+ i8259_irq_eoi(irq);
+
+ handler = cpu_lookup_irq_handler(irq);
+
+ if (!handler || !handler->fn) {
+ printf("cpu: error: invalid handler for irq %u\n", irq);
+ } else {
+ handler->fn(handler->arg);
+ }
+ }
+
+ /*
+ * On entry, preemption could have been either enabled or disabled.
+ * If it was enabled, this call will reenable it. As a side effect,
+ * it will check if the current thread was marked for yielding, e.g.
+ * because the interrupt handler has awaken a higher priority thread,
+ * in which case a context switch is triggerred. Such context switches
+ * are called involuntary.
+ */
+ thread_preempt_enable();
+}
+
+void
+cpu_irq_register(unsigned int irq, cpu_irq_handler_fn_t fn, void *arg)
+{
+ struct cpu_irq_handler *handler;
+ uint32_t eflags;
+
+ thread_preempt_disable();
+ eflags = cpu_intr_save();
+
+ handler = cpu_lookup_irq_handler(irq);
+ cpu_irq_handler_set_fn(handler, fn, arg);
+ i8259_irq_enable(irq);
+
+ cpu_intr_restore(eflags); /* Release in reverse acquisition order */
+ thread_preempt_enable(); /* Was a second preempt_disable: leaked the preemption counter */
+}
+
+void
+cpu_setup(void)
+{
+ cpu_setup_gdt();
+ cpu_setup_idt();
+}
diff --git a/src/cpu.h b/src/cpu.h
new file mode 100644
index 0000000..6326a58
--- /dev/null
+++ b/src/cpu.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * CPU services.
+ *
+ * The main functionality of this module is to provide interrupt control,
+ * and registration of IRQ handlers.
+ *
+ * See the i8259 module.
+ */
+
+#ifndef _CPU_H
+#define _CPU_H
+
+/*
+ * EFLAGS register flags.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual, Volume 3
+ * System Programming Guide, 2.3 System Flags and Fields in The EFLAGS Register.
+ */
+#define CPU_EFL_ONE 0x002
+#define CPU_EFL_IF 0x200
+
+/*
+ * GDT segment descriptor indexes, in bytes.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual, Volume 3
+ * System Programming Guide, 3.4.1 Segment Descriptor Tables.
+ */
+#define CPU_GDT_SEL_NULL 0x00
+#define CPU_GDT_SEL_CODE 0x08
+#define CPU_GDT_SEL_DATA 0x10
+#define CPU_GDT_SIZE 3
+
+/*
+ * IDT segment descriptor indexes (exception and interrupt vectors).
+ *
+ * There are actually a lot more potential exceptions on x86. This list
+ * only includes vectors that are handled by the implementation.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual, Volume 3
+ * System Programming Guide, 6.3 Sources of Interrupts.
+ */
+#define CPU_IDT_VECT_DIV 0 /* Divide error */
+#define CPU_IDT_VECT_GP 13 /* General protection fault */
+#define CPU_IDT_VECT_IRQ_BASE 32 /* Base vector for external IRQs */
+
+/*
+ * Preprocessor declarations may be included by assembly source files, but
+ * C declarations may not.
+ */
+#ifndef __ASSEMBLER__
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/*
+ * Type for IRQ handler functions.
+ *
+ * When called, interrupts and preemption are disabled.
+ */
+typedef void (*cpu_irq_handler_fn_t)(void *arg);
+
+/*
+ * Enable/disable interrupts.
+ */
+void cpu_intr_enable(void);
+void cpu_intr_disable(void);
+
+/*
+ * Disable/restore interrupts.
+ *
+ * Calls to these functions can safely nest.
+ */
+uint32_t cpu_intr_save(void);
+void cpu_intr_restore(uint32_t eflags);
+
+/*
+ * Return true if interrupts are enabled.
+ *
+ * Implies a compiler barrier.
+ */
+bool cpu_intr_enabled(void);
+
+/*
+ * Enter an idle state until the next interrupt.
+ */
+void cpu_idle(void);
+
+/*
+ * Completely halt execution on the processor.
+ *
+ * This function disables interrupts and enters an infinite idle loop.
+ */
+void cpu_halt(void) __attribute__((noreturn));
+
+/*
+ * Register an IRQ handler.
+ *
+ * When the given IRQ is raised, the handler function is called with the
+ * given argument.
+ */
+void cpu_irq_register(unsigned int irq, cpu_irq_handler_fn_t fn, void *arg);
+
+/*
+ * Initialize the cpu module.
+ */
+void cpu_setup(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _CPU_H */
diff --git a/src/cpu_asm.S b/src/cpu_asm.S
new file mode 100644
index 0000000..cdaf081
--- /dev/null
+++ b/src/cpu_asm.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "cpu.h"
+
+.section .text
+.code32
+
+/*
+ * uint32_t cpu_get_eflags(void)
+ *
+ * Return the current value of the EFLAGS register in eax.
+ */
+.global cpu_get_eflags
+cpu_get_eflags:
+ pushf
+ pop %eax
+ ret
+
+/*
+ * void cpu_set_eflags(uint32_t eflags)
+ *
+ * Load the EFLAGS register from the first stack argument.
+ */
+.global cpu_set_eflags
+cpu_set_eflags:
+ mov 4(%esp), %eax /* eax = eflags */
+ push %eax
+ popf
+ ret
+
+/*
+ * Enable interrupt delivery by setting the EFLAGS.IF flag.
+ */
+.global cpu_intr_enable
+cpu_intr_enable:
+ sti
+ ret
+
+/*
+ * Disable interrupt delivery by clearing the EFLAGS.IF flag.
+ */
+.global cpu_intr_disable
+cpu_intr_disable:
+ cli
+ ret
+
+/*
+ * Halt the processor until the next interrupt.
+ */
+.global cpu_idle
+cpu_idle:
+ hlt
+ ret
+
+.global cpu_load_gdt
+cpu_load_gdt:
+ mov 4(%esp), %eax /* eax = &desc */
+ lgdt (%eax) /* lgdt(*eax) */
+
+ mov $CPU_GDT_SEL_DATA, %eax
+ mov %eax, %ds
+ mov %eax, %es
+ mov %eax, %fs
+ mov %eax, %gs
+ mov %eax, %ss
+
+ /*
+ * The code segment cannot directly be written to, and is instead
+ * modified by performing a long jump.
+ *
+ * See Intel 64 and IA-32 Architecture Software Developer's Manual :
+ * - Volume 2 Instruction Set Reference
+ * - 3.2 Instructions (A-L)
+ * - JMP
+ * - Far Jumps in Protected Mode
+ * - Volume 3 System Programming Guide:
+ * - 3.4.3 Segment Registers
+ */
+ ljmp $CPU_GDT_SEL_CODE, $1f
+
+1:
+ ret
+
+.global cpu_load_idt
+cpu_load_idt:
+ mov 4(%esp), %eax /* eax = &desc */
+ lidt (%eax) /* lidt(*eax) */
+ ret
+
+/*
+ * See struct cpu_intr_frame in cpu.c.
+ */
+.macro CPU_INTR_STORE_REGISTERS
+ push %edi
+ push %esi
+ push %ebp
+ push %edx
+ push %ecx
+ push %ebx
+ push %eax
+.endm
+
+.macro CPU_INTR_LOAD_REGISTERS
+ pop %eax
+ pop %ebx
+ pop %ecx
+ pop %edx
+ pop %ebp
+ pop %esi
+ pop %edi
+.endm
+
+/*
+ * Some interrupts push an error code, and some don't.
+ * Have a single interrupt frame layout by pushing a dummy error code.
+ */
+#define CPU_INTR(vector, name) \
+.global name; \
+name: \
+ pushl $0; \
+ pushl $(vector); \
+ jmp cpu_intr_common
+
+#define CPU_INTR_ERROR(vector, name) \
+.global name; \
+name: \
+ pushl $(vector); \
+ jmp cpu_intr_common
+
+/*
+ * Common low level interrupt handling code.
+ *
+ * On entry, the stack holds the vector and the (possibly dummy) error
+ * code pushed by the ISR stub, below the state pushed by the processor.
+ * Saving the general purpose registers completes the interrupt frame,
+ * whose address (the current stack pointer) is passed to cpu_intr_main.
+ */
+cpu_intr_common:
+ CPU_INTR_STORE_REGISTERS
+ push %esp /* push the address of the interrupt frame */
+ call cpu_intr_main /* cpu_intr_main(frame) */
+ add $4, %esp /* restore the stack pointer */
+ CPU_INTR_LOAD_REGISTERS
+ add $8, %esp /* skip vector and error */
+ iret /* return from interrupt */
+
+CPU_INTR(CPU_IDT_VECT_DIV, cpu_isr_divide_error)
+CPU_INTR_ERROR(CPU_IDT_VECT_GP, cpu_isr_general_protection)
+
+/*
+ * XXX There must be as many low level ISRs as there are possible IRQ vectors.
+ *
+ * See the i8259 module.
+ */
+CPU_INTR(32, cpu_isr_32)
+CPU_INTR(33, cpu_isr_33)
+CPU_INTR(34, cpu_isr_34)
+CPU_INTR(35, cpu_isr_35)
+CPU_INTR(36, cpu_isr_36)
+CPU_INTR(37, cpu_isr_37)
+CPU_INTR(38, cpu_isr_38)
+CPU_INTR(39, cpu_isr_39)
+CPU_INTR(40, cpu_isr_40)
+CPU_INTR(41, cpu_isr_41)
+CPU_INTR(42, cpu_isr_42)
+CPU_INTR(43, cpu_isr_43)
+CPU_INTR(44, cpu_isr_44)
+CPU_INTR(45, cpu_isr_45)
+CPU_INTR(46, cpu_isr_46)
+CPU_INTR(47, cpu_isr_47)
diff --git a/src/error.c b/src/error.c
new file mode 100644
index 0000000..8bd7875
--- /dev/null
+++ b/src/error.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <lib/macros.h>
+
+#include "error.h"
+#include "panic.h"
+
+/*
+ * Error message table.
+ *
+ * This table must be consistent with the enum defined in error.h :
+ * index 0 maps to success (0), and the following entries map to
+ * ERROR_INVAL, ERROR_AGAIN, ERROR_NOMEM, ERROR_IO, ERROR_BUSY and
+ * ERROR_EXIST, in that order.
+ */
+static const char *error_msg_table[] = {
+ "success",
+ "invalid argument",
+ "resource temporarily unavailable",
+ "not enough space",
+ "input/output error",
+ "resource busy",
+ "entry exists",
+};
+
+/*
+ * Return the message matching the given error.
+ *
+ * Values outside the table map to a generic "invalid error code" string,
+ * so this function never returns NULL.
+ */
+const char *
+error_str(unsigned int error)
+{
+ return (error < ARRAY_SIZE(error_msg_table))
+ ? error_msg_table[error]
+ : "invalid error code";
+}
+
+/*
+ * If error denotes an actual error (i.e. is not 0), panic, using the
+ * given string as a prefix for the error message. A NULL prefix is
+ * allowed, in which case the bare error message is reported.
+ */
+void
+error_check(int error, const char *prefix)
+{
+ if (!error) {
+ return;
+ }
+
+ /* Both prefix and the ": " separator are omitted when prefix is NULL */
+ panic("%s%s%s",
+ (prefix == NULL) ? "" : prefix,
+ (prefix == NULL) ? "" : ": ",
+ error_str(error));
+}
diff --git a/src/error.h b/src/error.h
new file mode 100644
index 0000000..fca3980
--- /dev/null
+++ b/src/error.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _ERROR_H
+#define _ERROR_H
+
+enum {
+ ERROR_INVAL = 1,
+ ERROR_AGAIN,
+ ERROR_NOMEM,
+ ERROR_IO,
+ ERROR_BUSY,
+ ERROR_EXIST,
+};
+
+/*
+ * Return the message matching the given error.
+ *
+ * The returned address points to a statically allocated, read only,
+ * null-terminated string literal. The caller must not attempt to use it
+ * for anything else than error reporting.
+ */
+const char * error_str(unsigned int error);
+
+/*
+ * If error denotes an actual error (i.e. is not 0), panic, using the given
+ * string as a prefix for the error message. A NULL prefix is allowed.
+ */
+void error_check(int error, const char *prefix);
+
+#endif /* _ERROR_H */
diff --git a/src/i8254.c b/src/i8254.c
new file mode 100644
index 0000000..fc4f48d
--- /dev/null
+++ b/src/i8254.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <lib/macros.h>
+
+#include "cpu.h"
+#include "i8254.h"
+#include "io.h"
+#include "thread.h"
+
+#define I8254_FREQ 1193182
+
+#define I8254_PORT_CHANNEL0 0x40
+#define I8254_PORT_MODE 0x43
+
+#define I8254_CONTROL_BINARY 0x00
+#define I8254_CONTROL_RATE_GEN 0x04
+#define I8254_CONTROL_RW_LSB 0x10
+#define I8254_CONTROL_RW_MSB 0x20
+#define I8254_CONTROL_COUNTER0 0x00
+
+#define I8254_INITIAL_COUNT DIV_CEIL(I8254_FREQ, THREAD_SCHED_FREQ)
+
+#define I8254_IRQ 0
+
+/*
+ * Timer IRQ handler, invoked once per programmed timer period.
+ *
+ * Runs with interrupts and preemption disabled, as stated by the
+ * cpu_irq_handler_fn_t contract. The argument is unused.
+ */
+static void
+i8254_irq_handler(void *arg)
+{
+ (void)arg;
+ thread_report_tick();
+}
+
+void
+i8254_setup(void)
+{
+ uint16_t value;
+
+ /*
+ * Program the timer to raise an interrupt at the scheduling frequency.
+ */
+
+ /* Counter 0, both bytes accessible, rate generator mode, binary count */
+ io_write(I8254_PORT_MODE, I8254_CONTROL_COUNTER0
+ | I8254_CONTROL_RW_MSB
+ | I8254_CONTROL_RW_LSB
+ | I8254_CONTROL_RATE_GEN
+ | I8254_CONTROL_BINARY);
+
+ /*
+ * The RW_LSB | RW_MSB access mode expects the initial count to be
+ * written low byte first, then high byte.
+ */
+ value = I8254_INITIAL_COUNT;
+ io_write(I8254_PORT_CHANNEL0, value & 0xff);
+ io_write(I8254_PORT_CHANNEL0, value >> 8);
+
+ cpu_irq_register(I8254_IRQ, i8254_irq_handler, NULL);
+}
diff --git a/src/i8254.h b/src/i8254.h
new file mode 100644
index 0000000..c70ded9
--- /dev/null
+++ b/src/i8254.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Intel 8254 programmable interval timer (PIT) driver.
+ */
+
+#ifndef _I8254_H
+#define _I8254_H
+
+/*
+ * Initialize the i8254 module.
+ */
+void i8254_setup(void);
+
+#endif /* _I8254_H */
diff --git a/src/i8259.c b/src/i8259.c
new file mode 100644
index 0000000..589e13c
--- /dev/null
+++ b/src/i8259.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * IRQ means Interrupt ReQuest. They're used by external hardware to signal
+ * the CPU, and in turn the OS, that an external event has happened and
+ * requires processing. The usual model is shown in the image at
+ * https://upload.wikimedia.org/wikipedia/commons/thumb/1/15/PIC_Hardware_interrupt_path.svg/300px-PIC_Hardware_interrupt_path.svg.png.
+ *
+ * This driver implements IRQ handling on the Intel 8259 PIC. The IBM PC/AT
+ * actually uses 2 of these PICs for external interrupt handling, as shown
+ * in https://masherz.files.wordpress.com/2010/08/217.jpg. The public
+ * interface completely hides this detail and considers all given IRQs
+ * as logical indexes, used to find the corresponding PIC (master or slave)
+ * and the local IRQ on that PIC.
+ *
+ * 8259 datasheet :
+ * https://pdos.csail.mit.edu/6.828/2010/readings/hardware/8259A.pdf
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <lib/macros.h>
+
+#include "cpu.h"
+#include "error.h"
+#include "i8259.h"
+#include "io.h"
+
+#define I8259_IRQ_CASCADE 2 /* IRQ used for cascading on the master */
+#define I8259_NR_IRQS 8
+
+/*
+ * Initialization Control Word 1 bits.
+ */
+#define I8259_ICW1_ICW4 0x01 /* State that a 4th ICW will be sent */
+#define I8259_ICW1_INIT 0x10 /* This bit must be set */
+
+/*
+ * Initialization Control Word 4 bits.
+ */
+#define I8259_ICW4_8086 0x01 /* 8086 mode, as x86 is still compatible
+ with the old 8086 processor */
+
+#define I8259_OCW2_EOI 0x20 /* End of interrupt control word */
+
+enum {
+ I8259_PIC_ID_MASTER,
+ I8259_PIC_ID_SLAVE,
+ I8259_NR_PICS
+};
+
+/*
+ * Intel 8259 programmable interrupt controller.
+ */
+struct i8259_pic {
+ uint16_t cmd_port; /* Command I/O port of the PIC */
+ uint16_t data_port; /* Data I/O port of the PIC */
+ uint8_t imr; /* Cached value of the IMR register,
+ one bit per IRQ line, set = masked */
+ bool master; /* True if this PIC is the master */
+};
+
+/*
+ * Static instances of PIC objects.
+ *
+ * The I/O port numbers are the standard IBM PC/AT assignments. All IRQ
+ * lines start fully masked (imr = 0xff).
+ */
+static struct i8259_pic i8259_pics[] = {
+ [I8259_PIC_ID_MASTER] = {
+ .cmd_port = 0x20,
+ .data_port = 0x21,
+ .imr = 0xff,
+ .master = true,
+ },
+ [I8259_PIC_ID_SLAVE] = {
+ .cmd_port = 0xa0,
+ .data_port = 0xa1,
+ .imr = 0xff,
+ .master = false,
+ },
+};
+
+/*
+ * Return the PIC object matching the given identifier.
+ */
+static struct i8259_pic *
+i8259_get_pic(unsigned int id)
+{
+ assert(id < ARRAY_SIZE(i8259_pics));
+ return &i8259_pics[id];
+}
+
+/*
+ * Convert a global IRQ number into a (PIC, local IRQ) pair.
+ *
+ * The local_irq pointer may be NULL if the caller isn't interested in
+ * the local index (see i8259_irq_eoi). Return ERROR_INVAL if the IRQ
+ * is out of range, in which case the output parameters are left
+ * untouched. The original code unconditionally wrote *local_irq in the
+ * error branch, dereferencing NULL when called with an invalid IRQ and
+ * no local_irq pointer.
+ */
+static int
+i8259_convert_global_irq(unsigned int irq, struct i8259_pic **pic,
+ unsigned int *local_irq)
+{
+ unsigned int pic_id;
+
+ if (irq >= (I8259_NR_IRQS * I8259_NR_PICS)) {
+ return ERROR_INVAL;
+ }
+
+ pic_id = (irq < I8259_NR_IRQS) ? I8259_PIC_ID_MASTER : I8259_PIC_ID_SLAVE;
+ *pic = i8259_get_pic(pic_id);
+
+ if (local_irq) {
+ *local_irq = irq % I8259_NR_IRQS;
+ }
+
+ return 0;
+}
+
+/*
+ * Write a command word to the PIC.
+ */
+static void
+i8259_pic_write_cmd(const struct i8259_pic *pic, uint8_t byte)
+{
+ io_write(pic->cmd_port, byte);
+}
+
+/*
+ * Write a data word to the PIC.
+ */
+static void
+i8259_pic_write_data(const struct i8259_pic *pic, uint8_t byte)
+{
+ io_write(pic->data_port, byte);
+}
+
+/*
+ * Load the cached interrupt mask into the PIC's IMR register.
+ */
+static void
+i8259_pic_apply_imr(const struct i8259_pic *pic)
+{
+ io_write(pic->data_port, pic->imr);
+}
+
+/*
+ * Unmask a local IRQ line by clearing its IMR bit.
+ */
+static void
+i8259_pic_enable_irq(struct i8259_pic *pic, unsigned int irq)
+{
+ assert(irq < I8259_NR_IRQS);
+
+ pic->imr &= ~(1 << irq);
+ i8259_pic_apply_imr(pic);
+}
+
+/*
+ * Mask a local IRQ line by setting its IMR bit.
+ */
+static void
+i8259_pic_disable_irq(struct i8259_pic *pic, unsigned int irq)
+{
+ assert(irq < I8259_NR_IRQS);
+
+ pic->imr |= (1 << irq);
+ i8259_pic_apply_imr(pic);
+}
+
+/*
+ * Send a non-specific end of interrupt command word to the PIC.
+ *
+ * Use the command port helper for consistency with the other command
+ * accesses in this module.
+ */
+static void
+i8259_pic_eoi(struct i8259_pic *pic)
+{
+ i8259_pic_write_cmd(pic, I8259_OCW2_EOI);
+}
+
+void
+i8259_setup(void)
+{
+ struct i8259_pic *master, *slave;
+
+ master = i8259_get_pic(I8259_PIC_ID_MASTER);
+ slave = i8259_get_pic(I8259_PIC_ID_SLAVE);
+
+ /*
+ * Initialization sequence, as described in the 8259A datasheet :
+ * ICW1 starts initialization, ICW2 sets the vector base, ICW3
+ * describes the master/slave cascade wiring, and ICW4 selects
+ * 8086 mode.
+ */
+ i8259_pic_write_cmd(master, I8259_ICW1_INIT | I8259_ICW1_ICW4);
+ i8259_pic_write_cmd(slave, I8259_ICW1_INIT | I8259_ICW1_ICW4);
+ i8259_pic_write_data(master, CPU_IDT_VECT_IRQ_BASE);
+ i8259_pic_write_data(slave, CPU_IDT_VECT_IRQ_BASE + I8259_NR_IRQS);
+ i8259_pic_write_data(master, 1 << I8259_IRQ_CASCADE);
+ i8259_pic_write_data(slave, I8259_IRQ_CASCADE);
+ i8259_pic_write_data(master, I8259_ICW4_8086);
+ i8259_pic_write_data(slave, I8259_ICW4_8086);
+
+ /*
+ * Unmask the cascade line so slave interrupts can reach the master,
+ * then load the (otherwise fully masked) IMRs into both PICs.
+ */
+ i8259_pic_enable_irq(master, I8259_IRQ_CASCADE);
+ i8259_pic_apply_imr(master);
+ i8259_pic_apply_imr(slave);
+}
+
+/*
+ * Unmask a global IRQ line on the PIC that owns it.
+ */
+void
+i8259_irq_enable(unsigned int irq)
+{
+ struct i8259_pic *pic;
+ unsigned int local_irq;
+ int error;
+
+ error = i8259_convert_global_irq(irq, &pic, &local_irq);
+ assert(!error);
+ i8259_pic_enable_irq(pic, local_irq);
+}
+
+/*
+ * Mask a global IRQ line on the PIC that owns it.
+ */
+void
+i8259_irq_disable(unsigned int irq)
+{
+ struct i8259_pic *pic;
+ unsigned int local_irq;
+ int error;
+
+ error = i8259_convert_global_irq(irq, &pic, &local_irq);
+ assert(!error);
+ i8259_pic_disable_irq(pic, local_irq);
+}
+
+/*
+ * Report an end of interrupt for the given global IRQ.
+ *
+ * A slave IRQ requires an EOI on both the slave and the master, since
+ * the interrupt was delivered through the cascade line. Must be called
+ * with interrupts disabled (asserted below).
+ */
+void
+i8259_irq_eoi(unsigned int irq)
+{
+ struct i8259_pic *pic;
+ int error;
+
+ assert(!cpu_intr_enabled());
+
+ error = i8259_convert_global_irq(irq, &pic, NULL);
+ assert(!error);
+
+ if (!pic->master) {
+ /*
+ * The order in which EOI messages are sent (master then slave or the
+ * reverse) is irrelevant :
+ * - If the slave is sent the EOI message first, it may raise another
+ * interrupt right away, in which case it will be pending at the
+ * master until the latter is sent the EOI message too.
+ * - If the master is sent the EOI message first, it may raise another
+ * interrupt right away, in which case it will be pending at the
+ * processor until interrupts are reenabled, assuming that this
+ * function is called with interrupts disabled, and that interrupts
+ * remain disabled until control is returned to the interrupted
+ * thread.
+ */
+ i8259_pic_eoi(i8259_get_pic(I8259_PIC_ID_MASTER));
+ }
+
+ i8259_pic_eoi(pic);
+}
diff --git a/src/i8259.h b/src/i8259.h
new file mode 100644
index 0000000..102d77b
--- /dev/null
+++ b/src/i8259.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Intel 8259 programmable interrupt controller (PIC) driver.
+ */
+
+#ifndef _I8259_H
+#define _I8259_H
+
+/*
+ * Range of vectors used for IRQ handling, 8 per PIC.
+ */
+#define I8259_NR_IRQ_VECTORS 16
+
+/*
+ * Initialize the i8259 module.
+ */
+void i8259_setup(void);
+
+/*
+ * Enable an IRQ line on the PIC.
+ */
+void i8259_irq_enable(unsigned int irq);
+
+/*
+ * Disable an IRQ line on the PIC.
+ */
+void i8259_irq_disable(unsigned int irq);
+
+/*
+ * Report an end of interrupt.
+ *
+ * This function must be called with interrupts disabled.
+ */
+void i8259_irq_eoi(unsigned int irq);
+
+#endif /* _I8259_H */
diff --git a/src/io.h b/src/io.h
new file mode 100644
index 0000000..19aa719
--- /dev/null
+++ b/src/io.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * I/O ports access.
+ *
+ * The x86 architecture is special in that, in addition to the physical memory
+ * address space, it also has an I/O port space. Most modern processors use
+ * the physical memory address space to access memory-mapped device memory and
+ * registers, and that's also the case on x86, but the I/O port space is also
+ * used for this purpose, at least for some legacy devices.
+ */
+
+#ifndef _IO_H
+#define _IO_H
+
+#include <stdint.h>
+
+/*
+ * Read a byte from an I/O port.
+ */
+uint8_t io_read(uint16_t port);
+
+/*
+ * Write a byte to an I/O port.
+ */
+void io_write(uint16_t port, uint8_t byte);
+
+#endif /* _IO_H */
diff --git a/src/io_asm.S b/src/io_asm.S
new file mode 100644
index 0000000..adf0c01
--- /dev/null
+++ b/src/io_asm.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+.section .text
+.code32
+
+/*
+ * uint8_t io_read(uint16_t port)
+ *
+ * Read one byte from an I/O port, returned in al.
+ */
+.global io_read
+io_read:
+ mov 4(%esp), %edx /* edx = port */
+ in %dx, %al /* al = in(dx) */
+ ret
+
+/*
+ * void io_write(uint16_t port, uint8_t byte)
+ *
+ * Write one byte to an I/O port.
+ */
+.global io_write
+io_write:
+ mov 8(%esp), %eax /* eax = byte */
+ mov 4(%esp), %edx /* edx = port */
+ out %al, %dx /* out(al, dx) */
+ ret
diff --git a/src/kernel.lds b/src/kernel.lds
new file mode 100644
index 0000000..f4f209a
--- /dev/null
+++ b/src/kernel.lds
@@ -0,0 +1,113 @@
+/*
+ * This linker script is used to drive the link step of the kernel, by e.g.
+ * forcing the linker to use specific addresses when allocating space for
+ * sections and symbols.
+ *
+ * It assumes flat physical memory (RAM) starting at 0, of size 64MB,
+ * of which only "upper memory", starting at 1MB, is used.
+ *
+ * On x86, the first 1MB of physical memory is where legacy BIOS mappings
+ * are mapped. Completely skip that region for convenience.
+ *
+ * For a map of lower memory, see http://wiki.osdev.org/Memory_Map_(x86).
+ */
+
+/*
+ * Override the default entry point. This sets the address of the first
+ * instruction run when the boot loader passes control to the kernel.
+ */
+ENTRY(boot_start)
+
+/*
+ * The memory command is used to describe regions of memory. Here, a single
+ * region of RAM is defined. Adding another region, such as the video RAM
+ * at 0xa0000, would allow other commands in this script to allocate symbols
+ * out of that region.
+ *
+ * Describing memory regions is optional. It is best used when building for
+ * known devices with a specific memory layout.
+ */
+MEMORY
+{
+ RAM : ORIGIN = 1M, LENGTH = 63M
+}
+
+/*
+ * The program headers define segments in the ELF image. They are used to
+ * fix properties on sections mapped to segments. Here, the PT_LOAD flag
+ * tells the boot loader that a segment must actually be loaded from the
+ * ELF image to memory. Some sections, such as most debugging sections, are
+ * normally not loaded to memory. FLAGS are used to set Unix-like
+ * permissions to a segment, so that a value of 4 means the segment may
+ * only contain read-only non-executable data, 5 (4 + 1) means read-only
+ * executable data (normally instructions), and 6 (4 + 2) means read-write
+ * non-executable data.
+ *
+ * The hdr segment is meant to contain the multiboot header. The name "text"
+ * is the historical name used to refer to instructions.
+ *
+ * See https://sourceware.org/binutils/docs-2.29/ld/index.html.
+ */
+PHDRS
+{
+ hdr PT_LOAD FLAGS(4);
+ text PT_LOAD FLAGS(5);
+ data PT_LOAD FLAGS(6);
+}
+
+/*
+ * Sections define how the image data are partitioned.
+ *
+ * Common sections include :
+ *
+ * - .text
+ * The code section.
+ * - .data
+ * The section for initialized data (e.g. static int var = 123;).
+ * - .bss
+ * The section for uninitialized data (e.g. static int var;). Its name
+ * is historical and means "Block Started by Symbol". The .bss section
+ * is special in that it takes no space in the kernel image, because
+ * it's filled with bytes of value 0. Its size in memory is stored
+ * in the ELF file, and in this case, the boot loader initializes the
+ * memory for the .bss section.
+ *
+ * Here, an additional section is used to store the multiboot header, and
+ * any section for read-only data produced by the compiler is forced into
+ * the .data section.
+ *
+ * Sections are allocated out of the RAM memory region, and mapped to their
+ * corresponding program headers segment.
+ *
+ * See https://sourceware.org/binutils/docs-2.29/ld/Input-Section-Basics.html#Input-Section-Basics
+ * for more information about the syntax used below.
+ */
+SECTIONS
+{
+ .hdr : {
+ *(.hdr)
+ } > RAM : hdr
+
+ .text : {
+ *(.text*)
+ } > RAM : text
+
+ .data : {
+ *(.rodata*)
+ *(.data*)
+ } > RAM : data
+
+ .bss : {
+ *(.bss)
+ } > RAM : data
+
+ /*
+ * The .eh_frame section is used by DWARF tools to unwind the stack,
+ * allowing software to dump stack traces. Although this section could
+ * safely be left in the kernel image, it may confuse people who
+ * disassemble it.
+ */
+ /DISCARD/ : {
+ *(.eh_frame)
+ }
+}
diff --git a/src/main.c b/src/main.c
new file mode 100644
index 0000000..839e273
--- /dev/null
+++ b/src/main.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#include <lib/macros.h>
+#include <lib/shell.h>
+
+#include "cpu.h"
+#include "i8254.h"
+#include "i8259.h"
+#include "mem.h"
+#include "panic.h"
+#include "sw.h"
+#include "thread.h"
+#include "timer.h"
+#include "uart.h"
+
+/*
+ * XXX The Clang compiler apparently doesn't like the lack of prototype for
+ * the main function in free standing mode.
+ */
+void main(void);
+
+/*
+ * This function is the main entry point for C code. It's called from
+ * assembly code in the boot module, very soon after control is passed
+ * to the kernel.
+ */
+void
+main(void)
+{
+    /*
+     * NOTE(review): the order below looks deliberate (threading is
+     * bootstrapped first, hardware before the modules using it, and the
+     * scheduler is started last) - confirm before reordering any call.
+     */
+    thread_bootstrap();
+    cpu_setup();
+    i8259_setup();
+    i8254_setup();
+    uart_setup();
+    mem_setup();
+    thread_setup();
+    timer_setup();
+    shell_setup();
+    sw_setup();
+
+    printf("X1 " QUOTE(VERSION) "\n\n");
+
+    /* Hand control to the scheduler; this call does not return. */
+    thread_enable_scheduler();
+
+    /* Never reached */
+}
diff --git a/src/mem.c b/src/mem.c
new file mode 100644
index 0000000..572f2be
--- /dev/null
+++ b/src/mem.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Algorithm
+ * ---------
+ * This allocator is based on "The Art of Computer Programming" by Donald Knuth.
+ * The algorithm itself is described in :
+ * - Volume 1 - Fundamental Algorithms
+ * - Chapter 2 – Information Structures
+ * - 2.5 Dynamic Storage
+ * - Algorithm A (First-fit method)
+ * - Algorithm C (Liberation with boundary tags)
+ *
+ * The point of a memory allocator is to manage memory in terms of allocation
+ * and liberation requests. Allocation finds and reserves memory for a user,
+ * whereas liberation makes that memory available again for future allocations.
+ * Memory is not created, it must be available from the start, and the allocator
+ * simply tracks its usage with metadata, allocated from memory itself.
+ *
+ * Data structures
+ * ---------------
+ * Here is a somewhat graphical representation of how memory is organized in
+ * this implementation :
+ *
+ * allocated block free block
+ * +------+-----------------+ +------+-----------------+
+ * | size | allocation flag | | size | allocation flag | <- header boundary
+ * +------+-----------------+ +------+-----------------+ tag
+ * | | | free list node (prev | <- payload or
+ * . payload . | and next pointers) | free list node
+ * . . +------------------------+
+ * . . . .
+ * . . . .
+ * . . . .
+ * +------+-----------------+ +------+-----------------+
+ * | size | allocation flag | | size | allocation flag | <- footer boundary
+ * +------+-----------------+ +------+-----------------+ tag
+ *
+ * Here is a view of multiple contiguous blocks :
+ *
+ * +------+-----------------+ <--+
+ * | size | allocation flag | |
+ * +------+-----------------+ +- single block
+ * | | |
+ * . payload . |
+ * . . |
+ * . . |
+ * +------+-----------------+ |
+ * | size | allocation flag | |
+ * +------+-----------------+ <--+
+ * +------+-----------------+
+ * | size | allocation flag |
+ * +------+-----------------+
+ * | |
+ * . payload .
+ * . .
+ * . .
+ * +------+-----------------+
+ * | size | allocation flag |
+ * +------+-----------------+
+ * +------+-----------------+
+ * | size | allocation flag |
+ * +------+-----------------+
+ * | |
+ * . payload .
+ * . .
+ * . .
+ * +------+-----------------+
+ * | size | allocation flag |
+ * +------+-----------------+
+ *
+ * The reason for the footer boundary tag is merging on liberation. When
+ * called, the mem_free() function is given a pointer to a payload. Since
+ * the size of a boundary tag is fixed, the address of the whole block
+ * can easily be computed. In order to reduce fragmentation, i.e. a state
+ * where all free blocks are small and prevent allocating bigger blocks,
+ * the allocator attempts to merge neighbor free blocks. Obtaining the
+ * address of the next block is easily achieved by simply adding the size
+ * of the current block, stored in the boundary tag, to the address of
+ * the block. But without a footer boundary tag, finding the address of
+ * the previous block is computationally expensive.
+ *
+ * Alignment
+ * ---------
+ * The word "aligned" and references to "alignment" in general can be
+ * found throughout the documentation of this module. Alignment is a
+ * property of a value, usually an address, to be a multiple of a size.
+ * This value is said to be "size-byte aligned", or "aligned on a size
+ * byte boundary". Common sizes include the processor word or cache line
+ * sizes. For example, the x86 architecture is 32-bits, making the word
+ * size 4 bytes. Addresses such as 0, 4, 8, 12, 512 and 516 are 4-byte
+ * aligned, whereas 1, 2, 3, 511, 513, 514 and 515 aren't.
+ *
+ *
+ * Pointers
+ * --------
+ * The code in this module makes extensive use of pointer arithmetic and
+ * conversion between pointer types. It's important to keep in mind that,
+ * in C, the void pointer is meant as a generic reference to objects of
+ * any type. As a result, any pointer can be assigned to a void pointer
+ * without explicit casting, and a void pointer may be assigned to any
+ * pointer as well.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lib/list.h>
+#include <lib/macros.h>
+
+#include "mem.h"
+#include "mutex.h"
+
+/*
+ * Total size of the backing storage heap.
+ *
+ * Note that the resulting kernel image file remains much smaller than this.
+ * The reason is because the heap is defined as uninitialized data, which are
+ * allocated out of the bss section by default. The bss section is filled
+ * with zeroes when the kernel image is loaded by the boot loader, as
+ * mandated by the ELF specification, which means there is no need to store
+ * the heap data, or any other statically allocated uninitialized data, in
+ * the kernel image file.
+ */
+#define MEM_HEAP_SIZE (32 * 1024 * 1024)
+
+/*
+ * Alignment required on addresses returned by mem_alloc().
+ *
+ * See the description of mem_alloc() in the public header.
+ */
+#define MEM_ALIGN 4
+
+/*
+ * Minimum size of a block.
+ *
+ * When free, the payload of a block is used as storage for the free list node.
+ */
+#define MEM_BLOCK_MIN_SIZE P2ROUND(((sizeof(struct mem_btag) * 2) \
+ + sizeof(struct mem_free_node)), MEM_ALIGN)
+
+/*
+ * The heap itself must be aligned, so that the first block is also aligned.
+ * Assuming all blocks have an aligned size, the last block must also end on
+ * an aligned address.
+ *
+ * This kind of check increases safety and robustness when changing
+ * compile-time parameters such as the heap size declared above.
+ *
+ * Another relevant check would be to make sure the heap is large enough
+ * to contain at least one block, but since the minimum block size is
+ * defined using the C sizeof operator, unknown to the C preprocessor,
+ * this check requires C11 static assertions.
+ */
+#if !P2ALIGNED(MEM_HEAP_SIZE, MEM_ALIGN)
+#error "invalid heap size"
+#endif
+
+/*
+ * Masks applied on boundary tags to extract the size and the allocation flag.
+ */
+#define MEM_BTAG_ALLOCATED_MASK ((size_t)0x1)
+#define MEM_BTAG_SIZE_MASK (~MEM_BTAG_ALLOCATED_MASK)
+
+/*
+ * Boundary tag.
+ *
+ * Since the alignment constraint specified for mem_alloc() applies to
+ * payloads, not blocks, it's important that boundary tags also be aligned.
+ * This is a check that would best be performed with C11 static assertions.
+ *
+ * In addition, the alignment constraint implies that the least significant
+ * bit is always 0. Therefore, this bit is used to store the allocation flag.
+ */
+struct mem_btag {
+    size_t value;   /* block size ORed with the allocation flag (bit 0) */
+};
+
+/*
+ * Memory block.
+ *
+ * Note the use of a C99 flexible array member. This construction enables
+ * the implementation to directly access the payload without pointer
+ * arithmetic or casting, and is one of the preferred ways to deal with
+ * headers.
+ */
+struct mem_block {
+    struct mem_btag btag;   /* header tag; must remain the first member,
+                               see mem_block_prev() */
+    char payload[];         /* C99 flexible array member */
+};
+
+/*
+ * Free list node.
+ *
+ * This structure is used as the payload of free blocks.
+ */
+struct mem_free_node {
+    struct list node;   /* linkage in the free list */
+};
+
+/*
+ * List of free nodes.
+ *
+ * Here is an example of a TODO entry, a method used to store and retrieve
+ * pending tasks using source code only :
+ *
+ * TODO Statistics counters.
+ */
+struct mem_free_list {
+    struct list free_nodes;     /* head of the list of free blocks */
+};
+
+/*
+ * Memory heap.
+ *
+ * This allocator uses a single heap initialized with a single large free
+ * block. The allocator could be extended to support multiple heaps, each
+ * initialized with a single free block, forming an initial free list of
+ * more than one block.
+ *
+ * The heap must be correctly aligned, so that the first block is
+ * correctly aligned.
+ */
+static char mem_heap[MEM_HEAP_SIZE] __aligned(MEM_ALIGN); /* lives in .bss */
+
+/*
+ * The unique free list.
+ *
+ * In order to improve allocation speed, multiple lists may be used, each
+ * for a specific size range. Such lists are called segregated free lists
+ * and enable more allocation policies, such as instant-fit.
+ */
+static struct mem_free_list mem_free_list; /* the single first-fit free list */
+
+/*
+ * Global mutex used to serialize access to allocation data.
+ */
+static struct mutex mem_mutex; /* serializes all access to allocator state */
+
+static bool
+mem_aligned(size_t val)
+{
+    /* True when val is a multiple of MEM_ALIGN. */
+    return P2ALIGNED(val, MEM_ALIGN) ? true : false;
+}
+
+static void *
+mem_heap_end(void)
+{
+    /* First address past the last byte of the heap. */
+    return mem_heap + sizeof(mem_heap);
+}
+
+static bool
+mem_btag_allocated(const struct mem_btag *btag)
+{
+    /* The allocation flag lives in the least significant bit. */
+    return (btag->value & MEM_BTAG_ALLOCATED_MASK) != 0;
+}
+
+static void
+mem_btag_set_allocated(struct mem_btag *btag)
+{
+    btag->value = btag->value | MEM_BTAG_ALLOCATED_MASK;
+}
+
+static void
+mem_btag_clear_allocated(struct mem_btag *btag)
+{
+    /* Keeping only the size bits clears the allocation flag. */
+    btag->value &= MEM_BTAG_SIZE_MASK;
+}
+
+static size_t
+mem_btag_size(const struct mem_btag *btag)
+{
+    return btag->value & MEM_BTAG_SIZE_MASK;
+}
+
+static void
+mem_btag_init(struct mem_btag *btag, size_t size)
+{
+    /*
+     * Blocks start out allocated. The size is aligned, leaving the low
+     * bit free to carry the allocation flag.
+     */
+    btag->value = size | MEM_BTAG_ALLOCATED_MASK;
+}
+
+static size_t
+mem_block_size(const struct mem_block *block)
+{
+    return mem_btag_size(&block->btag);
+}
+
+static struct mem_block *
+mem_block_from_payload(void *payload)
+{
+    /*
+     * Walk back from the payload to the enclosing block header. The
+     * char * cast makes the pointer arithmetic byte-granular, since C99
+     * mandates sizeof(char) == 1 (6.5.3.4 The sizeof operator); plain
+     * pointer arithmetic is otherwise scaled by the pointed-to type.
+     */
+    return (struct mem_block *)((char *)payload
+                                - offsetof(struct mem_block, payload));
+}
+
+static void *
+mem_block_end(const struct mem_block *block)
+{
+    /* Byte-granular arithmetic, see mem_block_from_payload(). */
+    return (char *)block + mem_block_size(block);
+}
+
+static struct mem_btag *
+mem_block_header_btag(struct mem_block *block)
+{
+    return &block->btag;
+}
+
+static struct mem_btag *
+mem_block_footer_btag(struct mem_block *block)
+{
+    struct mem_btag *end;
+
+    /* The footer is the last tag-sized slot of the block. */
+    end = mem_block_end(block);
+    return end - 1;
+}
+
+static struct mem_block *
+mem_block_prev(struct mem_block *block)
+{
+    struct mem_btag *prev_footer;
+
+    /* The first block of the heap has no predecessor. */
+    if ((char *)block == mem_heap) {
+        return NULL;
+    }
+
+    /*
+     * The header tag is the first member of a block, so the tag right
+     * before this block is the previous block's footer, which stores
+     * the previous block's size.
+     */
+    prev_footer = (struct mem_btag *)block - 1;
+    return (struct mem_block *)((char *)block - mem_btag_size(prev_footer));
+}
+
+static struct mem_block *
+mem_block_next(struct mem_block *block)
+{
+    struct mem_block *next;
+
+    next = mem_block_end(block);
+
+    /* The last block of the heap has no successor. */
+    return ((void *)next == mem_heap_end()) ? NULL : next;
+}
+
+static bool
+mem_block_allocated(struct mem_block *block)
+{
+    return mem_btag_allocated(&block->btag);
+}
+
+static void
+mem_block_set_allocated(struct mem_block *block)
+{
+    /*
+     * Both tags must agree, see mem_block_prev(). The order doesn't
+     * matter : the footer location only depends on the size bits.
+     */
+    mem_btag_set_allocated(mem_block_footer_btag(block));
+    mem_btag_set_allocated(mem_block_header_btag(block));
+}
+
+static void
+mem_block_clear_allocated(struct mem_block *block)
+{
+    mem_btag_clear_allocated(mem_block_footer_btag(block));
+    mem_btag_clear_allocated(mem_block_header_btag(block));
+}
+
+static void
+mem_block_init(struct mem_block *block, size_t size)
+{
+    /*
+     * The header must be set first : the footer location is derived
+     * from the size stored in the header.
+     */
+    mem_btag_init(mem_block_header_btag(block), size);
+    mem_btag_init(mem_block_footer_btag(block), size);
+}
+
+static void *
+mem_block_payload(struct mem_block *block)
+{
+    return &block->payload[0];
+}
+
+static struct mem_free_node *
+mem_block_get_free_node(struct mem_block *block)
+{
+    /* The payload of a free block stores its free list node. */
+    assert(!mem_block_allocated(block));
+    return mem_block_payload(block);
+}
+
+static bool
+mem_block_inside_heap(const struct mem_block *block)
+{
+    void *end;
+
+    end = mem_heap_end();
+
+    /* The block must start at or after the heap start... */
+    if ((char *)block < mem_heap) {
+        return false;
+    }
+
+    /* ...its payload must begin before the heap end... */
+    if ((void *)block->payload >= end) {
+        return false;
+    }
+
+    /* ...and it must end at or before the heap end. */
+    return (void *)mem_block_end(block) <= end;
+}
+
+static void
+mem_free_list_add(struct mem_free_list *list, struct mem_block *block)
+{
+    struct mem_free_node *node;
+
+    assert(mem_block_allocated(block));
+
+    mem_block_clear_allocated(block);
+    node = mem_block_get_free_node(block);
+
+    /*
+     * Insert at the head : the first-fit scan starts there, so a block
+     * freed recently - likely still (partially) present in the processor
+     * cache - tends to be reused quickly, an inexpensive way to improve
+     * the chances of cache hits on allocation.
+     */
+    list_insert_head(&list->free_nodes, &node->node);
+}
+
+static void
+mem_free_list_remove(struct mem_free_list *list, struct mem_block *block)
+{
+    struct mem_free_node *node;
+
+    (void)list;     /* a single free list exists for now */
+
+    assert(!mem_block_allocated(block));
+
+    node = mem_block_get_free_node(block);
+    list_remove(&node->node);
+
+    /* A block off the free list is, by definition, allocated. */
+    mem_block_set_allocated(block);
+}
+
+static struct mem_block *
+mem_free_list_find(struct mem_free_list *list, size_t size)
+{
+    struct mem_free_node *fnode;
+
+    /*
+     * First-fit scan, O(n) in the number of free blocks. This linear
+     * worst case is one of the main reasons general-purpose allocation
+     * is avoided in interrupt handlers and real-time code, where
+     * constant-time or bounded-time allocators are preferred.
+     */
+    list_for_each_entry(&list->free_nodes, fnode, node) {
+        struct mem_block *block;
+
+        block = mem_block_from_payload(fnode);
+
+        if (mem_block_size(block) >= size) {
+            return block;
+        }
+    }
+
+    return NULL;
+}
+
+static void
+mem_free_list_init(struct mem_free_list *list)
+{
+    list_init(&list->free_nodes);
+}
+
+static bool
+mem_block_inside(struct mem_block *block, void *addr)
+{
+    /* True when addr falls within [block start, block end). */
+    if (addr < (void *)block) {
+        return false;
+    }
+
+    return addr < mem_block_end(block);
+}
+
+static bool
+mem_block_overlap(struct mem_block *block1, struct mem_block *block2)
+{
+    /* Two blocks overlap when either one starts inside the other. */
+    return mem_block_inside(block1, block2)
+           || mem_block_inside(block2, block1);
+}
+
+static struct mem_block *
+mem_block_split(struct mem_block *block, size_t size)
+{
+    struct mem_block *rest;
+    size_t rest_size;
+
+    assert(mem_block_allocated(block));
+    assert(mem_aligned(size));
+
+    /* Only split when the remainder can form a valid block on its own. */
+    if (mem_block_size(block) < (size + MEM_BLOCK_MIN_SIZE)) {
+        return NULL;
+    }
+
+    /* Compute the remainder size before shrinking the original block. */
+    rest_size = mem_block_size(block) - size;
+    mem_block_init(block, size);
+    rest = mem_block_end(block);
+    mem_block_init(rest, rest_size);
+
+    return rest;
+}
+
+static struct mem_block *
+mem_block_merge(struct mem_block *block1, struct mem_block *block2)
+{
+    size_t size;
+
+    assert(!mem_block_overlap(block1, block2));
+
+    /* Merging requires both blocks to be free. */
+    if (mem_block_allocated(block1) || mem_block_allocated(block2)) {
+        return NULL;
+    }
+
+    /*
+     * Removing from the free list marks the blocks allocated, which is
+     * required by mem_free_list_add() below.
+     */
+    mem_free_list_remove(&mem_free_list, block1);
+    mem_free_list_remove(&mem_free_list, block2);
+    size = mem_block_size(block1) + mem_block_size(block2);
+
+    /* The merged block starts at the lower of the two addresses. */
+    if (block1 > block2) {
+        block1 = block2;
+    }
+
+    mem_block_init(block1, size);
+    mem_free_list_add(&mem_free_list, block1);
+    return block1;
+}
+
+void
+mem_setup(void)
+{
+    struct mem_block *first;
+
+    /* The heap starts out as one single large free block. */
+    first = (struct mem_block *)mem_heap;
+    mem_block_init(first, sizeof(mem_heap));
+    mem_free_list_init(&mem_free_list);
+    mem_free_list_add(&mem_free_list, first);
+    mutex_init(&mem_mutex);
+}
+
+static size_t
+mem_convert_to_block_size(size_t size)
+{
+    /*
+     * Round the request up so every block size stays aligned - which,
+     * combined with the aligned heap start, keeps all block addresses
+     * aligned - then add room for the two boundary tags.
+     */
+    size = P2ROUND(size, MEM_ALIGN) + (sizeof(struct mem_btag) * 2);
+
+    /* A block must at least be able to hold a free list node when free. */
+    return (size < MEM_BLOCK_MIN_SIZE) ? MEM_BLOCK_MIN_SIZE : size;
+}
+
+void *
+mem_alloc(size_t size)
+{
+    struct mem_block *block, *rest;
+    void *ptr;
+
+    if (size == 0) {
+        return NULL;
+    }
+
+    size = mem_convert_to_block_size(size);
+
+    mutex_lock(&mem_mutex);
+
+    /* First-fit search over the free list. */
+    block = mem_free_list_find(&mem_free_list, size);
+
+    if (block != NULL) {
+        mem_free_list_remove(&mem_free_list, block);
+
+        /* Return any unused tail of the block to the free list. */
+        rest = mem_block_split(block, size);
+
+        if (rest != NULL) {
+            mem_free_list_add(&mem_free_list, rest);
+        }
+    }
+
+    mutex_unlock(&mem_mutex);
+
+    if (block == NULL) {
+        return NULL;
+    }
+
+    ptr = mem_block_payload(block);
+    assert(mem_aligned((uintptr_t)ptr));
+    return ptr;
+}
+
+void
+mem_free(void *ptr)
+{
+    struct mem_block *block, *tmp;
+
+    /* As with the standard free(), a NULL argument is a no-op. */
+    if (!ptr) {
+        return;
+    }
+
+    assert(mem_aligned((uintptr_t)ptr));
+
+    block = mem_block_from_payload(ptr);
+    assert(mem_block_inside_heap(block));
+
+    mutex_lock(&mem_mutex);
+
+    mem_free_list_add(&mem_free_list, block);
+
+    /*
+     * Try to merge with the previous neighbor first. On success, track
+     * the merged (lower-address) block so the next merge uses it.
+     */
+    tmp = mem_block_prev(block);
+
+    if (tmp) {
+        tmp = mem_block_merge(block, tmp);
+
+        if (tmp) {
+            block = tmp;
+        }
+    }
+
+    /* Then try to merge with the next neighbor. */
+    tmp = mem_block_next(block);
+
+    if (tmp) {
+        mem_block_merge(block, tmp);
+    }
+
+    mutex_unlock(&mem_mutex);
+}
diff --git a/src/mem.h b/src/mem.h
new file mode 100644
index 0000000..b04837a
--- /dev/null
+++ b/src/mem.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Kernel dynamic memory allocator.
+ *
+ * Here, the word "dynamic" is used in opposition to "static", which denotes
+ * memory allocated at compile time by the linker.
+ */
+
+#ifndef _MEM_H
+#define _MEM_H
+
+#include <stddef.h>
+
+/*
+ * Initialize the mem module.
+ */
+void mem_setup(void);
+
+/*
+ * Allocate memory.
+ *
+ * This function conforms to the specification of the standard malloc()
+ * function, i.e. :
+ * - The size argument is the allocation request size, in bytes.
+ * - An allocation size of 0 is permitted.
+ * - The content of the allocated block is uninitialized.
+ * - The returned value is the address of the allocated block of memory.
+ * - The address of the allocated block is aligned to the maximum built-in
+ * type size. Since this code targets the 32-bits i386 architecture, the
+ * largest built-in type is unsigned int, resulting in addresses aligned
+ * to 4 bytes boundaries. Here, "built-in" means natively supported by
+ * the processor. The document that defines the size of built-in types
+ * is the ABI (Application Binary Interface) specification, in this case
+ * System V Intel386 ABI [1] (see the GCC -mabi option for x86). The ABI
+ * normally uses one of the most common data models [2] for C types, in
+ * this case ILP32 (for int/long/pointers 32-bits).
+ *
+ * This last detail is important because C specifies the alignment of both
+ * built-in and aggregate types. In particular, the alignment of structure
+ * members must match the alignment of their respective types
+ * (ISO/IEC 9899:1999, 6.7.2.1 "Structure and union specifiers", 12 "Each
+ * non-bit-field member of a structure or union object is aligned in an
+ * implementation-defined manner appropriate to its type"). A compiler may
+ * safely assume that structure member accesses are correctly aligned and
+ * generate instructions assuming this alignment.
+ *
+ * On x86, this doesn't matter too much, because unaligned accesses have
+ * always been supported, although they are less performant, since the
+ * processor potentially has more work to do. For example, if an unaligned
+ * variable crosses a cache line boundary, the processor may have to load
+ * two cache lines instead of one.
+ *
+ * On other architectures, unaligned accesses may simply not be supported,
+ * and generate exceptions.
+ *
+ * [1] http://www.sco.com/developers/devspecs/abi386-4.pdf
+ * [2] http://www.unix.org/version2/whatsnew/lp64_wp.html
+ */
+void * mem_alloc(size_t size);
+
+/*
+ * Free memory.
+ *
+ * This function conforms to the specification of the standard free()
+ * function, i.e. :
+ * - It may safely be called with a NULL argument.
+ * - Otherwise, it may only be passed memory addresses returned by mem_alloc().
+ */
+void mem_free(void *ptr);
+
+#endif /* _MEM_H */
diff --git a/src/mutex.c b/src/mutex.c
new file mode 100644
index 0000000..d70bd82
--- /dev/null
+++ b/src/mutex.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+#include <lib/list.h>
+#include <lib/macros.h>
+
+#include "error.h"
+#include "mutex.h"
+#include "thread.h"
+
+/*
+ * Structure used to bind a waiting thread and a mutex.
+ *
+ * The purpose of this structure is to avoid adding the node to the thread
+ * structure. Instead, it's allocated from the stack and only exists while
+ * the thread is waiting for the mutex to be unlocked.
+ *
+ * When the owner unlocks the mutex, it finds threads to wake up by
+ * accessing the mutex list of waiters.
+ *
+ * Preemption must be disabled when accessing a waiter.
+ */
+struct mutex_waiter {
+    struct list node;       /* linkage in the mutex list of waiters */
+    struct thread *thread;  /* thread to wake up when the mutex is unlocked */
+};
+
+static void
+mutex_waiter_init(struct mutex_waiter *waiter, struct thread *thread)
+{
+    /* The node member is linked when the caller queues the waiter. */
+    waiter->thread = thread;
+}
+
+static void
+mutex_waiter_wakeup(struct mutex_waiter *waiter)
+{
+    /* Wake the waiting thread so it can retry acquiring the mutex. */
+    thread_wakeup(waiter->thread);
+}
+
+/*
+ * Initialize a mutex to the unlocked, unowned state.
+ */
+void
+mutex_init(struct mutex *mutex)
+{
+    list_init(&mutex->waiters);
+
+    /*
+     * The owner must be explicitly cleared : mutex_set_owner() asserts
+     * !mutex->owner, and for a mutex embedded in stack or heap memory
+     * this field would otherwise hold an indeterminate value, causing
+     * undefined behavior and spurious assertion failures.
+     */
+    mutex->owner = NULL;
+    mutex->locked = false;
+}
+
+static void
+mutex_set_owner(struct mutex *mutex, struct thread *thread)
+{
+    /* Called with preemption disabled, see mutex_lock()/mutex_trylock(). */
+    assert(!mutex->owner);
+    assert(!mutex->locked);
+
+    mutex->owner = thread;
+    mutex->locked = true;
+}
+
+static void
+mutex_clear_owner(struct mutex *mutex)
+{
+    /* Only the current owner may unlock the mutex. */
+    assert(mutex->owner == thread_self());
+    assert(mutex->locked);
+
+    mutex->owner = NULL;
+    mutex->locked = false;
+}
+
+void
+mutex_lock(struct mutex *mutex)
+{
+    struct thread *thread;
+
+    thread = thread_self();
+
+    /* Preemption is disabled to safely access the mutex and the waiter. */
+    thread_preempt_disable();
+
+    if (mutex->locked) {
+        /* The waiter lives on this stack, valid only while waiting here. */
+        struct mutex_waiter waiter;
+
+        mutex_waiter_init(&waiter, thread);
+        list_insert_tail(&mutex->waiters, &waiter.node);
+
+        /*
+         * Sleep until the mutex is released. The loop guards against
+         * the mutex being grabbed again between the wakeup and the
+         * moment this thread actually runs.
+         */
+        do {
+            thread_sleep();
+        } while (mutex->locked);
+
+        list_remove(&waiter.node);
+    }
+
+    mutex_set_owner(mutex, thread);
+
+    thread_preempt_enable();
+}
+
+int
+mutex_trylock(struct mutex *mutex)
+{
+    int error = 0;
+
+    thread_preempt_disable();
+
+    /* Unlike mutex_lock(), never wait : report contention instead. */
+    if (mutex->locked) {
+        error = ERROR_BUSY;
+    } else {
+        mutex_set_owner(mutex, thread_self());
+    }
+
+    thread_preempt_enable();
+
+    return error;
+}
+
+void
+mutex_unlock(struct mutex *mutex)
+{
+    thread_preempt_disable();
+
+    mutex_clear_owner(mutex);
+
+    /* Wake the oldest waiter (FIFO order), if any, so it can take over. */
+    if (!list_empty(&mutex->waiters)) {
+        struct mutex_waiter *waiter;
+
+        waiter = list_first_entry(&mutex->waiters, struct mutex_waiter, node);
+        mutex_waiter_wakeup(waiter);
+    }
+
+    thread_preempt_enable();
+}
diff --git a/src/mutex.h b/src/mutex.h
new file mode 100644
index 0000000..c158d5e
--- /dev/null
+++ b/src/mutex.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Mutex module.
+ *
+ * A mutex is a thread synchronization tool that provides mutual exclusion.
+ * The code between locking and unlocking a mutex forms a critical section
+ * that only one thread may be running at a time. This is true for all
+ * critical sections created with the same mutex, i.e. only one thread may
+ * be running code in all the critical sections created with the same mutex.
+ *
+ * A mutex is initially not owned. When a thread locks a mutex, it becomes
+ * owned until unlocked, and any other thread trying to lock the mutex
+ * waits for the owner to unlock it. This is called contention. Waiting
+ * can be active, e.g. by spinning on the mutex, constantly checking if
+ * it's still locked, or passive, by sleeping until the owner unlocks
+ * and wakes up one of the waiters. Mutexes are generally passive, and
+ * this is also the case for this module.
+ *
+ * Since mutexes work by blocking execution of some threads, they are a
+ * type of lock. Despite their apparent simplicity, they are associated
+ * with a number of problems, most considered difficult when compared to
+ * other well-known problems of computer science.
+ *
+ * In particular, mutexes are vulnerable to deadlocks, i.e. a situation
+ * where all threads involved are waiting for events that cannot occur.
+ * Imagine two threads, T1 and T2, and 2 mutexes, M1 and M2. Here is
+ * a representation of the execution history of those threads leading to
+ * a deadlock :
+ *
+ * T1 : lock(M1) -> lock(M2) (M2 is locked by T2)
+ * T2 : lock(M2) -> lock(M1) (M1 is locked by T1)
+ *
+ * Here, both threads are waiting for the other to unlock one of the
+ * mutexes before unlocking the other mutex, and none can make progress.
+ *
+ * The most simple solution to avoid deadlocks is to not use locks in
+ * the first place, and use other synchronization tools such as disabling
+ * preemption, or resorting to lock-free algorithms, but this is often
+ * not possible (preemption cannot normally be disabled outside kernel
+ * code, and lock-free algorithms are difficult to implement and may
+ * lack other dependencies like atomic instructions on small processors).
+ *
+ * Another simple solution is to avoid holding two mutexes at the same
+ * time. It can often be achieved, but not always. Sometimes, holding
+ * multiple mutexes at the same time is a requirement. In such cases,
+ * the most common solution is to establish a locking order, since a
+ * deadlock may only occur if mutexes are acquired in different orders.
+ * In the example above, the locking order could be represented as
+ * "M1 -> M2", stating that, when hodling both mutexes, M1 must always
+ * be locked before M2.
+ *
+ * Another common problem with mutexes is unbounded priority inversion.
+ * Imagine 2 threads, T1 and T2 with priorities 1 and 2 respectively
+ * (higher value means higher priority). On a real-time system with
+ * a simple fixed priority scheduler, T2 would preempt T1 whenever it
+ * is active. But if it then tries to lock a mutex owned by T1, it will
+ * have to wait for T1 to unlock the mutex. This is priority inversion,
+ * because T1 is running instead of T2 despite having a lower priority.
+ * Priority inversion itself is common and often unavoidable in many
+ * cases, and if kept reliably short, it normally doesn't disturb
+ * real-time behaviour. But if there is no guarantee that the critical
+ * section is time-bounded, real-time behaviour may not be achieved.
+ * The classic example is three threads, T1, T2, and T3, with priorities
+ * 1, 2, and 3 respectively, and T3 locking a mutex already owned by T1 :
+ *
+ * time ->
+ * T3 lock(M) + run ...
+ * T2 run + run + |
+ * T1 lock(M) - run + unlock(M) +
+ * +: preemption ^^^
+ * duration not known/bounded
+ *
+ * Here, T3 has to wait for T1 to unlock the mutex before making progress,
+ * but T2 may preempt T1 because of its higher priority. The time T2 runs,
+ * keeping T1 from making progress, is likely not known and unbounded, and
+ * it indirectly delays T3, despite T3 having a higher priority. This is
+ * unbounded priority inversion. It can be avoided at somewhat high cost
+ * with priority ceiling/inheritance, or by avoiding the use of mutexes,
+ * relying instead on e.g. message queues using preemption for
+ * synchronization.
+ *
+ * The implementation of this module is very simple and doesn't prevent
+ * unbounded priority inversions.
+ *
+ * When deciding whether to use a mutex or to disable preemption for
+ * mutual exclusion, keep in mind that all real-world mutex implementations
+ * disable preemption internally. As a result, it is best to use mutexes
+ * when critical sections are noticeably more expensive than the overhead
+ * of locking. Another parameter is whether or not the critical section
+ * must remain preemptible.
+ *
+ * This mutex interface matches the "fast" kind of POSIX mutexes. In
+ * particular, a mutex cannot be locked recursively.
+ */
+
+#ifndef _MUTEX_H
+#define _MUTEX_H
+
+#include <stdbool.h>
+
+#include <lib/list.h>
+
+#include "thread.h"
+
+/*
+ * Mutex type.
+ *
+ * All members are private.
+ */
struct mutex {
    struct list waiters;    /* Threads sleeping on this mutex, FIFO order */
    struct thread *owner;   /* Owning thread, checked by assertions */
    bool locked;            /* True between lock and unlock */
};
+
+/*
+ * Initialize a mutex.
+ */
+void mutex_init(struct mutex *mutex);
+
+/*
+ * Lock a mutex.
+ *
+ * If the given mutex is already locked, the calling thread blocks (by
+ * sleeping) and resumes once it holds the mutex.
+ */
+void mutex_lock(struct mutex *mutex);
+
+/*
+ * Try to lock a mutex.
+ *
+ * This is the non-blocking variant of mutex_lock().
+ *
+ * Return 0 on success, ERROR_BUSY if locking the mutex failed.
+ */
+int mutex_trylock(struct mutex *mutex);
+
+/*
+ * Unlock a mutex.
+ *
+ * The calling thread must be the mutex owner.
+ */
+void mutex_unlock(struct mutex *mutex);
+
+#endif /* _MUTEX_H */
diff --git a/src/panic.c b/src/panic.c
new file mode 100644
index 0000000..d47330a
--- /dev/null
+++ b/src/panic.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "panic.h"
+#include "thread.h"
+
/*
 * Print a fatal error message and halt the processor.
 *
 * Preemption and interrupts are disabled first so that no other thread
 * or interrupt handler can interleave output or keep running while the
 * system is in an inconsistent state. This function never returns.
 */
void
panic(const char *format, ...)
{
    va_list ap;

    thread_preempt_disable();
    cpu_intr_disable();

    printf("\npanic: ");
    va_start(ap, format);
    vprintf(format, ap);
    /* Fix: every va_start must be paired with va_end (ISO C 7.16.1). */
    va_end(ap);
    printf("\n");

    cpu_halt();
}
diff --git a/src/panic.h b/src/panic.h
new file mode 100644
index 0000000..be91405
--- /dev/null
+++ b/src/panic.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PANIC_H
+#define _PANIC_H
+
+#include <lib/macros.h>
+
+void panic(const char *format, ...)
+ __attribute__((noreturn))
+ __attribute__((format(printf, 1, 2)));
+
+#endif /* _PANIC_H */
diff --git a/src/stdio.c b/src/stdio.c
new file mode 100644
index 0000000..6aee7b3
--- /dev/null
+++ b/src/stdio.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "thread.h"
+#include "uart.h"
+
+#define PRINTF_BUFFER_SIZE 1024
+
+/*
+ * This implementation uses a global buffer in order to avoid allocating
+ * from the stack, since stacks may be very small.
+ */
+static char printf_buffer[PRINTF_BUFFER_SIZE];
+
/*
 * Write one character to the console UART.
 *
 * NOTE(review): this signature differs from ISO C putchar(int), which
 * returns the character written — presumably this freestanding
 * environment ships its own stdio.h declaring it this way; confirm.
 */
void
putchar(unsigned char c)
{
    uart_write(c);
}
+
/*
 * Read one character from the console UART.
 *
 * Returns the byte read, or EOF when no byte is available
 * (i.e. when uart_read() reports an error).
 */
int
getchar(void)
{
    uint8_t byte;

    if (uart_read(&byte) != 0) {
        return EOF;
    }

    return byte;
}
+
/*
 * printf() implementation, forwarding to vprintf().
 *
 * See vprintf() for the locking and truncation behavior.
 * Returns the length reported by vprintf().
 */
int
printf(const char *format, ...)
{
    va_list ap;
    int length;

    va_start(ap, format);
    length = vprintf(format, ap);
    va_end(ap);

    return length;
}
+
/*
 * Format into the shared global buffer and push the result to the UART.
 *
 * Preemption and interrupts are both disabled while the buffer is in
 * use, which serializes concurrent callers (threads and interrupt
 * handlers) on this single static buffer.
 *
 * Returns the value of vsnprintf(), i.e. the length the complete
 * output would have had; output beyond PRINTF_BUFFER_SIZE - 1
 * characters is silently truncated.
 */
int
vprintf(const char *format, va_list ap)
{
    uint32_t eflags;
    int length;

    thread_preempt_disable();
    eflags = cpu_intr_save();

    length = vsnprintf(printf_buffer, sizeof(printf_buffer), format, ap);

    for (const char *ptr = printf_buffer; *ptr != '\0'; ptr++) {
        uart_write((uint8_t)*ptr);
    }

    cpu_intr_restore(eflags);
    thread_preempt_enable();

    return length;
}
diff --git a/src/string.c b/src/string.c
new file mode 100644
index 0000000..210b2bc
--- /dev/null
+++ b/src/string.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stddef.h>
+#include <string.h>
+
+#include <lib/macros.h>
+
/*
 * Copy n bytes from src to dest.
 *
 * The regions must not overlap; use memmove() when they may.
 * Returns dest.
 */
void *
memcpy(void *dest, const void *src, size_t n)
{
    char *d = dest;
    const char *s = src;

    for (size_t i = 0; i < n; i++) {
        d[i] = s[i];
    }

    return dest;
}
+
/*
 * Copy n bytes from src to dest, handling overlapping regions.
 *
 * When dest starts below src, bytes are copied front to back;
 * otherwise back to front, so the source bytes are always read
 * before being overwritten. Returns dest.
 */
void *
memmove(void *dest, const void *src, size_t n)
{
    char *d = dest;
    const char *s = src;

    if ((const char *)dest <= s) {
        while (n != 0) {
            *d = *s;
            d++;
            s++;
            n--;
        }
    } else {
        d += n;
        s += n;

        while (n != 0) {
            d--;
            s--;
            *d = *s;
            n--;
        }
    }

    return dest;
}
+
/*
 * Copy the NUL-terminated string src into dest, including the
 * terminator. The destination must be large enough. Returns dest.
 */
char *
strcpy(char *dest, const char *src)
{
    char *ret = dest;

    while ((*dest++ = *src++) != '\0') {
        continue;
    }

    return ret;
}
+
/*
 * Return the number of characters in s before the NUL terminator.
 */
size_t
strlen(const char *s)
{
    size_t len = 0;

    while (s[len] != '\0') {
        len++;
    }

    return len;
}
+
/*
 * Compare two NUL-terminated strings.
 *
 * Returns 0 when equal, a negative value when s1 sorts before s2,
 * a positive value otherwise.
 *
 * Fix: ISO C (7.24.4) requires the comparison to interpret characters
 * as unsigned char. The previous plain-char version returned the wrong
 * sign for bytes above 0x7f on targets where char is signed (i386).
 */
int
strcmp(const char *s1, const char *s2)
{
    unsigned char c1, c2;

    while ((c1 = (unsigned char)*s1) == (c2 = (unsigned char)*s2)) {
        if (c1 == '\0') {
            return 0;
        }

        s1++;
        s2++;
    }

    return (int)c1 - (int)c2;
}
+
/*
 * Compare at most n characters of two strings.
 *
 * Returns 0 when the first n characters are equal (or both strings end
 * before that), otherwise the sign of the first differing character.
 *
 * Fix: as with strcmp(), ISO C (7.24.4) requires characters to be
 * compared as unsigned char, so the result sign is well defined for
 * bytes above 0x7f on signed-char targets.
 */
int
strncmp(const char *s1, const char *s2, size_t n)
{
    unsigned char c1, c2;

    if (unlikely(n == 0)) {
        return 0;
    }

    while ((n != 0)
           && (c1 = (unsigned char)*s1) == (c2 = (unsigned char)*s2)) {
        if (c1 == '\0') {
            return 0;
        }

        n--;
        s1++;
        s2++;
    }

    /* When the loop stopped because n reached 0, c1 == c2 and 0 is returned */
    return (int)c1 - (int)c2;
}
diff --git a/src/sw.c b/src/sw.c
new file mode 100644
index 0000000..fc253a4
--- /dev/null
+++ b/src/sw.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Stopwatch demo application.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <lib/macros.h>
+#include <lib/shell.h>
+
+#include "condvar.h"
+#include "mutex.h"
+#include "panic.h"
+#include "sw.h"
+#include "thread.h"
+#include "timer.h"
+
+/*
+ * Display interval, in seconds.
+ */
+#define SW_DISPLAY_INTERVAL 5
+
+/*
+ * Maximum wait time for the sw_wait command.
+ */
+#define SW_MAX_WAIT 30
+
+/*
+ * Stopwatch type.
+ *
+ * The mutex must be locked before accessing any member.
+ */
struct sw {
    struct mutex mutex;         /* Serializes access to all members below */
    struct condvar cv;          /* Signalled when a sw_wait() deadline passes */
    struct timer timer;         /* Periodic tick timer (sw_timer_run) */
    unsigned long ticks;        /* Ticks elapsed since the last sw_start() */
    bool timer_scheduled;       /* True while the stopwatch is running */
    bool thread_waiting;        /* True while a thread blocks in sw_wait() */
    unsigned long wait_ticks;   /* Absolute tick at which to wake the waiter */
};
+
+/*
+ * Singleton instance.
+ */
+static struct sw *sw_instance;
+
/*
 * Return the singleton stopwatch, which must have been created by
 * sw_setup() beforehand.
 */
static struct sw *
sw_get_instance(void)
{
    assert(sw_instance);
    return sw_instance;
}
+
/*
 * Timer callback, run once per tick while the stopwatch is running.
 *
 * It increments the tick counter, prints it every SW_DISPLAY_INTERVAL
 * seconds, signals a thread blocked in sw_wait() once its deadline has
 * passed, and reschedules itself one tick later. When the stopwatch
 * has been stopped (timer_scheduled cleared by sw_stop()), it simply
 * doesn't reschedule, letting the periodic timer die out.
 */
static void
sw_timer_run(void *arg)
{
    struct sw *sw;

    sw = arg;

    mutex_lock(&sw->mutex);

    if (!sw->timer_scheduled) {
        goto out;
    }

    sw->ticks++;

    if ((sw->ticks % (THREAD_SCHED_FREQ * SW_DISPLAY_INTERVAL)) == 0) {
        printf("%lu\n", sw->ticks);
    }

    if (sw->thread_waiting && timer_ticks_occurred(sw->wait_ticks, sw->ticks)) {
        sw->thread_waiting = false;
        condvar_signal(&sw->cv);
    }

    timer_schedule(&sw->timer, timer_get_time(&sw->timer) + 1);

out:
    mutex_unlock(&sw->mutex);
}
+
+static struct sw *
+sw_create(void)
+{
+ struct sw *sw;
+
+ sw = malloc(sizeof(*sw));
+
+ if (!sw) {
+ return NULL;
+ }
+
+ mutex_init(&sw->mutex);
+ condvar_init(&sw->cv);
+ timer_init(&sw->timer, sw_timer_run, sw);
+ sw->timer_scheduled = false;
+ sw->thread_waiting = false;
+ return sw;
+}
+
/*
 * Arm the periodic timer for the next tick.
 *
 * The caller must hold the stopwatch mutex. Idempotent: when the timer
 * is already scheduled, this is a no-op, preventing double scheduling.
 */
static void
sw_schedule(struct sw *sw)
{
    if (sw->timer_scheduled) {
        return;
    }

    sw->timer_scheduled = true;
    timer_schedule(&sw->timer, timer_now() + 1);
}
+
/*
 * Reset the tick counter to zero and start the stopwatch.
 */
static void
sw_start(struct sw *sw)
{
    mutex_lock(&sw->mutex);
    sw->ticks = 0;
    sw_schedule(sw);
    mutex_unlock(&sw->mutex);
}
+
/*
 * Stop the stopwatch without resetting the tick counter.
 *
 * Clearing timer_scheduled makes the next sw_timer_run() invocation
 * return without rescheduling, which ends the periodic timer.
 */
static void
sw_stop(struct sw *sw)
{
    mutex_lock(&sw->mutex);
    sw->timer_scheduled = false;
    mutex_unlock(&sw->mutex);
}
+
/*
 * Restart a stopped stopwatch, keeping the current tick counter.
 */
static void
sw_resume(struct sw *sw)
{
    mutex_lock(&sw->mutex);
    sw_schedule(sw);
    mutex_unlock(&sw->mutex);
}
+
/*
 * Return the current tick count, read under the mutex for a
 * consistent snapshot.
 */
static unsigned long
sw_read(struct sw *sw)
{
    unsigned long ticks;

    mutex_lock(&sw->mutex);
    ticks = sw->ticks;
    mutex_unlock(&sw->mutex);

    return ticks;
}
+
/*
 * Block the calling thread for roughly the given number of seconds,
 * measured in stopwatch ticks.
 *
 * Fails with a printed message when the stopwatch is stopped or when
 * another thread is already waiting; only a single waiter is supported.
 */
static void
sw_wait(struct sw *sw, unsigned long seconds)
{
    mutex_lock(&sw->mutex);

    if (!sw->timer_scheduled) {
        printf("sw_wait: error: stopwatch disabled\n");
        goto out;
    } else if (sw->thread_waiting) {
        printf("sw_wait: error: thread already waiting\n");
        goto out;
    }

    sw->thread_waiting = true;
    sw->wait_ticks = sw->ticks + (seconds * THREAD_SCHED_FREQ);

    /*
     * sw_timer_run() clears thread_waiting and signals the condition
     * variable once the deadline has passed; the loop guards against
     * wakeups occurring before then.
     */
    do {
        condvar_wait(&sw->cv, &sw->mutex);
    } while (sw->thread_waiting);

out:
    mutex_unlock(&sw->mutex);
}
+
/*
 * Shell command: start the stopwatch from zero.
 */
static void
sw_shell_start(int argc, char **argv)
{
    (void)argc;
    (void)argv;

    sw_start(sw_get_instance());
}

/*
 * Shell command: stop the stopwatch, keeping the tick count.
 */
static void
sw_shell_stop(int argc, char **argv)
{
    (void)argc;
    (void)argv;

    sw_stop(sw_get_instance());
}

/*
 * Shell command: restart the stopwatch without resetting the count.
 */
static void
sw_shell_resume(int argc, char **argv)
{
    (void)argc;
    (void)argv;

    sw_resume(sw_get_instance());
}

/*
 * Shell command: print the current tick count.
 */
static void
sw_shell_read(int argc, char **argv)
{
    (void)argc;
    (void)argv;

    printf("%lu\n", sw_read(sw_get_instance()));
}
+
+static void
+sw_shell_wait(int argc, char **argv)
+{
+ unsigned long seconds;
+ int ret;
+
+ if (argc != 2) {
+ goto error;
+ }
+
+ ret = sscanf(argv[1], "%lu", &seconds);
+
+ if ((ret != 1) || (seconds > SW_MAX_WAIT)) {
+ goto error;
+ }
+
+ sw_wait(sw_get_instance(), seconds);
+ return;
+
+error:
+ printf("sw_wait: error: invalid arguments\n");
+}
+
/*
 * Shell commands exported by this module, registered in sw_setup().
 */
static struct shell_cmd shell_cmds[] = {
    SHELL_CMD_INITIALIZER("sw_start", sw_shell_start,
                          "sw_start",
                          "start the stopwatch"),
    SHELL_CMD_INITIALIZER("sw_stop", sw_shell_stop,
                          "sw_stop",
                          "stop the stopwatch"),
    SHELL_CMD_INITIALIZER("sw_resume", sw_shell_resume,
                          "sw_resume",
                          "resume the stopwatch"),
    SHELL_CMD_INITIALIZER("sw_read", sw_shell_read,
                          "sw_read",
                          "read the stopwatch time"),
    SHELL_CMD_INITIALIZER("sw_wait", sw_shell_wait,
                          "sw_wait <seconds>",
                          "wait for up to " QUOTE(SW_MAX_WAIT) " seconds"),
};
+
+void
+sw_setup(void)
+{
+ int error;
+
+ sw_instance = sw_create();
+
+ if (!sw_instance) {
+ panic("sw: error: unable to create stopwatch");
+ }
+
+ for (size_t i = 0; i < ARRAY_SIZE(shell_cmds); i++) {
+ error = shell_cmd_register(&shell_cmds[i]);
+
+ if (error) {
+ panic("unable to register shell command");
+ }
+ }
+}
diff --git a/src/sw.h b/src/sw.h
new file mode 100644
index 0000000..91488c3
--- /dev/null
+++ b/src/sw.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Stopwatch demo application.
+ */
+
+#ifndef _SW_H
+#define _SW_H
+
+/*
+ * Initialize the sw module.
+ */
+void sw_setup(void);
+
+#endif /* _SW_H */
diff --git a/src/thread.c b/src/thread.c
new file mode 100644
index 0000000..e361473
--- /dev/null
+++ b/src/thread.c
@@ -0,0 +1,823 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <lib/macros.h>
+#include <lib/list.h>
+
+#include "cpu.h"
+#include "error.h"
+#include "panic.h"
+#include "thread.h"
+#include "timer.h"
+
+/*
+ * The compiler expects the stack pointer to be properly aligned when a
+ * function is called, and maintains this alignment across a call chain.
+ * The constraints are similar to the return value of malloc().
+ * See the description of mem_alloc() in mem.h.
+ *
+ * Note that modern compilers expect the stack to be 16-byte aligned
+ * even on 32-bits i386 processors, to cope with SSE instructions which
+ * don't support unaligned accesses (see a revised version of the System V
+ * Intel386 ABI [1] for more details). Since all floating point support is
+ * disabled when building the kernel, this requirement can be safely ignored
+ * and the legacy 4-byte alignment used instead.
+ *
+ * [1] https://www.uclibc.org/docs/psABI-i386.pdf
+ */
+#define THREAD_STACK_ALIGN 4
+
+/*
+ * List of threads sharing the same priority.
+ */
struct thread_list {
    struct list threads;    /* Threads queued in FIFO order (tail insert, head removal) */
};
+
+/*
+ * Run queue.
+ *
+ * Threads that aren't in the running state aren't tracked in a run queue.
+ * The current thread, which is the one currently running on the processor,
+ * isn't in any of the thread lists.
+ *
+ * The idle thread runs when no other thread is in the running state.
+ *
+ * Interrupts and preemption must be disabled when accessing a run queue.
+ * Interrupts must be disabled to prevent a timer interrupt from corrupting
+ * the run queue. Preemption must be disabled to prevent an interrupt handler
+ * from causing an early context switch when returning from interrupt.
+ */
struct thread_runq {
    struct thread *current;                         /* Thread running on the processor */
    unsigned int nr_threads;                        /* Threads in the running state, idle excluded */
    struct thread_list lists[THREAD_NR_PRIORITIES]; /* One FIFO list per priority */
    struct thread *idle;                            /* Run when no other thread is runnable */
};
+
enum thread_state {
    THREAD_STATE_RUNNING,   /* Thread is in a run queue or currently running */
    THREAD_STATE_SLEEPING,  /* Thread is not running and may be woken up */
    THREAD_STATE_DEAD,      /* Thread is sleeping and must not be woken up */
};
+
+/*
+ * Thread structure.
+ *
+ * The purpose of a thread is to support saving and restoring state in order
+ * to achieve time sharing between activities.
+ *
+ * A thread is mostly used to store the saved state of an activity, called
+ * its context. The context is usually made up of some registers that are
+ * saved on the stack, the stack itself, and the thread structure.
+ * When the context is saved, the stack pointer is updated to the value
+ * of the stack register at the time the thread context is saved.
+ *
+ * A thread may be "voluntarily" preempted, when it yields the processor
+ * itself, or "involuntarily" preempted when an interrupt occurs. Since it's
+ * not possible to immediately yield the processor when preemption is
+ * disabled (including in the middle of an interrupt handler), preemption
+ * may only occur on specific events :
+ * - Calling thread_yield() with preemption enabled.
+ * - Preemption is reenabled, i.e. the preemption level goes back to 0.
+ * - Return from an interrupt.
+ *
+ * Calling thread_yield() is completely voluntary and is ignored in this
+ * discussion. Returning from an interrupt actually reenables preemption,
+ * so the only case discussed is reenabling preemption. The mechanism used
+ * to trigger scheduling when preemption is reenabled is the yield flag.
+ * When the scheduler determines that the current thread should yield the
+ * processor, e.g. because an interrupt handler has awaken a higher priority
+ * thread, it marks the current thread for yielding. When preemption is
+ * reenabled, the flag is checked, and if true, the current thread yields
+ * the processor.
+ */
struct thread {
    void *sp;                           /* Saved stack pointer of the suspended context */
    enum thread_state state;            /* Scheduling state */
    bool yield;                         /* True when the thread must yield at the next
                                           preemption point (see comment above) */
    struct list node;                   /* Entry in a run queue thread_list */
    unsigned int preempt_level;         /* Preemption disabled while non-zero */
    unsigned int priority;              /* Scheduling priority */
    struct thread *joiner;              /* Presumably the thread blocked joining this
                                           one — confirm against thread_join() */
    char name[THREAD_NAME_MAX_SIZE];    /* Human-readable name */
    void *stack;                        /* Base of the stack allocation */
};
+
+/*
+ * Run queue singleton.
+ */
+static struct thread_runq thread_runq;
+
+/*
+ * Dummy thread context used to make functions that require a thread context
+ * work before the thread module is fully initialized.
+ */
+static struct thread thread_dummy;
+
+/*
+ * Declarations for C/assembly functions that are global so that they can
+ * be shared between thread.c and thread_asm.S, but are considered private to
+ * the thread module.
+ */
+void thread_load_context(struct thread *thread) __attribute__((noreturn));
+void thread_switch_context(struct thread *prev, struct thread *next);
+void thread_main(thread_fn_t fn, void *arg);
+
+/*
+ * Function implementing the idle thread.
+ */
/*
 * Body of the idle thread: endlessly relax the processor until the
 * next interrupt. Never returns.
 */
static void
thread_idle(void *arg)
{
    (void)arg;

    while (true) {
        cpu_idle();
    }
}
+
/* True when the thread is in the running state. */
static bool
thread_is_running(const struct thread *thread)
{
    return thread->state == THREAD_STATE_RUNNING;
}

/* Move the thread to the running state. */
static void
thread_set_running(struct thread *thread)
{
    thread->state = THREAD_STATE_RUNNING;
}

/* Move the thread to the sleeping state. */
static void
thread_set_sleeping(struct thread *thread)
{
    thread->state = THREAD_STATE_SLEEPING;
}

/* True when the thread is dead (exited, must not be woken up). */
static bool
thread_is_dead(const struct thread *thread)
{
    return thread->state == THREAD_STATE_DEAD;
}

/* Move the thread to the dead state. */
static void
thread_set_dead(struct thread *thread)
{
    thread->state = THREAD_STATE_DEAD;
}
+
/* True when the thread has been marked for yielding the processor. */
static bool
thread_should_yield(const struct thread *thread)
{
    return thread->yield;
}

/* Mark the thread for yielding at the next preemption point. */
static void
thread_set_yield(struct thread *thread)
{
    thread->yield = true;
}

/* Clear the yield mark, typically once the thread has yielded. */
static void
thread_clear_yield(struct thread *thread)
{
    thread->yield = false;
}
+
/* Return the scheduling priority of the thread. */
static unsigned int
thread_get_priority(struct thread *thread)
{
    return thread->priority;
}

/* Unlink the thread from whichever thread_list currently holds it. */
static void
thread_remove_from_list(struct thread *thread)
{
    list_remove(&thread->node);
}
+
/*
 * Yield the processor if the current thread has been marked for
 * yielding, e.g. by an interrupt handler that woke a higher priority
 * thread.
 */
static void
thread_yield_if_needed(void)
{
    if (thread_should_yield(thread_self())) {
        thread_yield();
    }
}
+
/*
 * Disable preemption by incrementing the per-thread preemption level.
 *
 * Disabling nests: each call must be paired with a matching enable.
 * The assertion catches counter overflow (wrap to 0).
 */
void
thread_preempt_disable(void)
{
    struct thread *thread;

    thread = thread_self();
    thread->preempt_level++;
    assert(thread->preempt_level != 0);

    /*
     * The compiler barrier keeps the protected code from being
     * reordered before the increment by the compiler.
     */
    barrier();
}
+
/*
 * Decrement the preemption level without checking the yield flag.
 *
 * The barrier pairs with the one in thread_preempt_disable(), keeping
 * the protected code from leaking past the decrement.
 */
static void
thread_preempt_enable_no_yield(void)
{
    struct thread *thread;

    barrier();

    thread = thread_self();
    assert(thread->preempt_level != 0);
    thread->preempt_level--;
}
+
/*
 * Reenable preemption (decrement the level) and check the yield flag,
 * so a thread marked for yielding while preemption was disabled yields
 * as soon as possible.
 */
void
thread_preempt_enable(void)
{
    thread_preempt_enable_no_yield();
    thread_yield_if_needed();
}
+
+bool
+thread_preempt_enabled(void)
+{
+ struct thread *thread;
+
+ thread = thread_self();
+ return thread->preempt_level == 0;
+}
+
/*
 * True when both interrupts and preemption are disabled, i.e. when the
 * scheduler data structures may safely be accessed.
 */
static bool
thread_scheduler_locked(void)
{
    return !cpu_intr_enabled() && !thread_preempt_enabled();
}
+
/*
 * Disable preemption then interrupts, returning the previous interrupt
 * state for thread_unlock_scheduler() to restore.
 */
static uint32_t
thread_lock_scheduler(void)
{
    /*
     * When disabling both preemption and interrupts, it is best to do it in
     * this order, since, if an interrupt occurs after preemption is disabled
     * but before interrupts are disabled, it may not cause a context switch.
     *
     * Besides, disabling interrupts first would slightly increase interrupt
     * latencies.
     */
    thread_preempt_disable();
    return cpu_intr_save();
}
+
/*
 * Restore the interrupt state saved by thread_lock_scheduler() and
 * reenable preemption.
 *
 * When yield is true, the yield flag is checked on the way out, so a
 * pending preemption takes effect immediately; otherwise it is left
 * for a later preemption point.
 */
static void
thread_unlock_scheduler(uint32_t eflags, bool yield)
{
    cpu_intr_restore(eflags);

    if (yield) {
        thread_preempt_enable();
    } else {
        thread_preempt_enable_no_yield();
    }
}
+
/* Initialize an empty thread list. */
static void
thread_list_init(struct thread_list *list)
{
    list_init(&list->threads);
}

/* Append a thread at the tail of the list (FIFO insertion). */
static void
thread_list_enqueue(struct thread_list *list, struct thread *thread)
{
    list_insert_tail(&list->threads, &thread->node);
}

/*
 * Remove and return the thread at the head of the list.
 * The list must not be empty.
 */
static struct thread *
thread_list_dequeue(struct thread_list *list)
{
    struct thread *thread;

    thread = list_first_entry(&list->threads, typeof(*thread), node);
    thread_remove_from_list(thread);
    return thread;
}

/* True when the list holds no thread. */
static bool
thread_list_empty(struct thread_list *list)
{
    return list_empty(&list->threads);
}
+
/* Return the run queue list for the given priority (bounds-checked). */
static struct thread_list *
thread_runq_get_list(struct thread_runq *runq, unsigned int priority)
{
    assert(priority < ARRAY_SIZE(runq->lists));
    return &runq->lists[priority];
}

/* Return the thread currently running on the processor. */
static struct thread *
thread_runq_get_current(struct thread_runq *runq)
{
    return runq->current;
}
+
/*
 * Requeue the previously running thread on its priority list.
 *
 * The idle thread is never kept in the priority lists; it is selected
 * explicitly by thread_runq_get_next() when nothing else is runnable,
 * hence the early return.
 */
static void
thread_runq_put_prev(struct thread_runq *runq, struct thread *thread)
{
    struct thread_list *list;

    if (thread == runq->idle) {
        return;
    }

    list = thread_runq_get_list(runq, thread_get_priority(thread));
    thread_list_enqueue(list, thread);
}
+
+static struct thread *
+thread_runq_get_next(struct thread_runq *runq)
+{
+ struct thread *thread;
+
+ assert(runq->current);
+
+ if (runq->nr_threads == 0) {
+ thread = runq->idle;
+ } else {
+ struct thread_list *list;
+ size_t nr_lists;
+
+ nr_lists = ARRAY_SIZE(runq->lists);
+
+ /*
+ * Note that size_t is unsigned, which means that when the iterator
+ * is 0 and decremented, the new value is actually the maximum value
+ * that the type may have, which is necessarily higher than the array
+ * size. This behaviour is very well defined by the C specification
+ * (6.3.1.3 Signed and unsigned integers), and avoids warnings about
+ * mixing types of different signedness.
+ */
+ for (size_t i = (nr_lists - 1); i < nr_lists; i--) {
+ list = thread_runq_get_list(runq, i);
+
+ if (!thread_list_empty(list)) {
+ break;
+ }
+ }
+
+ thread = thread_list_dequeue(list);
+ }
+
+ runq->current = thread;
+ return thread;
+}
+
+/*
+ * Add a runnable thread to the run queue.
+ *
+ * If the new thread has a strictly higher priority than the current one,
+ * the latter is marked for yielding.
+ *
+ * The scheduler must be locked when calling this function.
+ */
+static void
+thread_runq_add(struct thread_runq *runq, struct thread *thread)
+{
+    struct thread_list *list;
+
+    assert(thread_scheduler_locked());
+    assert(thread_is_running(thread));
+
+    list = thread_runq_get_list(runq, thread_get_priority(thread));
+    thread_list_enqueue(list, thread);
+
+    runq->nr_threads++;
+    assert(runq->nr_threads != 0);
+
+    if (thread_get_priority(thread) > thread_get_priority(runq->current)) {
+        thread_set_yield(runq->current);
+    }
+}
+
+/*
+ * Remove a thread that is no longer runnable (sleeping or dead) from
+ * the run queue accounting.
+ */
+static void
+thread_runq_remove(struct thread_runq *runq, struct thread *thread)
+{
+    assert(runq->nr_threads != 0);
+    runq->nr_threads--;
+
+    assert(!thread_is_running(thread));
+    thread_remove_from_list(thread);
+}
+
+/*
+ * Schedule the next thread, switching context if the selected thread
+ * isn't the current one.
+ *
+ * The scheduler must be locked, and the preemption level of the current
+ * thread must be exactly one (accounting for the scheduler lock itself)
+ * when calling this function.
+ */
+static void
+thread_runq_schedule(struct thread_runq *runq)
+{
+    struct thread *prev, *next;
+
+    prev = thread_runq_get_current(runq);
+
+    assert(thread_scheduler_locked());
+    assert(prev->preempt_level == 1);
+
+    thread_runq_put_prev(runq, prev);
+
+    if (!thread_is_running(prev)) {
+        thread_runq_remove(runq, prev);
+    }
+
+    next = thread_runq_get_next(runq);
+
+    if (prev != next) {
+        /*
+         * When switching context, it is extremely important that no
+         * data access generated by the compiler "leak" across the switch.
+         * All operations (i.e. side effects) started before the switch
+         * should complete before the switch, and all operations starting
+         * after the switch should start after the switch.
+         *
+         * This is what allows a thread waiting for an event to reliably
+         * "see" that event after another thread or interrupt handler has
+         * triggered it.
+         *
+         * This requires a barrier, and, since this is a single-processor
+         * scheduler, a compiler barrier (as opposed to memory barriers)
+         * is enough. But there is no such barrier here. The reason is that
+         * the context switch is implemented in assembly, and the compiler
+         * is unable to understand what the assembly code does. As a result,
+         * even with aggressive optimizations enabled, the compiler has to
+         * assume that memory may have changed in completely unexpected ways,
+         * which is equivalent to the inline assembly expression used to
+         * implement compiler barriers with GCC (see barrier() in macros.h).
+         */
+        thread_switch_context(prev, next);
+    }
+}
+
+/*
+ * Enable the scheduler by selecting the first thread to run and loading
+ * its context. This function never returns.
+ */
+void
+thread_enable_scheduler(void)
+{
+    struct thread *thread;
+
+    thread = thread_runq_get_next(&thread_runq);
+    assert(thread);
+    assert(thread->preempt_level == 1);
+    thread_load_context(thread);
+
+    /* Never reached */
+}
+
+/*
+ * Common entry point of all threads.
+ *
+ * New threads start running with interrupts disabled and a preemption
+ * level of one (see thread_init()), so both are reenabled before running
+ * the thread function. The thread implicitly exits when that function
+ * returns.
+ */
+void
+thread_main(thread_fn_t fn, void *arg)
+{
+    assert(fn);
+
+    assert(thread_scheduler_locked());
+    assert(thread_self()->preempt_level == 1);
+
+    cpu_intr_enable();
+    thread_preempt_enable();
+
+    fn(arg);
+
+    thread_exit();
+}
+
+/* Return the (null-terminated) name of the given thread. */
+const char *
+thread_name(const struct thread *thread)
+{
+    return thread->name;
+}
+
+/* Copy the given name into the thread, silently truncating if too long. */
+static void
+thread_set_name(struct thread *thread, const char *name)
+{
+    snprintf(thread->name, sizeof(thread->name), "%s", name);
+}
+
+/*
+ * Push a 32-bit word on a stack being forged, updating both the stack
+ * pointer and the remaining stack size accordingly.
+ */
+static void
+thread_stack_push(uint32_t **stackp, size_t *stack_sizep, uint32_t word)
+{
+    uint32_t *stack;
+    size_t stack_size;
+
+    stack = *stackp;
+    stack_size = *stack_sizep;
+    assert(stack_size >= sizeof(word));
+    stack--;
+    stack_size -= sizeof(word);
+    *stack = word;
+    *stackp = stack;
+    *stack_sizep = stack_size;
+}
+
+/*
+ * Build the initial stack frame of a new thread, so that restoring its
+ * context "returns" into thread_main() with the given function and
+ * argument.
+ *
+ * Return the forged stack pointer, to be stored in the thread structure.
+ */
+static void *
+thread_stack_forge(char *stack_addr, size_t stack_size,
+                   thread_fn_t fn, void *arg)
+{
+    uint32_t *stack;
+
+    /* The stack grows downwards, so start from one past its last byte */
+    stack = (uint32_t *)(stack_addr + stack_size);
+
+    /*
+     * This part of the stack makes context restoration "return" to
+     * thread_main() as if it were called from address 0 (which stops
+     * backtracing when using a debugger).
+     *
+     * This is how an assembly call to thread_main() looks like, according
+     * to the ABI (System V Intel 386 ABI [1]) :
+     *  push arg
+     *  push fn
+     *  call thread_main
+     *
+     * Remember that the call instruction pushes the return address on the
+     * stack.
+     *
+     * [1] http://www.sco.com/developers/devspecs/abi386-4.pdf
+     */
+    thread_stack_push(&stack, &stack_size, (uint32_t)arg); /* 2nd argument */
+    thread_stack_push(&stack, &stack_size, (uint32_t)fn);  /* 1st argument */
+    thread_stack_push(&stack, &stack_size, (uint32_t)0);   /* Return address */
+    thread_stack_push(&stack, &stack_size, (uint32_t)thread_main);
+
+    /*
+     * This part of the stack contains the registers that should be restored.
+     * The selection of the registers to save is made according to the
+     * ABI, which specifies which registers are owned by the caller, and
+     * which are owned by the callee. Since, in all cases, switching context
+     * is achieved by calling the thread_switch_context() function, it
+     * is safe to rely on the ABI for this selection. Naturally, the
+     * registers that must be saved are those owned by the caller, since
+     * the compiler assumes all registers owned by the callee may have
+     * changed on return. See the System V Intel386 ABI "Registers and the
+     * Stack Frame".
+     *
+     * For debugging purposes, a complete save of all the registers may be
+     * performed instead, allowing precise inspection of the state of a
+     * thread not currently running on the processor.
+     *
+     * It is recommended to read the assembly code at the thread_restore_context
+     * label in thread_asm.S to better understand this stack frame.
+     */
+    thread_stack_push(&stack, &stack_size, 0); /* EBP */
+    thread_stack_push(&stack, &stack_size, 0); /* EBX */
+    thread_stack_push(&stack, &stack_size, 0); /* EDI */
+    thread_stack_push(&stack, &stack_size, 0); /* ESI */
+
+    return stack;
+}
+
+/*
+ * Initialize a thread structure.
+ *
+ * The stack may be NULL, in which case no context is forged (this is
+ * only used for the dummy bootstrap thread, which never has its context
+ * restored).
+ */
+static void
+thread_init(struct thread *thread, thread_fn_t fn, void *arg,
+            const char *name, char *stack, size_t stack_size,
+            unsigned int priority)
+{
+    assert(P2ALIGNED((uintptr_t)stack, THREAD_STACK_ALIGN));
+
+    /*
+     * New threads are created in a state that is similar to preempted threads,
+     * since it makes running them for the first time indistinguishable from
+     * redispatching a thread that has actually been preempted. This state
+     * is very specific and includes the following properties :
+     *  - state is running
+     *  - interrupts are disabled
+     *  - preemption level must be exactly one (see the description of
+     *    thread_sleep() in thread.h)
+     *
+     * A state is artificially forged on the new stack to make it look like
+     * the new thread has been preempted.
+     */
+
+    if (stack) {
+        thread->sp = thread_stack_forge(stack, stack_size, fn, arg);
+    }
+
+    thread->state = THREAD_STATE_RUNNING;
+    thread->yield = false;
+    thread->preempt_level = 1;
+    thread->priority = priority;
+    thread->joiner = NULL;
+    thread_set_name(thread, name);
+    thread->stack = stack;
+}
+
+/*
+ * Create a thread, see thread.h for the interface description.
+ *
+ * The thread structure and its stack are dynamically allocated, and the
+ * new thread is immediately added to the run queue, so it may preempt
+ * the caller before this function returns.
+ */
+int
+thread_create(struct thread **threadp, thread_fn_t fn, void *arg,
+              const char *name, size_t stack_size, unsigned int priority)
+{
+    struct thread *thread;
+    uint32_t eflags;
+    void *stack;
+
+    assert(fn);
+
+    thread = malloc(sizeof(*thread));
+
+    if (!thread) {
+        return ERROR_NOMEM;
+    }
+
+    /* Silently raise the stack size to the supported minimum */
+    if (stack_size < THREAD_STACK_MIN_SIZE) {
+        stack_size = THREAD_STACK_MIN_SIZE;
+    }
+
+    stack = malloc(stack_size);
+
+    if (!stack) {
+        free(thread);
+        return ERROR_NOMEM;
+    }
+
+    thread_init(thread, fn, arg, name, stack, stack_size, priority);
+
+    eflags = thread_lock_scheduler();
+    thread_runq_add(&thread_runq, thread);
+    thread_unlock_scheduler(eflags, true);
+
+    if (threadp) {
+        *threadp = thread;
+    }
+
+    return 0;
+}
+
+/*
+ * Release the resources of a dead thread.
+ *
+ * Only called from thread_join(), once the target thread is known to be
+ * dead and off the processor.
+ */
+static void
+thread_destroy(struct thread *thread)
+{
+    assert(thread_is_dead(thread));
+
+    free(thread->stack);
+    free(thread);
+}
+
+/*
+ * Terminate the current thread, see thread.h for the interface
+ * description.
+ *
+ * The scheduler lock is intentionally never released : the thread is
+ * marked dead, its joiner (if any) is awaken, and the scheduler then
+ * dispatches another thread, never to return here.
+ */
+void
+thread_exit(void)
+{
+    struct thread *thread;
+
+    thread = thread_self();
+
+    assert(thread_preempt_enabled());
+
+    thread_lock_scheduler();
+    assert(thread_is_running(thread));
+    thread_set_dead(thread);
+    thread_wakeup(thread->joiner);
+    thread_runq_schedule(&thread_runq);
+
+    panic("thread: error: dead thread walking");
+}
+
+/*
+ * Wait for the given thread to terminate, then release its resources.
+ *
+ * Locking the scheduler gives the exact preemption level of one that
+ * thread_sleep() requires, and the loop guards against spurious
+ * wake-ups, as described in thread.h.
+ */
+void
+thread_join(struct thread *thread)
+{
+    uint32_t eflags;
+
+    eflags = thread_lock_scheduler();
+
+    thread->joiner = thread_self();
+
+    while (!thread_is_dead(thread)) {
+        thread_sleep();
+    }
+
+    thread_unlock_scheduler(eflags, true);
+
+    thread_destroy(thread);
+}
+
+/* Return the current thread. */
+struct thread *
+thread_self(void)
+{
+    return thread_runq_get_current(&thread_runq);
+}
+
+/*
+ * Create the idle thread, which is selected when no other thread is
+ * runnable. Since the system cannot run without it, allocation failures
+ * are fatal.
+ */
+static struct thread *
+thread_create_idle(void)
+{
+    struct thread *idle;
+    void *stack;
+
+    idle = malloc(sizeof(*idle));
+
+    if (!idle) {
+        panic("thread: unable to allocate idle thread");
+    }
+
+    stack = malloc(THREAD_STACK_MIN_SIZE);
+
+    if (!stack) {
+        panic("thread: unable to allocate idle thread stack");
+    }
+
+    thread_init(idle, thread_idle, NULL, "idle",
+                stack, THREAD_STACK_MIN_SIZE, THREAD_IDLE_PRIORITY);
+    return idle;
+}
+
+/*
+ * Install a dummy thread as the current thread, so that functions
+ * requiring a thread context, such as thread_self(), may be used before
+ * the thread module is fully initialized.
+ */
+static void
+thread_runq_preinit(struct thread_runq *runq)
+{
+    thread_init(&thread_dummy, NULL, NULL, "dummy", NULL, 0, 0);
+    runq->current = &thread_dummy;
+}
+
+/* Fully initialize the run queue, including the idle thread. */
+static void
+thread_runq_init(struct thread_runq *runq)
+{
+    runq->nr_threads = 0;
+
+    for (size_t i = 0; i < ARRAY_SIZE(runq->lists); i++) {
+        thread_list_init(&runq->lists[i]);
+    }
+
+    runq->idle = thread_create_idle();
+}
+
+/* Early initialization, see thread.h. */
+void
+thread_bootstrap(void)
+{
+    thread_runq_preinit(&thread_runq);
+}
+
+/* Full initialization of the thread module, see thread.h. */
+void
+thread_setup(void)
+{
+    thread_runq_init(&thread_runq);
+}
+
+/*
+ * Yield the processor, see thread.h for the interface description.
+ *
+ * This is a no-op while preemption is disabled; any pending yield
+ * request is then honored when preemption is reenabled.
+ */
+void
+thread_yield(void)
+{
+    uint32_t eflags;
+
+    if (!thread_preempt_enabled()) {
+        return;
+    }
+
+    eflags = thread_lock_scheduler();
+    /* The yield request, if any, is being satisfied right now */
+    thread_clear_yield(thread_self());
+    thread_runq_schedule(&thread_runq);
+    thread_unlock_scheduler(eflags, false);
+}
+
+/*
+ * Sleep until awaken, see the detailed description in thread.h.
+ *
+ * The preemption level must be exactly one on entry. Interrupts are
+ * disabled around the state change so that a wake-up from interrupt
+ * context cannot be missed.
+ */
+void
+thread_sleep(void)
+{
+    struct thread *thread;
+    uint32_t eflags;
+
+    thread = thread_self();
+
+    eflags = cpu_intr_save();
+    assert(thread_is_running(thread));
+    thread_set_sleeping(thread);
+    thread_runq_schedule(&thread_runq);
+    /* On return from the context switch, this thread was woken up */
+    assert(thread_is_running(thread));
+    cpu_intr_restore(eflags);
+}
+
+/*
+ * Wake up the given thread, see thread.h for the interface description.
+ *
+ * Waking up a NULL pointer or the current thread is a harmless no-op,
+ * which makes call sites such as thread_exit() simpler.
+ */
+void
+thread_wakeup(struct thread *thread)
+{
+    uint32_t eflags;
+
+    if (!thread || (thread == thread_self())) {
+        return;
+    }
+
+    eflags = thread_lock_scheduler();
+
+    if (!thread_is_running(thread)) {
+        assert(!thread_is_dead(thread));
+        thread_set_running(thread);
+        thread_runq_add(&thread_runq, thread);
+    }
+
+    thread_unlock_scheduler(eflags, true);
+}
+
+/*
+ * Report a scheduling clock tick, from interrupt context (see thread.h).
+ *
+ * The current thread is marked for yielding, implementing round-robin
+ * time sharing, and the tick is propagated to the timer module.
+ */
+void
+thread_report_tick(void)
+{
+    assert(thread_scheduler_locked());
+
+    thread_set_yield(thread_self());
+    timer_report_tick();
+}
diff --git a/src/thread.h b/src/thread.h
new file mode 100644
index 0000000..78f87da
--- /dev/null
+++ b/src/thread.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Thread module.
+ *
+ * This module provides threads of execution, which encapsulate sequential
+ * activities that may be preempted (stopped by the scheduler) so that other
+ * threads may run on the processor. This is what makes an operating system
+ * "time sharing".
+ *
+ * Threads are usually scheduled according to a scheduling policy. This
+ * module implements a very simple fixed-priority based scheduling policy,
+ * where the thread running is ideally always the thread with the highest
+ * priority. Because of priority inversions, this isn't always possible.
+ * A real-time operating system (RTOS) strives to achieve a behaviour
+ * as close as possible to this ideal.
+ *
+ * Threads may also be called tasks (e.g. in many small embedded RTOS), or
+ * lightweight processes (BSDs, Solaris). The word "process", usually refers
+ * to much more heavyweight resource containers on Unix, which include both
+ * threads and other resources such as an address space and a file descriptor
+ * table. Sometimes, the word "task" also refers to this (e.g. in Linux),
+ * so using the word "thread" is usually much less ambiguous.
+ *
+ * Almost all modern operating systems associate a stack with each thread.
+ * In the past, threads could explicitely save the state they would need to
+ * be restored (look up "continuations"). But since preemptive scheduling
+ * requires the ability to preempt a thread at almost any time, a generic
+ * way to save and restore the thread state is needed. The most common way
+ * is to store that state on the stack, save the stack pointer into the
+ * thread structure, and later do the reverse when the thread is dispatched
+ * again on the processor.
+ *
+ * Stack overflows in embedded systems are a major source of bugs. The stack
+ * of a thread needs to be large enough to store all the data required by the
+ * longest call chain possible and also the saved state of the thread if
+ * interrupted right at the end of that chain, something that is quite
+ * difficult to determine from static analysis. Some systems provide tools
+ * to detect such overflows by e.g. filling the stack with a pattern at
+ * creation time, and making sure the pattern hasn't changed near the end
+ * of the stack. On systems with virtual memory, guard pages can be used to
+ * achieve an even more reliable effect, although at greater cost.
+ *
+ * A major concept around (kernel) threads is preemption. Disabling preemption
+ * means that the current thread may not be preempted, i.e. it will keep
+ * running until preemption is reenabled. Therefore, it's not possible for a
+ * thread to passively wait for an event if preemption is disabled. Disabling
+ * preemption is one of the primary means to implement critical sections.
+ * Many embedded systems build critical sections by disabling interrupts,
+ * but this lack of decoupling means that interrupts may unnecessarily be
+ * disabled, which increases interrupt latency.
+ *
+ * In X1, interrupts must only be disabled when data is shared between a
+ * thread and an interrupt handler, as a means to prevent the interrupt
+ * handler from running while the thread is accessing the shared data.
+ * Disabling preemption is the preferred way to implement short, time-bounded
+ * critical sections. If the cost of the critical section is "too high"
+ * (something quite hard to evaluate and which rests entirely on the decision
+ * of the developer), mutexes are the recommended tool, since they keep
+ * preemption enabled during the critical section.
+ */
+
+#ifndef _THREAD_H
+#define _THREAD_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+/*
+ * The scheduling frequency is the rate at which the clock used for scheduling
+ * ticks. On each tick, the scheduler may mark the currently running thread to
+ * yield.
+ *
+ * This implementation uses periodic ticks, but if the underlying hardware
+ * supports reprogramming without losing track of time, a dynamic tick
+ * implementation could be used.
+ */
+#define THREAD_SCHED_FREQ 100
+
+/*
+ * Maximum size of thread names, including the null terminating character.
+ */
+#define THREAD_NAME_MAX_SIZE 16
+
+/*
+ * Minimum size of thread stacks.
+ */
+#define THREAD_STACK_MIN_SIZE 512
+
+/*
+ * Total number of thread priorities.
+ */
+#define THREAD_NR_PRIORITIES 20
+
+/*
+ * The lowest priority is used by the idle thread and can be used by very
+ * low priority (usually background) threads.
+ */
+#define THREAD_IDLE_PRIORITY 0
+
+/*
+ * Range of regular priorities.
+ *
+ * Lower values mean lower priorities.
+ */
+#define THREAD_MIN_PRIORITY 1
+#define THREAD_MAX_PRIORITY (THREAD_NR_PRIORITIES - 1)
+
+/*
+ * Type for thread functions.
+ */
+typedef void (*thread_fn_t)(void *arg);
+
+/*
+ * Opaque declaration.
+ *
+ * An opaque declaration allows the interface to prevent any direct access
+ * to the structure itself from external users. The price is that, without
+ * the definition, the compiler doesn't know the size of the structure,
+ * which prevents allocations from the stack and composition by embedding
+ * in other structures, and usually requires the interface to provide
+ * dynamic allocation.
+ */
+struct thread;
+
+/*
+ * Early initialization of the thread module.
+ *
+ * This function initializes a dummy thread context so that functions that
+ * require a thread context, such as thread_self(), can be used before the
+ * thread module is initialized.
+ */
+void thread_bootstrap(void);
+
+/*
+ * Initialize the thread module.
+ *
+ * On return, new threads may be created. They will only run once the scheduler
+ * is enabled.
+ */
+void thread_setup(void);
+
+/*
+ * Create a thread.
+ *
+ * The new thread runs the given function, along with the given argument,
+ * as soon as permitted by the scheduler.
+ *
+ * A pointer to the new thread is returned into *threadp, if the latter isn't
+ * NULL.
+ */
+int thread_create(struct thread **threadp, thread_fn_t fn, void *arg,
+ const char *name, size_t stack_size, unsigned int priority);
+
+/*
+ * Make the current thread terminate.
+ *
+ * For the sake of simplicity, this module doesn't provide "detached"
+ * threads, where the resources are automatically released on exit.
+ * This means that all threads must be joined to avoid resource leaks.
+ *
+ * This function doesn't return.
+ */
+void thread_exit(void) __attribute__((noreturn));
+
+/*
+ * Wait for a thread to terminate.
+ *
+ * When the given thread is joined, its resources are released.
+ */
+void thread_join(struct thread *thread);
+
+/*
+ * Return the current thread.
+ */
+struct thread * thread_self(void);
+
+/*
+ * Return the name of the given thread.
+ */
+const char * thread_name(const struct thread *thread);
+
+/*
+ * Yield the processor.
+ *
+ * The calling thread remains in the running state, and may keep running on
+ * the processor if the scheduler determines that it should continue.
+ */
+void thread_yield(void);
+
+/*
+ * Make the calling thread sleep until awaken.
+ *
+ * Preemption must be disabled exactly once before calling this function, and
+ * is used to reliably synchronize waiting for/triggering an event.
+ *
+ * Preemption is used to serialize access to the variables shared between
+ * the sleeping thread and the waking code, which may run from another thread
+ * (thread context) or from an interrupt handler (interrupt context).
+ * If serializing against an interrupt handler, the user must also disable
+ * interrupts.
+ *
+ * By disabling preemption, checking the predicate and sleeping is done
+ * atomically with respect to setting the predicate and waking up. Obviously,
+ * while sleeping, preemption is reenabled by this function, to allow another
+ * thread to set the predicate and wake up the sleeping thread, which is why
+ * the preemption level must be exactly one on entry.
+ *
+ * Note that this function may return for other reasons than the predicate
+ * becoming true. These wake-ups are called spurious wake-ups and may be
+ * caused by implementation details as well as manually waking up threads
+ * (e.g. with C/Unix signals such as SIGINT). This is why sleeping should
+ * always be enclosed in a loop, rechecking the predicate on each iteration.
+ *
+ * Here is an example of sleeping and waking up :
+ *
+ * static bool predicate;
+ * static struct thread *thread;
+ *
+ * void
+ * init(void)
+ * {
+ * predicate = false;
+ * thread_create(&thread, run, ...);
+ * }
+ *
+ * void
+ * run(void)
+ * {
+ * for (;;) {
+ * thread_preempt_disable(); Disable preemption exactly once.
+ *
+ * while (!predicate) { Checking the predicate and sleeping
+ * thread_sleep(); is atomic with respect to setting
+ * } the predicate and waking up.
+ *
+ * do_something();
+ *
+ * thread_preempt_enable();
+ * }
+ * }
+ *
+ * void
+ * wakeup(void)
+ * {
+ * assert(!thread_preempt_enabled()); Because preemption is disabled,
+ * predicate = true; setting the predicate and waking up
+ * thread_wakeup(thread); is atomic with respect to checking
+ * the predicate and sleeping.
+ * }
+ *
+ * This pattern is very close to the POSIX condition variable [1]. The
+ * differences are :
+ * - The mutex is replaced by preemption for serialization.
+ * - The condition variable isn't needed because the interface only allows
+ * waking up a single thread.
+ * - Cancellation is completely ignored.
+ *
+ * You may compare this example with the one in condvar.h for a clear view of
+ * the similarities.
+ *
+ * A mutex would have been difficult to use here since mutexes rely on this
+ * low-level interface, leading to complicated dependency issues. In addition,
+ * this is a single processor / single run queue scheduler so using
+ * preemption, which is usually processor-local, is sufficient. Finally,
+ * it's also the cheapest way to build critical sections, which improves
+ * performance.
+ *
+ * [1] http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_wait.html.
+ */
+void thread_sleep(void);
+
+/*
+ * Wake up the given thread.
+ *
+ * For reliable event processing, preemption should be disabled.
+ *
+ * This function may safely be called from interrupt context.
+ */
+void thread_wakeup(struct thread *thread);
+
+/*
+ * Preemption control functions.
+ *
+ * Note that enabling preemption actually increments the preemption level
+ * whereas disabling preemption decrements it, allowing preemption-based
+ * critical sections to nest. Preemption is enabled when the preemption
+ * level is 0.
+ */
+void thread_preempt_enable(void);
+void thread_preempt_disable(void);
+bool thread_preempt_enabled(void);
+
+/*
+ * Report a tick.
+ *
+ * This function must be called from interrupt context.
+ */
+void thread_report_tick(void);
+
+/*
+ * Enable the scheduler.
+ */
+void thread_enable_scheduler(void) __attribute__((noreturn));
+
+#endif /* _THREAD_H */
diff --git a/src/thread_asm.S b/src/thread_asm.S
new file mode 100644
index 0000000..0327fce
--- /dev/null
+++ b/src/thread_asm.S
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+.section .text
+.code32
+
+/*
+ * void thread_load_context(struct thread *thread)
+ *
+ * Load the context of the given thread for the first dispatch, without
+ * saving any previous context. This code dereferences the thread address
+ * directly, so the saved stack pointer must be the first member of
+ * struct thread.
+ */
+.global thread_load_context
+thread_load_context:
+ mov 4(%esp), %eax /* Load the thread address */
+ mov (%eax), %esp /* Switch to the thread stack */
+ jmp thread_restore_context
+
+/*
+ * void thread_switch_context(struct thread *prev, struct thread *next)
+ *
+ * Save the callee-visible state of prev on its stack, then restore the
+ * state of next from its own stack. The set of saved registers matches
+ * the frame forged by thread_stack_forge() in thread.c.
+ */
+.global thread_switch_context
+thread_switch_context:
+ mov 4(%esp), %eax /* Get prev thread address */
+ mov 8(%esp), %ecx /* Get next thread address */
+
+ push %ebp /* Save registers owned by the caller */
+ push %ebx
+ push %edi
+ push %esi
+
+ mov %esp, (%eax) /* Save prev thread stack pointer */
+ mov (%ecx), %esp /* Switch to the stack of the next thread */
+
+thread_restore_context:
+ pop %esi
+ pop %edi
+ pop %ebx
+ pop %ebp
+
+ ret
diff --git a/src/timer.c b/src/timer.c
new file mode 100644
index 0000000..b663e10
--- /dev/null
+++ b/src/timer.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lib/list.h>
+#include <lib/macros.h>
+
+#include "cpu.h"
+#include "mutex.h"
+#include "panic.h"
+#include "thread.h"
+#include "timer.h"
+
+#define TIMER_STACK_SIZE 4096
+
+/*
+ * When determining whether a point in time is in the future or the past,
+ * it's important to remember that the value is always finite. Here,
+ * the ticks use a 32-bit type (unsigned long on i386), so, assuming
+ * a 100Hz frequency, time wraps around about every 497 days. Therefore,
+ * the implementation needs a way to partition time between the future
+ * and the past. To do so, it considers that all values from a reference
+ * up to a threshold are in the future (except the present), and all other
+ * values are in the past. This macro defines that threshold.
+ *
+ * Currently, the threshold is half of the maximum value, which is
+ * equivalent to using a signed integer where positive values would denote
+ * the future and negative ones the past, except that overflows on signed
+ * integers are undefined behaviour, whereas overflows on unsigned
+ * integers are well specified (3.4.3 Undefined Behaviour and 6.2.5 Types).
+ */
+#define TIMER_THRESHOLD (((unsigned long)-1) / 2)
+
+/*
+ * The current time, in ticks.
+ */
+static unsigned long timer_ticks;
+
+/*
+ * List of timers, sorted by time.
+ *
+ * The timer mutex must be locked when accessing the timer list.
+ */
+static struct list timer_list;
+static struct mutex timer_mutex;
+
+/*
+ * The timer thread, which provides context for all timer callbacks.
+ */
+static struct thread *timer_thread;
+
+/*
+ * True if the timer list is empty.
+ *
+ * This is a copy of list_empty(&timer_list) which may be used from
+ * interrupt context, where locking a mutex is impossible.
+ *
+ * Interrupts must be disabled when accessing this variable.
+ */
+static bool timer_list_empty;
+
+/*
+ * Time in ticks of the first timer on the timer list.
+ *
+ * Only valid if the list isn't empty.
+ *
+ * Interrupts must be disabled when accessing this variable.
+ */
+static unsigned long timer_wakeup_ticks;
+
+/*
+ * A time is expired when it's strictly in the past compared to the
+ * reference. With unsigned wrap-around arithmetic, this is the case
+ * exactly when the difference exceeds the threshold (see the
+ * TIMER_THRESHOLD description).
+ */
+bool
+timer_ticks_expired(unsigned long ticks, unsigned long ref)
+{
+    return (ticks - ref) > TIMER_THRESHOLD;
+}
+
+/* A time has occurred when it's expired or equal to the reference. */
+bool
+timer_ticks_occurred(unsigned long ticks, unsigned long ref)
+{
+    return (ticks == ref) || timer_ticks_expired(ticks, ref);
+}
+
+/*
+ * Return true if the first timer on the list is due.
+ *
+ * Interrupts must be disabled when calling this function, since it reads
+ * data shared with the timer interrupt handler.
+ */
+static bool
+timer_work_pending(void)
+{
+    assert(!cpu_intr_enabled());
+
+    return !timer_list_empty
+           && timer_ticks_occurred(timer_wakeup_ticks, timer_ticks);
+}
+
+/* A timer is scheduled when it's linked in the timer list. */
+static bool
+timer_scheduled(const struct timer *timer)
+{
+    return !list_node_unlinked(&timer->node);
+}
+
+/* Apply the tick expiration check to a timer's expiration time. */
+static bool
+timer_expired(const struct timer *timer, unsigned long ref)
+{
+    return timer_ticks_expired(timer->ticks, ref);
+}
+
+/* Apply the tick occurrence check to a timer's expiration time. */
+static bool
+timer_occurred(const struct timer *timer, unsigned long ref)
+{
+    return timer_ticks_occurred(timer->ticks, ref);
+}
+
+/* Run a timer callback, in the context of the timer thread. */
+static void
+timer_process(struct timer *timer)
+{
+    timer->fn(timer->arg);
+}
+
+/*
+ * Process all timers that have occurred at the given time.
+ *
+ * The timer mutex is released while running each callback, so that
+ * callbacks may themselves use the timer interface without deadlocking.
+ * Once done, the list empty flag and next wake-up time, both shared with
+ * interrupt context, are updated with interrupts disabled.
+ */
+static void
+timer_process_list(unsigned long now)
+{
+    struct timer *timer;
+    uint32_t eflags;
+
+    mutex_lock(&timer_mutex);
+
+    while (!list_empty(&timer_list)) {
+        timer = list_first_entry(&timer_list, typeof(*timer), node);
+
+        if (!timer_occurred(timer, now)) {
+            break;
+        }
+
+        /* Reinitializing the node marks the timer as unscheduled */
+        list_remove(&timer->node);
+        list_node_init(&timer->node);
+        mutex_unlock(&timer_mutex);
+
+        timer_process(timer);
+
+        mutex_lock(&timer_mutex);
+    }
+
+    eflags = cpu_intr_save();
+
+    timer_list_empty = list_empty(&timer_list);
+
+    if (!timer_list_empty) {
+        timer = list_first_entry(&timer_list, typeof(*timer), node);
+        timer_wakeup_ticks = timer->ticks;
+    }
+
+    cpu_intr_restore(eflags);
+
+    mutex_unlock(&timer_mutex);
+}
+
+/*
+ * Timer thread function.
+ *
+ * Sleep until timer work is pending, then process the timer list.
+ * Preemption is disabled exactly once around the sleep, as required by
+ * thread_sleep(), and interrupts are disabled because the predicate is
+ * shared with the timer interrupt handler.
+ */
+static void
+timer_run(void *arg)
+{
+    unsigned long now;
+    uint32_t eflags;
+
+    (void)arg;
+
+    for (;;) {
+        thread_preempt_disable();
+        eflags = cpu_intr_save();
+
+        for (;;) {
+            now = timer_ticks;
+
+            if (timer_work_pending()) {
+                break;
+            }
+
+            thread_sleep();
+        }
+
+        cpu_intr_restore(eflags);
+        thread_preempt_enable();
+
+        timer_process_list(now);
+    }
+}
+
+/*
+ * Initialize the timer module.
+ *
+ * The timer thread, which provides the context for all callbacks, is
+ * created at the maximum priority.
+ */
+void
+timer_setup(void)
+{
+    int error;
+
+    timer_ticks = 0;
+    timer_list_empty = true;
+
+    list_init(&timer_list);
+    mutex_init(&timer_mutex);
+
+    error = thread_create(&timer_thread, timer_run, NULL,
+                          "timer", TIMER_STACK_SIZE, THREAD_MAX_PRIORITY);
+
+    if (error) {
+        panic("timer: unable to create thread");
+    }
+}
+
+/*
+ * Return the current time, in ticks.
+ *
+ * Interrupts are disabled to get a consistent snapshot of the tick
+ * counter, which is updated from interrupt context.
+ */
+unsigned long
+timer_now(void)
+{
+    unsigned long ticks;
+    uint32_t eflags;
+
+    eflags = cpu_intr_save();
+    ticks = timer_ticks;
+    cpu_intr_restore(eflags);
+
+    return ticks;
+}
+
+/*
+ * Initialize a timer.
+ *
+ * The unlinked node marks the timer as unscheduled.
+ */
+void
+timer_init(struct timer *timer, timer_fn_t fn, void *arg)
+{
+    list_node_init(&timer->node);
+    timer->fn = fn;
+    timer->arg = arg;
+}
+
+/* Return the absolute expiration time of the given timer, in ticks. */
+unsigned long
+timer_get_time(const struct timer *timer)
+{
+    unsigned long ticks;
+
+    mutex_lock(&timer_mutex);
+    ticks = timer->ticks;
+    mutex_unlock(&timer_mutex);
+
+    return ticks;
+}
+
+/*
+ * Schedule a timer to expire at the given absolute time, in ticks.
+ *
+ * The timer must not already be scheduled.
+ */
+void
+timer_schedule(struct timer *timer, unsigned long ticks)
+{
+    struct timer *tmp;
+    uint32_t eflags;
+
+    mutex_lock(&timer_mutex);
+
+    assert(!timer_scheduled(timer));
+
+    timer->ticks = ticks;
+
+    /*
+     * Find the insertion point.
+     *
+     * This makes timer scheduling an O(n) [1] operation, and assumes a low
+     * number of timers. This is also why a mutex is used instead of
+     * disabling preemption, since preemption then remains enabled,
+     * allowing higher priority threads to run.
+     *
+     * [1] https://en.wikipedia.org/wiki/Big_O_notation
+     */
+    list_for_each_entry(&timer_list, tmp, node) {
+        if (!timer_expired(tmp, ticks)) {
+            break;
+        }
+    }
+
+    list_insert_before(&tmp->node, &timer->node);
+
+    /* The first timer on the list determines the next wake-up time */
+    timer = list_first_entry(&timer_list, typeof(*timer), node);
+
+    /*
+     * This interrupt-based critical section could be moved outside the
+     * mutex-based one, which would make the latter shorter. The downside
+     * of this approach is that a timer interrupt occurring after unlocking
+     * the mutex but before disabling interrupts will wake up the timer
+     * thread, if there was work pending already. The timer thread may
+     * preempt the current thread and process all timers before the current
+     * thread resumes and clears the list empty flag. At the next timer
+     * interrupt, the handler will determine that there is work pending and
+     * wake up the timer thread, despite the fact that there is actually no
+     * timer in the list.
+     *
+     * By holding the mutex while clearing the list empty flag, potential
+     * spurious wake-ups are completely avoided.
+     */
+    eflags = cpu_intr_save();
+    timer_list_empty = false;
+    timer_wakeup_ticks = timer->ticks;
+    cpu_intr_restore(eflags);
+
+    mutex_unlock(&timer_mutex);
+}
+
+/*
+ * Report a clock tick, from interrupt context.
+ *
+ * Update the current time and wake up the timer thread if the first
+ * timer on the list is due.
+ */
+void
+timer_report_tick(void)
+{
+    timer_ticks++;
+
+    if (timer_work_pending()) {
+        thread_wakeup(timer_thread);
+    }
+}
diff --git a/src/timer.h b/src/timer.h
new file mode 100644
index 0000000..e4a531e
--- /dev/null
+++ b/src/timer.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * Software timer module.
+ */
+
+#ifndef _TIMER_H
+#define _TIMER_H
+
+#include <stdbool.h>
+
+#include <lib/list.h>
+
+/*
+ * Type for timer callback functions.
+ *
+ * These functions run in the context of the timer thread.
+ */
+typedef void (*timer_fn_t)(void *arg);
+
+struct timer {
+ struct list node;
+ unsigned long ticks;
+ timer_fn_t fn;
+ void *arg;
+};
+
+/*
+ * Check if a time, in ticks, is considered to have expired/occurred
+ * compared to a given reference time, also in ticks.
+ *
+ * A time is considered expired when it's strictly in the past compared
+ * to the given reference time, and occurred when it's expired or equal
+ * to the reference time.
+ */
+bool timer_ticks_expired(unsigned long ticks, unsigned long ref);
+bool timer_ticks_occurred(unsigned long ticks, unsigned long ref);
+
+/*
+ * Initialize the timer module.
+ */
+void timer_setup(void);
+
+/*
+ * Return the current time, in ticks.
+ */
+unsigned long timer_now(void);
+
+/*
+ * Initialize a timer.
+ *
+ * A timer may only be safely initialized when not scheduled.
+ */
+void timer_init(struct timer *timer, timer_fn_t fn, void *arg);
+
+/*
+ * Schedule a timer.
+ *
+ * The timer callback function is called at or after the given scheduled
+ * (absolute) time, in ticks. If the scheduled time denotes the past, the
+ * timer function is called immediately.
+ *
+ * A timer may only be safely scheduled when not already scheduled. When
+ * a timer expires and its callback function runs, it is not considered
+ * scheduled any more, and may be safely rescheduled from within the
+ * callback function. This is how periodic timers are implemented.
+ *
+ * Note that a timer callback function never runs exactly at its
+ * scheduled time. The delay between the scheduled time and the time at
+ * which the timer callback function actually runs is called the latency.
+ * Ideally, this latency should be as short as possible and never exceed
+ * a maximum limit, i.e. be time-bounded. That's what real-time systems
+ * are about. Unfortunately, it's quite difficult to achieve most of the
+ * time. There are many sources of unexpected latency such as thread
+ * scheduling (if not using a real-time scheduling algorithm), priority
+ * inversions, other interrupts, cache/TLB misses, contention on the system
+ * bus (e.g. when the CPU and a DMA controller compete to become the bus
+ * master for a transfer), and memory (DDR SDRAM) access requests being
+ * reordered by the controller, to name the most common.
+ *
+ * Also note that, in addition to latency, another parameter that affects
+ * the processing of a timer is resolution. In this implementation, the
+ * timer is configured to raise interrupts at the thread scheduler
+ * frequency, normally 100 Hz, making the resolution 10ms, which is
+ * considered a low resolution. This means that a timer cannot be
+ * scheduled to trigger at times that aren't multiples of 10ms on the
+ * clock used by the timer system. Finally, note that when scheduling
+ * relative timers, unless stated otherwise, the time for the timer to
+ * trigger is less than the time requested. This is because the "current
+ * time" always marks the past. For example, with the 10ms resolution
+ * timer system used here, timer_now() could return 1000 when the "real"
+ * current time is actually 1000.9. Assuming the timer is scheduled to
+ * trigger at 1001, this means that, instead of waiting a complete tick,
+ * the timer would trigger only a tenth of the requested time after being
+ * scheduled.
+ *
+ * What this interface guarantees is that the function never runs before
+ * its scheduled time.
+ *
+ * Finally, for the sake of simplicity, this function doesn't provide a
+ * way to cancel a timer.
+ */
+void timer_schedule(struct timer *timer, unsigned long ticks);
+
+/*
+ * Return the scheduled time of a timer, in ticks.
+ */
+unsigned long timer_get_time(const struct timer *timer);
+
+/*
+ * Report a periodic tick to the timer module.
+ *
+ * This function is called by the hardware timer driver interrupt handler.
+ */
+void timer_report_tick(void);
+
+#endif /* _TIMER_H */
diff --git a/src/uart.c b/src/uart.c
new file mode 100644
index 0000000..eed7fe4
--- /dev/null
+++ b/src/uart.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <lib/cbuf.h>
+#include <lib/macros.h>
+
+#include "cpu.h"
+#include "error.h"
+#include "io.h"
+#include "uart.h"
+#include "thread.h"
+
+#define UART_BAUD_RATE 115200
+
+#define UART_CLOCK 115200
+#define UART_DIVISOR (UART_CLOCK / UART_BAUD_RATE)
+
+#define UART_IRQ 4
+
+#define UART_IER_DATA 0x1
+
+#define UART_LCR_8BITS 0x3
+#define UART_LCR_STOP1 0
+#define UART_LCR_PARITY_NONE 0
+#define UART_LCR_DLAB 0x80
+
+#define UART_LSR_DATA_READY 0x01
+#define UART_LSR_TX_EMPTY 0x20
+
+#define UART_COM1_PORT 0x3F8
+#define UART_REG_DAT 0
+#define UART_REG_DIVL 0
+#define UART_REG_IER 1
+#define UART_REG_DIVH 1
+#define UART_REG_LCR 3
+#define UART_REG_LSR 5
+
+#define UART_BUFFER_SIZE 16
+
+#if !ISP2(UART_BUFFER_SIZE)
+#error "invalid buffer size"
+#endif
+
+/*
+ * Data shared between threads and the interrupt handler.
+ *
+ * Interrupts and preemption must be disabled when accessing these data.
+ */
+static uint8_t uart_buffer[UART_BUFFER_SIZE];
+static struct cbuf uart_cbuf;
+static struct thread *uart_waiter;
+
+static void
+uart_irq_handler(void *arg)
+{
+    bool received;
+    uint8_t value;
+
+    (void)arg;
+
+    received = false;
+
+    /*
+     * Drain the receiver while the line status register reports pending
+     * data, pushing each byte into the shared circular buffer.
+     */
+    while (io_read(UART_COM1_PORT + UART_REG_LSR) & UART_LSR_DATA_READY) {
+        received = true;
+        value = io_read(UART_COM1_PORT + UART_REG_DAT);
+
+        if (cbuf_pushb(&uart_cbuf, value, false) != 0) {
+            printf("uart: error: buffer full\n");
+            break;
+        }
+    }
+
+    /* Don't wake up the reader on a spurious interrupt. */
+    if (received) {
+        thread_wakeup(uart_waiter);
+    }
+}
+
+void
+uart_setup(void)
+{
+    /* Initialize the receive buffer shared with the interrupt handler. */
+    cbuf_init(&uart_cbuf, uart_buffer, sizeof(uart_buffer));
+
+    /*
+     * Set the DLAB bit to access the divisor latch registers, program
+     * the baud rate divisor (low then high byte), then clear DLAB while
+     * configuring the line for 8 data bits, 1 stop bit, no parity.
+     */
+    io_write(UART_COM1_PORT + UART_REG_LCR, UART_LCR_DLAB);
+    io_write(UART_COM1_PORT + UART_REG_DIVL, UART_DIVISOR);
+    io_write(UART_COM1_PORT + UART_REG_DIVH, UART_DIVISOR >> 8);
+    io_write(UART_COM1_PORT + UART_REG_LCR, UART_LCR_8BITS | UART_LCR_STOP1
+             | UART_LCR_PARITY_NONE);
+
+    /* Enable the "received data available" interrupt. */
+    io_write(UART_COM1_PORT + UART_REG_IER, UART_IER_DATA);
+
+    cpu_irq_register(UART_IRQ, uart_irq_handler, NULL);
+}
+
+static void
+uart_tx_wait(void)
+{
+    uint8_t lsr;
+
+    /*
+     * Busy-wait until the line status register reports the transmitter
+     * as empty, i.e. ready to accept another byte.
+     */
+    do {
+        lsr = io_read(UART_COM1_PORT + UART_REG_LSR);
+    } while (!(lsr & UART_LSR_TX_EMPTY));
+}
+
+static void
+uart_write_byte(uint8_t byte)
+{
+    /* Wait for the transmitter to become ready, then send the byte. */
+    uart_tx_wait();
+    io_write(UART_COM1_PORT + UART_REG_DAT, byte);
+}
+
+void
+uart_write(uint8_t byte)
+{
+    bool newline;
+
+    /*
+     * Emit a carriage return before each line feed (LF -> CRLF), the
+     * conventional line ending on serial terminals.
+     */
+    newline = (byte == '\n');
+
+    if (newline) {
+        uart_write_byte('\r');
+    }
+
+    uart_write_byte(byte);
+}
+
+int
+uart_read(uint8_t *byte)
+{
+    uint32_t eflags;
+    int error;
+
+    /*
+     * Disable preemption and interrupts to safely access the data
+     * shared with the interrupt handler, as required by the comment on
+     * uart_buffer/uart_cbuf/uart_waiter.
+     *
+     * Note that eflags is uint32_t, matching the return type of
+     * cpu_intr_save() as used in the rest of the tree (see timer.c),
+     * instead of a plain int.
+     */
+    thread_preempt_disable();
+    eflags = cpu_intr_save();
+
+    /* Only a single thread may wait for UART data at a time. */
+    if (uart_waiter) {
+        error = ERROR_BUSY;
+        goto out;
+    }
+
+    /*
+     * Sleep until the interrupt handler has pushed at least one byte
+     * into the circular buffer and woken us up.
+     */
+    for (;;) {
+        error = cbuf_popb(&uart_cbuf, byte);
+
+        if (!error) {
+            break;
+        }
+
+        uart_waiter = thread_self();
+        thread_sleep();
+        uart_waiter = NULL;
+    }
+
+    error = 0;
+
+out:
+    cpu_intr_restore(eflags);
+    thread_preempt_enable();
+
+    return error;
+}
diff --git a/src/uart.h b/src/uart.h
new file mode 100644
index 0000000..0dd2864
--- /dev/null
+++ b/src/uart.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017 Richard Braun.
+ * Copyright (c) 2017 Jerko Lenstra.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ *
+ * UART driver.
+ *
+ * A UART (Universal Asynchronous Receiver-Transmitter) is an old, low-level
+ * interface used for machine communication. It's typically used on embedded
+ * devices as the primary diagnostic interface, especially during development.
+ * It may also be used between remote boards that don't require fast
+ * communication.
+ */
+
+#ifndef _UART_H
+#define _UART_H
+
+#include <stdint.h>
+
+/*
+ * Initialize the uart module.
+ */
+void uart_setup(void);
+
+/*
+ * Write a byte to the UART.
+ */
+void uart_write(uint8_t byte);
+
+/*
+ * Read a byte from the UART.
+ *
+ * This function may only be called from thread context, since it blocks
+ * until there is data to consume.
+ *
+ * If successful, return 0. If another thread is already waiting for data,
+ * ERROR_BUSY is returned.
+ *
+ * Preemption must be enabled when calling this function.
+ */
+int uart_read(uint8_t *byte);
+
+#endif /* _UART_H */