summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--arch/x86/machine/boot.c11
-rw-r--r--arch/x86/machine/boot.h4
-rw-r--r--arch/x86/machine/cpu.c5
-rw-r--r--arch/x86/machine/cpu.h6
-rw-r--r--arch/x86/machine/pmap.c17
-rw-r--r--arch/x86/machine/tcb.c7
-rw-r--r--arch/x86/machine/tcb.h4
-rw-r--r--arch/x86/machine/trap.c3
-rw-r--r--kern/intr.c9
-rw-r--r--kern/kernel.h6
-rw-r--r--kern/kmem_i.h5
-rw-r--r--kern/llsync_i.h3
-rw-r--r--kern/panic.h4
-rw-r--r--kern/sleepq.c5
-rw-r--r--kern/spinlock.c3
-rw-r--r--kern/thread.c10
-rw-r--r--kern/thread.h6
-rw-r--r--kern/thread_i.h6
-rw-r--r--kern/turnstile.c5
-rw-r--r--kern/work.c5
-rw-r--r--kern/xcall.c9
-rw-r--r--vm/vm_page.c5
22 files changed, 79 insertions(+), 59 deletions(-)
diff --git a/arch/x86/machine/boot.c b/arch/x86/machine/boot.c
index 76fbcb40..7ce7b85d 100644
--- a/arch/x86/machine/boot.c
+++ b/arch/x86/machine/boot.c
@@ -42,6 +42,7 @@
* to "enabling paging" do not refer to this initial identity mapping.
*/
+#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@@ -77,14 +78,14 @@
#include <vm/vm_kmem.h>
#include <vm/vm_setup.h>
-char boot_stack[BOOT_STACK_SIZE] __aligned(CPU_DATA_ALIGN) __bootdata;
-char boot_ap_stack[BOOT_STACK_SIZE] __aligned(CPU_DATA_ALIGN) __bootdata;
+alignas(CPU_DATA_ALIGN) char boot_stack[BOOT_STACK_SIZE] __bootdata;
+alignas(CPU_DATA_ALIGN) char boot_ap_stack[BOOT_STACK_SIZE] __bootdata;
unsigned int boot_ap_id __bootdata;
#ifdef __LP64__
-pmap_pte_t boot_pml4[PMAP_L3_PTES_PER_PT] __aligned(PAGE_SIZE) __bootdata;
-pmap_pte_t boot_pdpt[PMAP_L2_PTES_PER_PT] __aligned(PAGE_SIZE) __bootdata;
-pmap_pte_t boot_pdir[4 * PMAP_L1_PTES_PER_PT] __aligned(PAGE_SIZE) __bootdata;
+alignas(PAGE_SIZE) pmap_pte_t boot_pml4[PMAP_L3_PTES_PER_PT] __bootdata;
+alignas(PAGE_SIZE) pmap_pte_t boot_pdpt[PMAP_L2_PTES_PER_PT] __bootdata;
+alignas(PAGE_SIZE) pmap_pte_t boot_pdir[4 * PMAP_L1_PTES_PER_PT] __bootdata;
char boot_panic_long_mode_msg[] __bootdata
= "boot: processor doesn't support long mode";
#endif /* __LP64__ */
diff --git a/arch/x86/machine/boot.h b/arch/x86/machine/boot.h
index 7a4d85d0..8c590c64 100644
--- a/arch/x86/machine/boot.h
+++ b/arch/x86/machine/boot.h
@@ -18,6 +18,8 @@
#ifndef _X86_BOOT_H
#define _X86_BOOT_H
+#include <stdnoreturn.h>
+
#include <kern/macros.h>
#include <machine/page.h>
#include <machine/pmap.h>
@@ -99,7 +101,7 @@ void * boot_memcpy(void *dest, const void *src, size_t n);
void * boot_memmove(void *dest, const void *src, size_t n);
void * boot_memset(void *s, int c, size_t n);
size_t boot_strlen(const char *s);
-void __noreturn boot_panic(const char *s);
+noreturn void boot_panic(const char *s);
/*
* This function is called by the bootstrap code before paging is enabled.
diff --git a/arch/x86/machine/cpu.c b/arch/x86/machine/cpu.c
index 4746c5f9..a53d8e2a 100644
--- a/arch/x86/machine/cpu.c
+++ b/arch/x86/machine/cpu.c
@@ -16,6 +16,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
@@ -135,7 +136,7 @@ static const struct cpu_tls_seg cpu_tls_seg = {
/*
* Interrupt descriptor table.
*/
-static struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __aligned(8) __read_mostly;
+static alignas(8) struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __read_mostly;
/*
* Double fault handler, and stack for the main processor.
@@ -144,7 +145,7 @@ static struct cpu_gate_desc cpu_idt[CPU_IDT_SIZE] __aligned(8) __read_mostly;
* memory.
*/
static unsigned long cpu_double_fault_handler;
-static char cpu_double_fault_stack[TRAP_STACK_SIZE] __aligned(CPU_DATA_ALIGN);
+static alignas(CPU_DATA_ALIGN) char cpu_double_fault_stack[TRAP_STACK_SIZE];
void
cpu_delay(unsigned long usecs)
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index 0061e136..ee4eb18b 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -123,7 +123,9 @@
#ifndef __ASSEMBLER__
+#include <stdalign.h>
#include <stdint.h>
+#include <stdnoreturn.h>
#include <kern/macros.h>
#include <kern/percpu.h>
@@ -238,7 +240,7 @@ struct cpu {
unsigned int features4;
unsigned short phys_addr_width;
unsigned short virt_addr_width;
- char gdt[CPU_GDT_SIZE] __aligned(8);
+ alignas(8) char gdt[CPU_GDT_SIZE];
struct cpu_tss tss;
#ifndef __LP64__
struct cpu_tss double_fault_tss;
@@ -398,7 +400,7 @@ cpu_idle(void)
*
* Implies a compiler barrier.
*/
-static __noreturn __always_inline void
+noreturn static __always_inline void
cpu_halt(void)
{
cpu_intr_disable();
diff --git a/arch/x86/machine/pmap.c b/arch/x86/machine/pmap.c
index 628a9d93..93b68758 100644
--- a/arch/x86/machine/pmap.c
+++ b/arch/x86/machine/pmap.c
@@ -19,6 +19,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stddef.h>
#include <string.h>
@@ -113,8 +114,8 @@ struct pmap *pmap_current_ptr __percpu;
/*
* "Hidden" kernel root page tables for PAE mode.
*/
-static pmap_pte_t pmap_cpu_kpdpts[X15_MAX_CPUS][PMAP_L2_PTES_PER_PT]
- __read_mostly __aligned(PMAP_PDPT_ALIGN);
+static alignas(PMAP_PDPT_ALIGN) pmap_pte_t
+ pmap_cpu_kpdpts[X15_MAX_CPUS][PMAP_L2_PTES_PER_PT] __read_mostly;
#endif /* X15_X86_PAE */
@@ -184,11 +185,11 @@ struct pmap_update_op {
* operation.
*/
struct pmap_update_oplist {
- struct cpumap cpumap;
+ alignas(CPU_L1_SIZE) struct cpumap cpumap;
struct pmap *pmap;
unsigned int nr_ops;
struct pmap_update_op ops[PMAP_UPDATE_MAX_OPS];
-} __aligned(CPU_L1_SIZE);
+};
static unsigned int pmap_oplist_tsd_key __read_mostly;
@@ -219,13 +220,13 @@ struct pmap_update_queue {
* they perform.
*/
struct pmap_syncer {
- struct thread *thread;
+ alignas(CPU_L1_SIZE) struct thread *thread;
struct pmap_update_queue queue;
struct syscnt sc_updates;
struct syscnt sc_update_enters;
struct syscnt sc_update_removes;
struct syscnt sc_update_protects;
-} __aligned(CPU_L1_SIZE);
+};
static void pmap_sync(void *arg);
@@ -244,14 +245,14 @@ static struct pmap_syncer pmap_syncer __percpu;
* individual TLB entries or globally flush the TLB.
*/
struct pmap_update_request {
- struct list node;
+ alignas(CPU_L1_SIZE) struct list node;
struct spinlock lock;
struct thread *sender;
const struct pmap_update_oplist *oplist;
unsigned int nr_mappings;
int done;
int error;
-} __aligned(CPU_L1_SIZE);
+};
/*
* Per processor array of requests.
diff --git a/arch/x86/machine/tcb.c b/arch/x86/machine/tcb.c
index 8604cee4..fb31a172 100644
--- a/arch/x86/machine/tcb.c
+++ b/arch/x86/machine/tcb.c
@@ -15,16 +15,17 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <stdnoreturn.h>
+
#include <kern/init.h>
-#include <kern/macros.h>
#include <kern/thread.h>
#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/strace.h>
#include <machine/tcb.h>
-void __noreturn tcb_context_load(struct tcb *tcb);
-void __noreturn tcb_start(void);
+noreturn void tcb_context_load(struct tcb *tcb);
+noreturn void tcb_start(void);
void tcb_context_restore(void);
struct tcb *tcb_current_ptr __percpu;
diff --git a/arch/x86/machine/tcb.h b/arch/x86/machine/tcb.h
index b5b20bfc..9c69a5ee 100644
--- a/arch/x86/machine/tcb.h
+++ b/arch/x86/machine/tcb.h
@@ -23,8 +23,8 @@
#include <assert.h>
#include <stdint.h>
+#include <stdnoreturn.h>
-#include <kern/macros.h>
#include <machine/cpu.h>
#include <machine/page.h>
@@ -70,7 +70,7 @@ tcb_set_current(struct tcb *tcb)
*
* Called with interrupts disabled. The caller context is lost.
*/
-void __noreturn tcb_load(struct tcb *tcb);
+noreturn void tcb_load(struct tcb *tcb);
/*
* Context switch.
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
index 19ddaf70..1580fa53 100644
--- a/arch/x86/machine/trap.c
+++ b/arch/x86/machine/trap.c
@@ -20,6 +20,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>
@@ -36,7 +37,7 @@
#include <machine/trap.h>
struct trap_cpu_data {
- unsigned char intr_stack[TRAP_STACK_SIZE] __aligned(CPU_DATA_ALIGN);
+ alignas(CPU_DATA_ALIGN) unsigned char intr_stack[TRAP_STACK_SIZE];
};
static struct trap_cpu_data trap_cpu_data __percpu;
diff --git a/kern/intr.c b/kern/intr.c
index 6590fca0..769f4d93 100644
--- a/kern/intr.c
+++ b/kern/intr.c
@@ -22,6 +22,7 @@
* Shared interrupts are supported.
*/
+#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
@@ -39,10 +40,10 @@
#include <machine/trap.h>
struct intr_handler {
- struct list node;
+ alignas(CPU_L1_SIZE) struct list node;
intr_handler_fn_t fn;
void *arg;
-} __aligned(CPU_L1_SIZE);
+};
/*
* Interrupt controller.
@@ -67,11 +68,11 @@ struct intr_ctl {
* span a cache line to avoid false sharing.
*/
struct intr_entry {
- struct spinlock lock;
+ alignas(CPU_L1_SIZE) struct spinlock lock;
struct intr_ctl *ctl;
unsigned int cpu;
struct list handlers;
-} __aligned(CPU_L1_SIZE);
+};
/*
* Interrupt table.
diff --git a/kern/kernel.h b/kern/kernel.h
index 868066e6..22cae43b 100644
--- a/kern/kernel.h
+++ b/kern/kernel.h
@@ -18,7 +18,7 @@
#ifndef _KERN_KERNEL_H
#define _KERN_KERNEL_H
-#include <kern/macros.h>
+#include <stdnoreturn.h>
/*
* Kernel properties.
@@ -31,13 +31,13 @@
*
* Interrupts must be disabled when calling this function.
*/
-void __noreturn kernel_main(void);
+noreturn void kernel_main(void);
/*
* Entry point for APs.
*
* Interrupts must be disabled when calling this function.
*/
-void __noreturn kernel_ap_main(void);
+noreturn void kernel_ap_main(void);
#endif /* _KERN_KERNEL_H */
diff --git a/kern/kmem_i.h b/kern/kmem_i.h
index d4671e24..beae6c45 100644
--- a/kern/kmem_i.h
+++ b/kern/kmem_i.h
@@ -18,6 +18,7 @@
#ifndef _KERN_KMEM_I_H
#define _KERN_KMEM_I_H
+#include <stdalign.h>
#include <stddef.h>
#include <kern/list.h>
@@ -30,13 +31,13 @@
* The flags member is a read-only CPU-local copy of the parent cache flags.
*/
struct kmem_cpu_pool {
- struct mutex lock;
+ alignas(CPU_L1_SIZE) struct mutex lock;
int flags;
int size;
int transfer_size;
int nr_objs;
void **array;
-} __aligned(CPU_L1_SIZE);
+};
/*
* When a cache is created, its CPU pool type is determined from the buffer
diff --git a/kern/llsync_i.h b/kern/llsync_i.h
index dd5a6179..e043eb9d 100644
--- a/kern/llsync_i.h
+++ b/kern/llsync_i.h
@@ -19,6 +19,7 @@
#define _KERN_LLSYNC_I_H
#include <assert.h>
+#include <stdalign.h>
#include <kern/cpumap.h>
#include <kern/macros.h>
@@ -57,7 +58,7 @@ struct llsync_data {
* - apply optimistic accesses to reduce contention
*/
struct {
- volatile unsigned int value __aligned(CPU_L1_SIZE);
+ alignas(CPU_L1_SIZE) volatile unsigned int value;
} gcid;
};
diff --git a/kern/panic.h b/kern/panic.h
index cd0651a4..e0980575 100644
--- a/kern/panic.h
+++ b/kern/panic.h
@@ -18,12 +18,12 @@
#ifndef _KERN_PANIC_H
#define _KERN_PANIC_H
-#include <kern/macros.h>
+#include <stdnoreturn.h>
/*
* Print the given message and halt the system immediately.
*/
-void __noreturn panic(const char *format, ...)
+noreturn void panic(const char *format, ...)
__attribute__((format(printf, 1, 2)));
#endif /* _KERN_PANIC_H */
diff --git a/kern/sleepq.c b/kern/sleepq.c
index f7c3245b..d8bbbddf 100644
--- a/kern/sleepq.c
+++ b/kern/sleepq.c
@@ -19,6 +19,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@@ -33,9 +34,9 @@
#include <machine/cpu.h>
struct sleepq_bucket {
- struct spinlock lock;
+ alignas(CPU_L1_SIZE) struct spinlock lock;
struct list list;
-} __aligned(CPU_L1_SIZE);
+};
struct sleepq {
struct sleepq_bucket *bucket;
diff --git a/kern/spinlock.c b/kern/spinlock.c
index 68e84215..6b23b36b 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -64,6 +64,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stddef.h>
#include <kern/atomic.h>
@@ -128,7 +129,7 @@ struct spinlock_qnode {
#endif
struct spinlock_cpu_data {
- struct spinlock_qnode qnodes[SPINLOCK_NR_CTXS - 1] __aligned(CPU_L1_SIZE);
+ alignas(CPU_L1_SIZE) struct spinlock_qnode qnodes[SPINLOCK_NR_CTXS - 1];
};
static struct spinlock_cpu_data spinlock_cpu_data __percpu;
diff --git a/kern/thread.c b/kern/thread.c
index eb0f1eb5..a48a08a9 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -82,10 +82,12 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
+#include <stdnoreturn.h>
#include <string.h>
#include <kern/atomic.h>
@@ -227,7 +229,7 @@ struct thread_fs_runq {
* return path) may violate the locking order.
*/
struct thread_runq {
- struct spinlock lock;
+ alignas(CPU_L1_SIZE) struct spinlock lock;
unsigned int cpu;
unsigned int nr_threads;
struct thread *current;
@@ -258,7 +260,7 @@ struct thread_runq {
struct syscnt sc_schedule_intrs;
struct syscnt sc_tick_intrs;
struct syscnt sc_boosts;
-} __aligned(CPU_L1_SIZE);
+};
/*
* Operations of a scheduling class.
@@ -322,7 +324,7 @@ static struct cpumap thread_idle_runqs;
* There can be moderate bouncing on this word so give it its own cache line.
*/
static struct {
- volatile unsigned long value __aligned(CPU_L1_SIZE);
+ alignas(CPU_L1_SIZE) volatile unsigned long value;
} thread_fs_highest_round_struct;
#define thread_fs_highest_round (thread_fs_highest_round_struct.value)
@@ -1515,7 +1517,7 @@ thread_sched_idle_select_runq(struct thread *thread)
panic("thread: idler threads cannot be awaken");
}
-static void __noreturn
+static noreturn void
thread_sched_idle_panic(void)
{
panic("thread: only idle threads are allowed in the idle class");
diff --git a/kern/thread.h b/kern/thread.h
index 24e3c677..760bb5ec 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -36,11 +36,11 @@
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
+#include <stdnoreturn.h>
#include <kern/atomic.h>
#include <kern/condition.h>
#include <kern/cpumap.h>
-#include <kern/macros.h>
#include <kern/spinlock_types.h>
#include <kern/turnstile_types.h>
#include <machine/cpu.h>
@@ -203,7 +203,7 @@ int thread_create(struct thread **threadp, const struct thread_attr *attr,
/*
* Terminate the calling thread.
*/
-void __noreturn thread_exit(void);
+noreturn void thread_exit(void);
/*
* Wait for the given thread to terminate and release its resources.
@@ -244,7 +244,7 @@ void thread_wakeup(struct thread *thread);
*
* Interrupts must be disabled when calling this function.
*/
-void __noreturn thread_run_scheduler(void);
+noreturn void thread_run_scheduler(void);
/*
* Make the calling thread release the processor.
diff --git a/kern/thread_i.h b/kern/thread_i.h
index b7d3de17..3f350889 100644
--- a/kern/thread_i.h
+++ b/kern/thread_i.h
@@ -18,13 +18,13 @@
#ifndef _KERN_THREAD_I_H
#define _KERN_THREAD_I_H
+#include <stdalign.h>
#include <stdbool.h>
#include <kern/atomic.h>
#include <kern/condition_types.h>
#include <kern/cpumap.h>
#include <kern/list_types.h>
-#include <kern/macros.h>
#include <kern/mutex_types.h>
#include <kern/turnstile_types.h>
#include <machine/cpu.h>
@@ -97,7 +97,7 @@ struct thread_fs_data {
* ( ) read-only
*/
struct thread {
- struct tcb tcb; /* (r) */
+ alignas(CPU_L1_SIZE) struct tcb tcb; /* (r) */
unsigned long nr_refs; /* (a) */
unsigned long flags; /* (a) */
@@ -178,7 +178,7 @@ struct thread {
struct list task_node; /* (T) */
void *stack; /* (-) */
char name[THREAD_NAME_SIZE]; /* ( ) */
-} __aligned(CPU_L1_SIZE);
+};
#define THREAD_ATTR_DETACHED 0x1
diff --git a/kern/turnstile.c b/kern/turnstile.c
index 97416ae9..24da2cd2 100644
--- a/kern/turnstile.c
+++ b/kern/turnstile.c
@@ -44,6 +44,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@@ -64,9 +65,9 @@
* (b) bucket
*/
struct turnstile_bucket {
- struct spinlock lock;
+ alignas(CPU_L1_SIZE) struct spinlock lock;
struct list list; /* (b) */
-} __aligned(CPU_L1_SIZE);
+};
/*
* Adding/removing waiters to/from a turnstile are performed while
diff --git a/kern/work.c b/kern/work.c
index 9925c198..5021d6f8 100644
--- a/kern/work.c
+++ b/kern/work.c
@@ -16,6 +16,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stddef.h>
#include <kern/bitmap.h>
@@ -84,7 +85,7 @@ struct work_thread {
* only use one queue.
*/
struct work_pool {
- struct spinlock lock;
+ alignas(CPU_L1_SIZE) struct spinlock lock;
int flags;
struct work_queue queue0;
struct work_queue queue1;
@@ -97,7 +98,7 @@ struct work_pool {
struct list available_threads;
struct list dead_threads;
BITMAP_DECLARE(bitmap, WORK_MAX_THREADS);
-} __aligned(CPU_L1_SIZE);
+};
static int work_thread_create(struct work_pool *pool, unsigned int id);
static void work_thread_destroy(struct work_thread *worker);
diff --git a/kern/xcall.c b/kern/xcall.c
index 1c052ac4..aab45b3b 100644
--- a/kern/xcall.c
+++ b/kern/xcall.c
@@ -16,6 +16,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stddef.h>
#include <kern/atomic.h>
@@ -27,9 +28,9 @@
#include <machine/cpu.h>
struct xcall {
- xcall_fn_t fn;
+ alignas(CPU_L1_SIZE) xcall_fn_t fn;
void *arg;
-} __aligned(CPU_L1_SIZE);
+};
/*
* Per-CPU data.
@@ -48,11 +49,11 @@ struct xcall {
* between multiple cross-calls.
*/
struct xcall_cpu_data {
- struct xcall send_calls[X15_MAX_CPUS];
+ alignas(CPU_L1_SIZE) struct xcall send_calls[X15_MAX_CPUS];
struct xcall *recv_call;
struct spinlock lock;
-} __aligned(CPU_L1_SIZE);
+};
static struct xcall_cpu_data xcall_cpu_data __percpu;
diff --git a/vm/vm_page.c b/vm/vm_page.c
index caa8c98c..00674499 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -30,6 +30,7 @@
*/
#include <assert.h>
+#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@@ -75,12 +76,12 @@
* Per-processor cache of pages.
*/
struct vm_page_cpu_pool {
- struct mutex lock;
+ alignas(CPU_L1_SIZE) struct mutex lock;
int size;
int transfer_size;
int nr_pages;
struct list pages;
-} __aligned(CPU_L1_SIZE);
+};
/*
* Special order value for pages that aren't in a free list. Such pages are