From ec4ddf8b95585cc3e340dd06df109f83a23b2d77 Mon Sep 17 00:00:00 2001 From: Richard Braun Date: Sat, 21 Apr 2018 10:42:11 +0200 Subject: Fix atomic operations argument types In preparation for the rework of atomic operations, all atomic function calls are fixed to use fully supported, compatible types. This means that atomic operations are restricted to 32-bit and 64-bit, and that value types must be strictly compatible with pointer types. --- arch/x86/machine/trap.c | 2 +- kern/clock.c | 2 +- kern/rtmutex_i.h | 4 ++-- kern/spinlock.c | 8 ++++---- kern/task.h | 4 ++-- kern/thread.h | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c index 878f20c..534b3f6 100644 --- a/arch/x86/machine/trap.c +++ b/arch/x86/machine/trap.c @@ -91,7 +91,7 @@ static void __init trap_handler_init(struct trap_handler *handler, int flags, trap_handler_fn_t fn) { handler->flags = flags; - atomic_store(&handler->fn, fn, ATOMIC_RELAXED); + handler->fn = fn; } static void __init diff --git a/kern/clock.c b/kern/clock.c index 27fb9a2..5c48bb9 100644 --- a/kern/clock.c +++ b/kern/clock.c @@ -72,7 +72,7 @@ void clock_tick_intr(void) if (cpu_id() == 0) { #ifdef ATOMIC_HAVE_64B_OPS - atomic_add(&clock_global_time.ticks, 1, ATOMIC_RELAXED); + atomic_add(&clock_global_time.ticks, 1ULL, ATOMIC_RELAXED); #else /* ATOMIC_HAVE_64B_OPS */ diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h index 64ff69a..373c180 100644 --- a/kern/rtmutex_i.h +++ b/kern/rtmutex_i.h @@ -41,8 +41,8 @@ * the turnstile wait function so that only the highest priority thread * may lock the mutex. */ -#define RTMUTEX_CONTENDED 0x1 -#define RTMUTEX_FORCE_WAIT 0x2 +#define RTMUTEX_CONTENDED ((uintptr_t)0x1) +#define RTMUTEX_FORCE_WAIT ((uintptr_t)0x2) #define RTMUTEX_OWNER_MASK (~((uintptr_t)(RTMUTEX_FORCE_WAIT \ | RTMUTEX_CONTENDED))) diff --git a/kern/spinlock.c b/kern/spinlock.c index 71e60cb..5bff42f 100644 --- a/kern/spinlock.c +++ b/kern/spinlock.c @@ -102,7 +102,7 @@ static_assert(SPINLOCK_BITS <= (CHAR_BIT * sizeof(uint32_t)), struct spinlock_qnode { alignas(CPU_L1_SIZE) struct spinlock_qnode *next; - bool locked; + int locked; }; /* TODO NMI support */ @@ -194,13 +194,13 @@ spinlock_qnode_set_next(struct spinlock_qnode *qnode, struct spinlock_qnode *nex static void spinlock_qnode_set_locked(struct spinlock_qnode *qnode) { - qnode->locked = true; + qnode->locked = 1; } static void spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode) { - bool locked; + int locked; for (;;) { locked = atomic_load(&qnode->locked, ATOMIC_ACQUIRE); @@ -216,7 +216,7 @@ spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode) static void spinlock_qnode_clear_locked(struct spinlock_qnode *qnode) { - atomic_store(&qnode->locked, false, ATOMIC_RELEASE); + atomic_store(&qnode->locked, 0, ATOMIC_RELEASE); } static void diff --git a/kern/task.h b/kern/task.h index d6e9eb4..12e29ac 100644 --- a/kern/task.h +++ b/kern/task.h @@ -55,7 +55,7 @@ task_ref(struct task *task) { unsigned long nr_refs; - nr_refs = atomic_fetch_add(&task->nr_refs, 1, ATOMIC_RELAXED); + nr_refs = atomic_fetch_add(&task->nr_refs, 1UL, ATOMIC_RELAXED); assert(nr_refs != (unsigned long)-1); } @@ -64,7 +64,7 @@ task_unref(struct task *task) { unsigned long nr_refs; - nr_refs = atomic_fetch_sub(&task->nr_refs, 1, ATOMIC_ACQ_REL); + nr_refs = atomic_fetch_sub(&task->nr_refs, 1UL, ATOMIC_ACQ_REL); assert(nr_refs != 0); if (nr_refs == 1) { diff --git a/kern/thread.h b/kern/thread.h index eba9bf2..4bead75 100644 --- a/kern/thread.h
+++ b/kern/thread.h @@ -288,7 +288,7 @@ thread_ref(struct thread *thread) { unsigned long nr_refs; - nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED); + nr_refs = atomic_fetch_add(&thread->nr_refs, 1UL, ATOMIC_RELAXED); assert(nr_refs != (unsigned long)-1); } @@ -297,7 +297,7 @@ thread_unref(struct thread *thread) { unsigned long nr_refs; - nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_ACQ_REL); + nr_refs = atomic_fetch_sub(&thread->nr_refs, 1UL, ATOMIC_ACQ_REL); assert(nr_refs != 0); if (nr_refs == 1) { -- cgit v1.2.3 From 9186a2f543d7446666a8bd18ae6f94a83816a0e9 Mon Sep 17 00:00:00 2001 From: Richard Braun Date: Sat, 21 Apr 2018 10:44:34 +0200 Subject: kern/atomic: rework This commit restricts atomic operations to 32-bit and 64-bit (when supported). It keeps a similar source interface, but adds restrictions on the supported types. The main drive behind this change is portability, and in particular, upcoming local atomic operations. --- arch/x86/machine/atomic.h | 115 +++++---- kern/atomic.h | 107 ++++---- kern/atomic_i.h | 619 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 727 insertions(+), 114 deletions(-) create mode 100644 kern/atomic_i.h diff --git a/arch/x86/machine/atomic.h b/arch/x86/machine/atomic.h index 36f0067..5da8568 100644 --- a/arch/x86/machine/atomic.h +++ b/arch/x86/machine/atomic.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Richard Braun. + * Copyright (c) 2012-2018 Richard Braun. * Copyright (c) 2017 Agustina Arzille. * * This program is free software: you can redistribute it and/or modify @@ -27,78 +27,77 @@ #endif #include -#include #include #ifndef __LP64__ /* - * On i386, the compiler generates either an FP-stack read/write, or an SSE2 - * store/load to implement these 64-bit atomic operations. Since that's not - * feasible in the kernel, fall back to cmpxchg8b. Note that, in this case, - * loading becomes a potentially mutating operation, but it's not expected - * to be a problem since atomic operations are normally not used on read-only - * memory. Also note that this assumes the processor is at least an i586. + * XXX Clang seems to have trouble with 64-bit operations on 32-bit + * processors. */ +#ifndef __clang__ -/* - * Temporarily discard qualifiers when loading 64-bits values with a - * compare-and-swap operation. - */ -#define atomic_load_64(ptr, mo) \ -MACRO_BEGIN \ - uint64_t ret_ = 0; \ - \ - __atomic_compare_exchange_n((uint64_t *)(ptr), &ret_, 0, \ - false, mo, __ATOMIC_RELAXED); \ - ret_; \ -MACRO_END - -#define atomic_load(ptr, mo) \ - (typeof(*(ptr)))__builtin_choose_expr(sizeof(*(ptr)) == 8, \ - atomic_load_64(ptr, mo), \ - __atomic_load_n(ptr, mo)) - -#define atomic_store(ptr, val, mo) \ -MACRO_BEGIN \ - if (sizeof(*(ptr)) != 8) { \ - __atomic_store_n(ptr, val, mo); \ - } else { \ - typeof(*(ptr)) oval_, nval_; \ - bool done_; \ - \ - oval_ = *(ptr); \ - nval_ = (val); \ - \ - do { \ - done_ = __atomic_compare_exchange_n(ptr, &oval_, nval_, \ - false, mo, \ - __ATOMIC_RELAXED); \ - } while (!done_); \ - \ - } \ -MACRO_END +/* Report that 64-bits operations are supported */ +#define ATOMIC_HAVE_64B_OPS /* - * Report that load and store are architecture-specific. + * On i386, the compiler generates either an FP-stack read/write, or an SSE2 + * store/load to implement these 64-bit atomic operations. Since that's not + * feasible in the kernel, fall back to cmpxchg8b. 
+ * + * XXX Note that, in this case, loading becomes a potentially mutating + * operation, but it's not expected to be a problem since atomic operations + * are normally not used on read-only memory. + * + * Also note that this assumes the processor is at least an i586. */ -#define ATOMIC_ARCH_SPECIFIC_LOAD -#define ATOMIC_ARCH_SPECIFIC_STORE -#endif /* __LP64__ */ +#define atomic_x86_select(ptr, op) \ +_Generic(*(ptr), \ + unsigned int: __atomic_ ## op ## _n, \ + unsigned long long: atomic_i386_ ## op ## _64) -/* - * XXX Clang seems to have trouble with 64-bits operations on 32-bits - * processors. - */ -#if defined(__LP64__) || !defined(__clang__) +static inline unsigned long long +atomic_i386_load_64(const unsigned long long *ptr, int memorder) +{ + unsigned long long prev; -/* - * Report that 64-bits operations are supported. - */ -#define ATOMIC_HAVE_64B_OPS + prev = 0; + __atomic_compare_exchange_n((unsigned long long *)ptr, &prev, + 0, false, memorder, __ATOMIC_RELAXED); + return prev; +} + +#define atomic_load_n(ptr, memorder) \ +atomic_x86_select(ptr, load)(ptr, memorder) + +static inline void +atomic_i386_store_64(unsigned long long *ptr, unsigned long long val, + int memorder) +{ + unsigned long long prev; + bool done; + + prev = *ptr; + + do { + done = __atomic_compare_exchange_n(ptr, &prev, val, + false, memorder, + __ATOMIC_RELAXED); + } while (!done); +} + +#define atomic_store_n(ptr, val, memorder) \ +atomic_x86_select(ptr, store)(ptr, val, memorder) #endif /* __clang__ */ +#else /* __LP64__ */ + +/* Report that 64-bits operations are supported */ +#define ATOMIC_HAVE_64B_OPS + +#endif /* __LP64__ */ + #endif /* X86_ATOMIC_H */ diff --git a/kern/atomic.h b/kern/atomic.h index d37a28b..1172009 100644 --- a/kern/atomic.h +++ b/kern/atomic.h @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018 Richard Braun. * Copyright (c) 2017 Agustina Arzille. * * This program is free software: you can redistribute it and/or modify @@ -17,6 +18,12 @@ * * Type-generic memory-model aware atomic operations. * + * For portability reasons, this interface restricts atomic operation + * sizes to 32-bit and 64-bit. + * + * Some configurations may not support 64-bit operations. Check if the + * ATOMIC_HAVE_64B_OPS macro is defined to find out. + * * TODO Replace mentions of "memory barriers" throughout the code with * C11 memory model terminology. */ @@ -26,43 +33,25 @@ #include -#include #include /* * Supported memory orders. */ -#define ATOMIC_RELAXED __ATOMIC_RELAXED -#define ATOMIC_CONSUME __ATOMIC_CONSUME -#define ATOMIC_ACQUIRE __ATOMIC_ACQUIRE -#define ATOMIC_RELEASE __ATOMIC_RELEASE -#define ATOMIC_ACQ_REL __ATOMIC_ACQ_REL -#define ATOMIC_SEQ_CST __ATOMIC_SEQ_CST - -/* - * Type-generic atomic operations. 
- */ -#define atomic_fetch_add(ptr, val, mo) __atomic_fetch_add(ptr, val, mo) - -#define atomic_fetch_sub(ptr, val, mo) __atomic_fetch_sub(ptr, val, mo) - -#define atomic_fetch_and(ptr, val, mo) __atomic_fetch_and(ptr, val, mo) - -#define atomic_fetch_or(ptr, val, mo) __atomic_fetch_or(ptr, val, mo) - -#define atomic_fetch_xor(ptr, val, mo) __atomic_fetch_xor(ptr, val, mo) - -#define atomic_add(ptr, val, mo) (void)__atomic_add_fetch(ptr, val, mo) - -#define atomic_sub(ptr, val, mo) (void)__atomic_sub_fetch(ptr, val, mo) - -#define atomic_and(ptr, val, mo) (void)__atomic_and_fetch(ptr, val, mo) +#define ATOMIC_RELAXED __ATOMIC_RELAXED +#define ATOMIC_CONSUME __ATOMIC_CONSUME +#define ATOMIC_ACQUIRE __ATOMIC_ACQUIRE +#define ATOMIC_RELEASE __ATOMIC_RELEASE +#define ATOMIC_ACQ_REL __ATOMIC_ACQ_REL +#define ATOMIC_SEQ_CST __ATOMIC_SEQ_CST -#define atomic_or(ptr, val, mo) (void)__atomic_or_fetch(ptr, val, mo) +#include -#define atomic_xor(ptr, val, mo) (void)__atomic_xor_fetch(ptr, val, mo) +#define atomic_load(ptr, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, load)(ptr, memorder)) -#define atomic_swap(ptr, val, mo) __atomic_exchange_n(ptr, val, mo) +#define atomic_store(ptr, val, memorder) \ +atomic_select(ptr, store)(ptr, val, memorder) /* * For compare-and-swap, deviate a little from the standard, and only @@ -73,37 +62,43 @@ * because atomic CAS is typically used in a loop. However, if a different * code path is taken on failure (rather than retrying), then the user * should be aware that a memory fence might be necessary. - * - * Finally, although a local variable isn't strictly needed for the new - * value, some compilers seem to have trouble when all parameters don't - * have the same type. */ -#define atomic_cas(ptr, oval, nval, mo) \ -MACRO_BEGIN \ - typeof(*(ptr)) oval_, nval_; \ - \ - oval_ = (oval); \ - nval_ = (nval); \ - __atomic_compare_exchange_n(ptr, &oval_, nval_, false, \ - mo, ATOMIC_RELAXED); \ - oval_; \ -MACRO_END +#define atomic_cas(ptr, oval, nval, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, cas)(ptr, oval, nval, memorder)) -/* - * Some architectures may need specific definitions for loads and stores, - * in order to prevent the compiler from emitting unsupported instructions. - * As such, only define these if the architecture-specific part of the - * module didn't already. 
- */ +#define atomic_swap(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, swap)(ptr, val, memorder)) + +#define atomic_fetch_add(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_add)(ptr, val, memorder)) + +#define atomic_fetch_sub(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_sub)(ptr, val, memorder)) + +#define atomic_fetch_and(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_and)(ptr, val, memorder)) + +#define atomic_fetch_or(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_or)(ptr, val, memorder)) + +#define atomic_fetch_xor(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_xor)(ptr, val, memorder)) + +#define atomic_add(ptr, val, memorder) \ +atomic_select(ptr, add)(ptr, val, memorder) + +#define atomic_sub(ptr, val, memorder) \ +atomic_select(ptr, sub)(ptr, val, memorder) + +#define atomic_and(ptr, val, memorder) \ +atomic_select(ptr, and)(ptr, val, memorder) -#ifndef ATOMIC_ARCH_SPECIFIC_LOAD -#define atomic_load(ptr, mo) __atomic_load_n(ptr, mo) -#endif +#define atomic_or(ptr, val, memorder) \ +atomic_select(ptr, or)(ptr, val, memorder) -#ifndef ATOMIC_ARCH_SPECIFIC_STORE -#define atomic_store(ptr, val, mo) __atomic_store_n(ptr, val, mo) -#endif +#define atomic_xor(ptr, val, memorder) \ +atomic_select(ptr, xor)(ptr, val, memorder) -#define atomic_fence(mo) __atomic_thread_fence(mo) +#define atomic_fence(memorder) __atomic_thread_fence(memorder) #endif /* KERN_ATOMIC_H */ diff --git a/kern/atomic_i.h b/kern/atomic_i.h new file mode 100644 index 0000000..9986622 --- /dev/null +++ b/kern/atomic_i.h @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2018 Richard Braun. + * Copyright (c) 2017 Agustina Arzille. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + * Architecture-specific code may override any of the type-generic + * atomic_xxx_n macros by defining them. 
+ */ + +#ifndef KERN_ATOMIC_I_H +#define KERN_ATOMIC_I_H + +#include +#include +#include + +#include +#include + +#ifndef atomic_load_n +#define atomic_load_n __atomic_load_n +#endif /* atomic_load_n */ + +#ifndef atomic_store_n +#define atomic_store_n __atomic_store_n +#endif /* atomic_store_n */ + +#ifndef atomic_cas_n +#define atomic_cas_n(ptr, oval, nval, memorder) \ +MACRO_BEGIN \ + typeof(*(ptr)) oval_; \ + \ + oval_ = (oval); \ + __atomic_compare_exchange_n(ptr, &oval_, (nval), false, \ + memorder, __ATOMIC_RELAXED); \ + oval_; \ +MACRO_END +#endif /* atomic_cas_n */ + +#ifndef atomic_swap_n +#define atomic_swap_n __atomic_exchange_n +#endif /* atomic_swap_n */ + +#ifndef atomic_fetch_add_n +#define atomic_fetch_add_n __atomic_fetch_add +#endif /* atomic_fetch_add_n */ + +#ifndef atomic_fetch_sub_n +#define atomic_fetch_sub_n __atomic_fetch_sub +#endif /* atomic_fetch_sub_n */ + +#ifndef atomic_fetch_and_n +#define atomic_fetch_and_n __atomic_fetch_and +#endif /* atomic_fetch_and_n */ + +#ifndef atomic_fetch_or_n +#define atomic_fetch_or_n __atomic_fetch_or +#endif /* atomic_fetch_or_n */ + +#ifndef atomic_fetch_xor_n +#define atomic_fetch_xor_n __atomic_fetch_xor +#endif /* atomic_fetch_xor_n */ + +#ifndef atomic_add_n +#define atomic_add_n (void)__atomic_add_fetch +#endif /* atomic_add_n */ + +#ifndef atomic_sub_n +#define atomic_sub_n (void)__atomic_sub_fetch +#endif /* atomic_sub_n */ + +#ifndef atomic_and_n +#define atomic_and_n (void)__atomic_and_fetch +#endif /* atomic_and_n */ + +#ifndef atomic_or_n +#define atomic_or_n (void)__atomic_or_fetch +#endif /* atomic_or_n */ + +#ifndef atomic_xor_n +#define atomic_xor_n (void)__atomic_xor_fetch +#endif /* atomic_xor_n */ + +/* + * This macro is used to select the appropriate function for the given + * operation. The default expression is selected for pointer types. + * In order to avoid confusing errors, all built-in types are explicitly + * listed, so that unsupported ones don't select pointer operations. + * Instead, they select a function with an explicit name indicating + * an invalid type. + */ +#define atomic_select(ptr, op) \ +_Generic(*(ptr), \ + float: atomic_invalid_type, \ + double: atomic_invalid_type, \ + long double: atomic_invalid_type, \ + bool: atomic_invalid_type, \ + char: atomic_invalid_type, \ + signed char: atomic_invalid_type, \ + unsigned char: atomic_invalid_type, \ + short: atomic_invalid_type, \ + unsigned short: atomic_invalid_type, \ + int: atomic_ ## op ## _32, \ + unsigned int: atomic_ ## op ## _32, \ + long: atomic_ ## op ## _ul, \ + unsigned long: atomic_ ## op ## _ul, \ + long long: atomic_ ## op ## _64, \ + unsigned long long: atomic_ ## op ## _64, \ + default: atomic_ ## op ## _ptr) + +void atomic_invalid_type(void); + +/* + * After function selection, type genericity is achieved with transparent + * unions, a GCC extension.
Here are a few things to keep in mind : + * - all members must have the same representation + * - calling conventions are inferred from the first member + */ + +#ifdef __LP64__ + +union atomic_ptr_32 { + int *i_ptr; + unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_32 { + const int *i_ptr; + const unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_val32 { + int i; + unsigned int ui; +} __attribute__((transparent_union)); + +#ifdef ATOMIC_HAVE_64B_OPS + +union atomic_ptr_64 { + void *ptr; + unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_64 { + const void *ptr; + const unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_val_64 { + void *ptr; + long l; + unsigned long ul; + long long ll; + unsigned long long ull; +} __attribute__((transparent_union)); + +#endif /* ATOMIC_HAVE_64B_OPS */ + +#else /* __LP64__ */ + +union atomic_ptr_32 { + void *ptr; + unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_32 { + const void *ptr; + const unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_val32 { + void *ptr; + int i; + unsigned int ui; + long l; + unsigned long ul; +} __attribute__((transparent_union)); + +#ifdef ATOMIC_HAVE_64B_OPS + +union atomic_ptr_64 { + long long *ll_ptr; + unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_64 { + const long long *ll_ptr; + const unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_val_64 { + long long ll; + unsigned long long ull; +} __attribute__((transparent_union)); + +#endif /* ATOMIC_HAVE_64B_OPS */ + +#endif /* __LP64__ */ + +#define atomic_ptr_aligned(ptr) P2ALIGNED((uintptr_t)(ptr), sizeof(ptr)) + +/* atomic_load */ + +static inline unsigned int +atomic_load_32(union atomic_constptr_32 ptr, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_load_n(ptr.ui_ptr, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_load_64(union atomic_constptr_64 ptr, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_load_n(ptr.ull_ptr, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_load_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_load_ul atomic_load_64 +#else /* __LP64__ */ +#define atomic_load_ul atomic_load_32 +#endif /* __LP64__ */ + +#define atomic_load_ptr atomic_load_ul + +/* atomic_store */ + +static inline void +atomic_store_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_store_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_store_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_store_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_store_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_store_ul atomic_store_64 +#else /* __LP64__ */ +#define atomic_store_ul atomic_store_32 +#endif /* __LP64__ */ + +#define atomic_store_ptr atomic_store_ul + +/* atomic_cas */ + +static inline unsigned int +atomic_cas_32(union atomic_ptr_32 ptr, union atomic_val32 oval, + union atomic_val32 nval, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return 
atomic_cas_n(ptr.ui_ptr, oval.ui, nval.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_cas_64(union atomic_ptr_64 ptr, union atomic_val_64 oval, + union atomic_val_64 nval, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_cas_n(ptr.ull_ptr, oval.ull, nval.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_cas_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_cas_ul atomic_cas_64 +#else /* __LP64__ */ +#define atomic_cas_ul atomic_cas_32 +#endif /* __LP64__ */ + +#define atomic_cas_ptr atomic_cas_ul + +/* atomic_swap */ + +static inline unsigned int +atomic_swap_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_swap_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_swap_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_swap_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_swap_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_swap_ul atomic_swap_64 +#else /* __LP64__ */ +#define atomic_swap_ul atomic_swap_32 +#endif /* __LP64__ */ + +#define atomic_swap_ptr atomic_swap_ul + +/* atomic_fetch_add */ + +static inline unsigned int +atomic_fetch_add_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_add_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_add_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_add_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_add_ul atomic_fetch_add_64 +#else /* __LP64__ */ +#define atomic_fetch_add_ul atomic_fetch_add_32 +#endif /* __LP64__ */ + +#define atomic_fetch_add_ptr atomic_fetch_add_ul + +/* atomic_fetch_sub */ + +static inline unsigned int +atomic_fetch_sub_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_sub_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_sub_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_sub_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_sub_ul atomic_fetch_sub_64 +#else /* __LP64__ */ +#define atomic_fetch_sub_ul atomic_fetch_sub_32 +#endif /* __LP64__ */ + +#define atomic_fetch_sub_ptr atomic_fetch_sub_ul + +/* atomic_fetch_and */ + +static inline unsigned int +atomic_fetch_and_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_and_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return 
atomic_fetch_and_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_and_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_and_ul atomic_fetch_and_64 +#else /* __LP64__ */ +#define atomic_fetch_and_ul atomic_fetch_and_32 +#endif /* __LP64__ */ + +#define atomic_fetch_and_ptr atomic_fetch_and_ul + +/* atomic_fetch_or */ + +static inline unsigned int +atomic_fetch_or_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_or_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_or_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_or_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_or_ul atomic_fetch_or_64 +#else /* __LP64__ */ +#define atomic_fetch_or_ul atomic_fetch_or_32 +#endif /* __LP64__ */ + +#define atomic_fetch_or_ptr atomic_fetch_or_ul + +/* atomic_fetch_xor */ + +static inline unsigned int +atomic_fetch_xor_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_xor_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_xor_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_xor_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_xor_ul atomic_fetch_xor_64 +#else /* __LP64__ */ +#define atomic_fetch_xor_ul atomic_fetch_xor_32 +#endif /* __LP64__ */ + +#define atomic_fetch_xor_ptr atomic_fetch_xor_ul + +/* atomic_add */ + +static inline void +atomic_add_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_add_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_add_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_add_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_add_ul atomic_add_64 +#else /* __LP64__ */ +#define atomic_add_ul atomic_add_32 +#endif /* __LP64__ */ + +#define atomic_add_ptr atomic_add_ul + +/* atomic_sub */ + +static inline void +atomic_sub_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_sub_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_sub_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_sub_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_sub_ul atomic_sub_64 +#else /* __LP64__ */ +#define atomic_sub_ul atomic_sub_32 +#endif /* __LP64__ */ + +#define atomic_sub_ptr atomic_sub_ul + +/* atomic_and */ + +static
inline void +atomic_and_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_and_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_and_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_and_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_and_ul atomic_and_64 +#else /* __LP64__ */ +#define atomic_and_ul atomic_and_32 +#endif /* __LP64__ */ + +#define atomic_and_ptr atomic_and_ul + +/* atomic_or */ + +static inline void +atomic_or_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_or_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_or_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_or_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_or_ul atomic_or_64 +#else /* __LP64__ */ +#define atomic_or_ul atomic_or_32 +#endif /* __LP64__ */ + +#define atomic_or_ptr atomic_or_ul + +/* atomic_xor */ + +static inline void +atomic_xor_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_xor_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_xor_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_xor_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_xor_ul atomic_xor_64 +#else /* __LP64__ */ +#define atomic_xor_ul atomic_xor_32 +#endif /* __LP64__ */ + +#define atomic_xor_ptr atomic_xor_ul + +#endif /* KERN_ATOMIC_I_H */ -- cgit v1.2.3 From c986dc56b743c7e2de4ca370ea8aba57b064178e Mon Sep 17 00:00:00 2001 From: Richard Braun Date: Sat, 21 Apr 2018 10:46:17 +0200 Subject: doc/intro(9): mention the supported data models --- doc/intro.9.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/intro.9.txt b/doc/intro.9.txt index d4532cb..281db50 100644 --- a/doc/intro.9.txt +++ b/doc/intro.9.txt @@ -261,8 +261,8 @@ module:arch/tcb:: module:arch/trap:: Interrupt and exception handling. -The machine-independent code assumes a completely relaxed memory model as -allowed by the C11 specification. +The machine-independent code assumes either an ILP32 or LP64 data model, and +a completely relaxed memory model as allowed by the C11 specification. X15 currently requires a memory management unit, but that may change in the future. -- cgit v1.2.3
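Usage sketch: with the reworked interface above, callers pass value operands whose types strictly match the atomic variable, and guard 64-bit operations with ATOMIC_HAVE_64B_OPS. The fragment below is a minimal illustration of those rules, modeled on task_ref() and clock_tick_intr() from the first patch; the example_counter structure and functions are hypothetical and not part of the tree.

#include <kern/atomic.h>

struct example_counter {
    unsigned long nr_refs;          /* 32-bit on ILP32, 64-bit on LP64 */
#ifdef ATOMIC_HAVE_64B_OPS
    unsigned long long nr_ticks;    /* always 64-bit, when supported */
#endif
};

static inline unsigned long
example_counter_ref(struct example_counter *counter)
{
    /*
     * The value operand must strictly match the variable type (1UL here,
     * not 1), as required by the argument type fixes in the first patch.
     */
    return atomic_fetch_add(&counter->nr_refs, 1UL, ATOMIC_RELAXED);
}

static inline unsigned long
example_counter_read(const struct example_counter *counter)
{
    /* Loads accept const pointers through the atomic_constptr_* unions. */
    return atomic_load(&counter->nr_refs, ATOMIC_RELAXED);
}

#ifdef ATOMIC_HAVE_64B_OPS
static inline void
example_counter_tick(struct example_counter *counter)
{
    /*
     * 64-bit operations stay behind ATOMIC_HAVE_64B_OPS, since some
     * configurations (e.g. Clang on 32-bit x86) don't provide them.
     */
    atomic_add(&counter->nr_ticks, 1ULL, ATOMIC_RELAXED);
}
#endif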