From ec4ddf8b95585cc3e340dd06df109f83a23b2d77 Mon Sep 17 00:00:00 2001 From: Richard Braun Date: Sat, 21 Apr 2018 10:42:11 +0200 Subject: Fix atomic operations argument types In preparation of the rework of atomic operations, all atomic function calls are fixed to use fully supported, compatible types. This means that atomic operations ar erestricted to 32-bit and 64-bit, and that value types must be strictly compatible with pointer types. --- arch/x86/machine/trap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/machine') diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c index 878f20c..534b3f6 100644 --- a/arch/x86/machine/trap.c +++ b/arch/x86/machine/trap.c @@ -91,7 +91,7 @@ static void __init trap_handler_init(struct trap_handler *handler, int flags, trap_handler_fn_t fn) { handler->flags = flags; - atomic_store(&handler->fn, fn, ATOMIC_RELAXED); + handler->fn = fn; } static void __init -- cgit v1.2.3 From 9186a2f543d7446666a8bd18ae6f94a83816a0e9 Mon Sep 17 00:00:00 2001 From: Richard Braun Date: Sat, 21 Apr 2018 10:44:34 +0200 Subject: kern/atomic: rework This commit restricts atomic operations to 32-bit and 64-bit (when supported). It keeps a similar source interface, but adds restrictions on the supported types. The main drive behind this change is portability, and in particular, upcoming local atomic operations. --- arch/x86/machine/atomic.h | 115 +++++---- kern/atomic.h | 107 ++++---- kern/atomic_i.h | 619 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 727 insertions(+), 114 deletions(-) create mode 100644 kern/atomic_i.h (limited to 'arch/x86/machine') diff --git a/arch/x86/machine/atomic.h b/arch/x86/machine/atomic.h index 36f0067..5da8568 100644 --- a/arch/x86/machine/atomic.h +++ b/arch/x86/machine/atomic.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Richard Braun. + * Copyright (c) 2012-2018 Richard Braun. * Copyright (c) 2017 Agustina Arzille. * * This program is free software: you can redistribute it and/or modify @@ -27,78 +27,77 @@ #endif #include -#include #include #ifndef __LP64__ /* - * On i386, the compiler generates either an FP-stack read/write, or an SSE2 - * store/load to implement these 64-bit atomic operations. Since that's not - * feasible in the kernel, fall back to cmpxchg8b. Note that, in this case, - * loading becomes a potentially mutating operation, but it's not expected - * to be a problem since atomic operations are normally not used on read-only - * memory. Also note that this assumes the processor is at least an i586. + * XXX Clang seems to have trouble with 64-bit operations on 32-bit + * processors. */ +#ifndef __clang__ -/* - * Temporarily discard qualifiers when loading 64-bits values with a - * compare-and-swap operation. 
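For reference, here is a minimal standalone sketch of the technique described in the comment above: emulating a 64-bit atomic load with a compare-and-swap of 0 against 0. The helper name load_64_with_cas is illustrative only and not part of the kernel interface.

#include <stdbool.h>
#include <stdint.h>

/*
 * If *ptr happens to be 0, the CAS succeeds and stores 0 back, which
 * changes nothing; otherwise it fails and copies the current value into
 * expected. Either way, expected ends up holding the value that was read
 * atomically. The const qualifier is cast away because cmpxchg8b always
 * performs a write cycle.
 */
static inline uint64_t
load_64_with_cas(const uint64_t *ptr, int memorder)
{
    uint64_t expected = 0;

    __atomic_compare_exchange_n((uint64_t *)ptr, &expected, 0, false,
                                memorder, __ATOMIC_RELAXED);
    return expected;
}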
- */ -#define atomic_load_64(ptr, mo) \ -MACRO_BEGIN \ - uint64_t ret_ = 0; \ - \ - __atomic_compare_exchange_n((uint64_t *)(ptr), &ret_, 0, \ - false, mo, __ATOMIC_RELAXED); \ - ret_; \ -MACRO_END - -#define atomic_load(ptr, mo) \ - (typeof(*(ptr)))__builtin_choose_expr(sizeof(*(ptr)) == 8, \ - atomic_load_64(ptr, mo), \ - __atomic_load_n(ptr, mo)) - -#define atomic_store(ptr, val, mo) \ -MACRO_BEGIN \ - if (sizeof(*(ptr)) != 8) { \ - __atomic_store_n(ptr, val, mo); \ - } else { \ - typeof(*(ptr)) oval_, nval_; \ - bool done_; \ - \ - oval_ = *(ptr); \ - nval_ = (val); \ - \ - do { \ - done_ = __atomic_compare_exchange_n(ptr, &oval_, nval_, \ - false, mo, \ - __ATOMIC_RELAXED); \ - } while (!done_); \ - \ - } \ -MACRO_END +/* Report that 64-bits operations are supported */ +#define ATOMIC_HAVE_64B_OPS /* - * Report that load and store are architecture-specific. + * On i386, the compiler generates either an FP-stack read/write, or an SSE2 + * store/load to implement these 64-bit atomic operations. Since that's not + * feasible in the kernel, fall back to cmpxchg8b. + * + * XXX Note that, in this case, loading becomes a potentially mutating + * operation, but it's not expected to be a problem since atomic operations + * are normally not used on read-only memory. + * + * Also note that this assumes the processor is at least an i586. */ -#define ATOMIC_ARCH_SPECIFIC_LOAD -#define ATOMIC_ARCH_SPECIFIC_STORE -#endif /* __LP64__ */ +#define atomic_x86_select(ptr, op) \ +_Generic(*(ptr), \ + unsigned int: __atomic_ ## op ## _n, \ + unsigned long long: atomic_i386_ ## op ## _64) -/* - * XXX Clang seems to have trouble with 64-bits operations on 32-bits - * processors. - */ -#if defined(__LP64__) || !defined(__clang__) +static inline unsigned long long +atomic_i386_load_64(const unsigned long long *ptr, int memorder) +{ + unsigned long long prev; -/* - * Report that 64-bits operations are supported. - */ -#define ATOMIC_HAVE_64B_OPS + prev = 0; + __atomic_compare_exchange_n((unsigned long long *)ptr, &prev, + 0, false, memorder, __ATOMIC_RELAXED); + return prev; +} + +#define atomic_load_n(ptr, memorder) \ +atomic_x86_select(ptr, load)(ptr, memorder) + +static inline void +atomic_i386_store_64(unsigned long long *ptr, unsigned long long val, + int memorder) +{ + unsigned long long prev; + bool done; + + prev = *ptr; + + do { + done = __atomic_compare_exchange_n(ptr, &prev, val, + false, memorder, + __ATOMIC_RELAXED); + } while (!done); +} + +#define atomic_store_n(ptr, val, memorder) \ +atomic_x86_select(ptr, store)(ptr, val, memorder) #endif /* __clang__ */ +#else /* __LP64__ */ + +/* Report that 64-bits operations are supported */ +#define ATOMIC_HAVE_64B_OPS + +#endif /* __LP64__ */ + #endif /* X86_ATOMIC_H */ diff --git a/kern/atomic.h b/kern/atomic.h index d37a28b..1172009 100644 --- a/kern/atomic.h +++ b/kern/atomic.h @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018 Richard Braun. * Copyright (c) 2017 Agustina Arzille. * * This program is free software: you can redistribute it and/or modify @@ -17,6 +18,12 @@ * * Type-generic memory-model aware atomic operations. * + * For portability reasons, this interface restricts atomic operation + * sizes to 32-bit and 64-bit. + * + * Some configurations may not support 64-bit operations. Check if the + * ATOMIC_HAVE_64B_OPS macro is defined to find out. + * * TODO Replace mentions of "memory barriers" throughout the code with * C11 memory model terminology. */ @@ -26,43 +33,25 @@ #include -#include #include /* * Supported memory orders. 
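As a usage illustration of these memory orders together with the type-generic operations, here is a hedged reference-counting sketch; the structure and functions are hypothetical and not taken from the kernel.

#include <assert.h>

#include <kern/atomic.h>

struct object {
    unsigned long nr_refs;
};

static inline void
object_ref(struct object *obj)
{
    unsigned long nr_refs;

    /* Relaxed is enough: taking a reference needs no ordering by itself. */
    nr_refs = atomic_fetch_add(&obj->nr_refs, 1UL, ATOMIC_RELAXED);
    assert(nr_refs != 0);
}

static inline void
object_unref(struct object *obj, void (*destroy)(struct object *))
{
    unsigned long nr_refs;

    /* Acquire-release so the thread dropping the last reference observes
       all writes made while the object was shared. */
    nr_refs = atomic_fetch_sub(&obj->nr_refs, 1UL, ATOMIC_ACQ_REL);
    assert(nr_refs != 0);

    if (nr_refs == 1) {
        destroy(obj);
    }
}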
*/ -#define ATOMIC_RELAXED __ATOMIC_RELAXED -#define ATOMIC_CONSUME __ATOMIC_CONSUME -#define ATOMIC_ACQUIRE __ATOMIC_ACQUIRE -#define ATOMIC_RELEASE __ATOMIC_RELEASE -#define ATOMIC_ACQ_REL __ATOMIC_ACQ_REL -#define ATOMIC_SEQ_CST __ATOMIC_SEQ_CST - -/* - * Type-generic atomic operations. - */ -#define atomic_fetch_add(ptr, val, mo) __atomic_fetch_add(ptr, val, mo) - -#define atomic_fetch_sub(ptr, val, mo) __atomic_fetch_sub(ptr, val, mo) - -#define atomic_fetch_and(ptr, val, mo) __atomic_fetch_and(ptr, val, mo) - -#define atomic_fetch_or(ptr, val, mo) __atomic_fetch_or(ptr, val, mo) - -#define atomic_fetch_xor(ptr, val, mo) __atomic_fetch_xor(ptr, val, mo) - -#define atomic_add(ptr, val, mo) (void)__atomic_add_fetch(ptr, val, mo) - -#define atomic_sub(ptr, val, mo) (void)__atomic_sub_fetch(ptr, val, mo) - -#define atomic_and(ptr, val, mo) (void)__atomic_and_fetch(ptr, val, mo) +#define ATOMIC_RELAXED __ATOMIC_RELAXED +#define ATOMIC_CONSUME __ATOMIC_CONSUME +#define ATOMIC_ACQUIRE __ATOMIC_ACQUIRE +#define ATOMIC_RELEASE __ATOMIC_RELEASE +#define ATOMIC_ACQ_REL __ATOMIC_ACQ_REL +#define ATOMIC_SEQ_CST __ATOMIC_SEQ_CST -#define atomic_or(ptr, val, mo) (void)__atomic_or_fetch(ptr, val, mo) +#include -#define atomic_xor(ptr, val, mo) (void)__atomic_xor_fetch(ptr, val, mo) +#define atomic_load(ptr, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, load)(ptr, memorder)) -#define atomic_swap(ptr, val, mo) __atomic_exchange_n(ptr, val, mo) +#define atomic_store(ptr, val, memorder) \ +atomic_select(ptr, store)(ptr, val, memorder) /* * For compare-and-swap, deviate a little from the standard, and only @@ -73,37 +62,43 @@ * because atomic CAS is typically used in a loop. However, if a different * code path is taken on failure (rather than retrying), then the user * should be aware that a memory fence might be necessary. - * - * Finally, although a local variable isn't strictly needed for the new - * value, some compilers seem to have trouble when all parameters don't - * have the same type. */ -#define atomic_cas(ptr, oval, nval, mo) \ -MACRO_BEGIN \ - typeof(*(ptr)) oval_, nval_; \ - \ - oval_ = (oval); \ - nval_ = (nval); \ - __atomic_compare_exchange_n(ptr, &oval_, nval_, false, \ - mo, ATOMIC_RELAXED); \ - oval_; \ -MACRO_END +#define atomic_cas(ptr, oval, nval, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, cas)(ptr, oval, nval, memorder)) -/* - * Some architectures may need specific definitions for loads and stores, - * in order to prevent the compiler from emitting unsupported instructions. - * As such, only define these if the architecture-specific part of the - * module didn't already. 
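Because atomic_cas exposes the previously observed value rather than a success flag, a typical retry loop compares that value against the expected one, as in the following illustrative sketch; the function is hypothetical, and the relaxed ordering taken on CAS failure is harmless because the loop simply retries with the freshly observed value.

#include <kern/atomic.h>

/*
 * Atomically increment a counter unless it is zero. Returns the new value,
 * or 0 if the counter was found to be zero.
 */
static inline unsigned int
counter_inc_unless_zero(unsigned int *counter)
{
    unsigned int prev, oval;

    prev = atomic_load(counter, ATOMIC_RELAXED);

    do {
        if (prev == 0) {
            return 0;
        }

        oval = prev;
        prev = atomic_cas(counter, oval, oval + 1, ATOMIC_ACQUIRE);
    } while (prev != oval);

    return oval + 1;
}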
- */ +#define atomic_swap(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, swap)(ptr, val, memorder)) + +#define atomic_fetch_add(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_add)(ptr, val, memorder)) + +#define atomic_fetch_sub(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_sub)(ptr, val, memorder)) + +#define atomic_fetch_and(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_and)(ptr, val, memorder)) + +#define atomic_fetch_or(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_or)(ptr, val, memorder)) + +#define atomic_fetch_xor(ptr, val, memorder) \ +((typeof(*(ptr)))atomic_select(ptr, fetch_xor)(ptr, val, memorder)) + +#define atomic_add(ptr, val, memorder) \ +atomic_select(ptr, add)(ptr, val, memorder) + +#define atomic_sub(ptr, val, memorder) \ +atomic_select(ptr, sub)(ptr, val, memorder) + +#define atomic_and(ptr, val, memorder) \ +atomic_select(ptr, and)(ptr, val, memorder) -#ifndef ATOMIC_ARCH_SPECIFIC_LOAD -#define atomic_load(ptr, mo) __atomic_load_n(ptr, mo) -#endif +#define atomic_or(ptr, val, memorder) \ +atomic_select(ptr, or)(ptr, val, memorder) -#ifndef ATOMIC_ARCH_SPECIFIC_STORE -#define atomic_store(ptr, val, mo) __atomic_store_n(ptr, val, mo) -#endif +#define atomic_xor(ptr, val, memorder) \ +atomic_select(ptr, xor)(ptr, val, memorder) -#define atomic_fence(mo) __atomic_thread_fence(mo) +#define atomic_fence(memorder) __atomic_thread_fence(memorder) #endif /* KERN_ATOMIC_H */ diff --git a/kern/atomic_i.h b/kern/atomic_i.h new file mode 100644 index 0000000..9986622 --- /dev/null +++ b/kern/atomic_i.h @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2018 Richard Braun. + * Copyright (c) 2017 Agustina Arzille. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + * Architecture-specific code may override any of the type-generic + * atomic_xxx_n macros by defining them. 
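As an illustration of this override mechanism, a hypothetical architecture header included before this file could route 64-bit accesses through its own helper while 32-bit accesses keep using the compiler builtin, mirroring the x86 header above. The atomic_xyz_* names are placeholders, not real kernel code.

/* Hypothetical arch/xyz/machine/atomic.h fragment. */

static inline unsigned long long
atomic_xyz_load_64(const unsigned long long *ptr, int memorder)
{
    /* The architecture-specific 64-bit load would go here; this sketch
       simply falls back to the generic builtin. */
    return __atomic_load_n(ptr, memorder);
}

#define atomic_load_n(ptr, memorder)                        \
_Generic(*(ptr),                                            \
         unsigned int: __atomic_load_n,                     \
         unsigned long long: atomic_xyz_load_64)(ptr, memorder)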
+ */ + +#ifndef KERN_ATOMIC_I_H +#define KERN_ATOMIC_I_H + +#include +#include +#include + +#include +#include + +#ifndef atomic_load_n +#define atomic_load_n __atomic_load_n +#endif /* atomic_load_n */ + +#ifndef atomic_store_n +#define atomic_store_n __atomic_store_n +#endif /* atomic_store_n */ + +#ifndef atomic_cas_n +#define atomic_cas_n(ptr, oval, nval, memorder) \ +MACRO_BEGIN \ + typeof(*(ptr)) oval_; \ + \ + oval_ = (oval); \ + __atomic_compare_exchange_n(ptr, &oval_, (nval), false, \ + memorder, __ATOMIC_RELAXED); \ + oval_; \ +MACRO_END +#endif /* atomic_cas_n */ + +#ifndef atomic_swap_n +#define atomic_swap_n __atomic_exchange_n +#endif /* atomic_swap_n */ + +#ifndef atomic_fetch_add_n +#define atomic_fetch_add_n __atomic_fetch_add +#endif /* atomic_fetch_add_n */ + +#ifndef atomic_fetch_sub_n +#define atomic_fetch_sub_n __atomic_fetch_sub +#endif /* atomic_fetch_sub_n */ + +#ifndef atomic_fetch_and_n +#define atomic_fetch_and_n __atomic_fetch_and +#endif /* atomic_fetch_and_n */ + +#ifndef atomic_fetch_or_n +#define atomic_fetch_or_n __atomic_fetch_or +#endif /* atomic_fetch_or_n */ + +#ifndef atomic_fetch_xor_n +#define atomic_fetch_xor_n __atomic_fetch_xor +#endif /* atomic_fetch_xor_n */ + +#ifndef atomic_add_n +#define atomic_add_n (void)__atomic_add_fetch +#endif /* atomic_add_n */ + +#ifndef atomic_sub_n +#define atomic_sub_n (void)__atomic_sub_fetch +#endif /* atomic_sub_n */ + +#ifndef atomic_and_n +#define atomic_and_n (void)__atomic_and_fetch +#endif /* atomic_and_n */ + +#ifndef atomic_or_n +#define atomic_or_n (void)__atomic_or_fetch +#endif /* atomic_or_n */ + +#ifndef atomic_xor_n +#define atomic_xor_n (void)__atomic_xor_fetch +#endif /* atomic_xor_n */ + +/* + * This macro is used to select the appropriate function for the given + * operation. The default expression is selected for pointer types. + * In order to avoid confusing errors, all built-in types are explicitely + * listed, so that unsupported ones don't select pointer operations. + * Instead, they select a function with an explicit name indicating + * an invalid type. + */ +#define atomic_select(ptr, op) \ +_Generic(*(ptr), \ + float: atomic_invalid_type, \ + double: atomic_invalid_type, \ + long double: atomic_invalid_type, \ + bool: atomic_invalid_type, \ + char: atomic_invalid_type, \ + signed char: atomic_invalid_type, \ + unsigned char: atomic_invalid_type, \ + short: atomic_invalid_type, \ + unsigned short: atomic_invalid_type, \ + int: atomic_ ## op ## _32, \ + unsigned int: atomic_ ## op ## _32, \ + long: atomic_ ## op ## _ul, \ + unsigned long: atomic_ ## op ## _ul, \ + long long: atomic_ ## op ## _64, \ + unsigned long long: atomic_ ## op ## _64, \ + default: atomic_ ## op ## _ptr) + +void atomic_invalid_type(void); + +/* + * After function selection, type genericity is achieved with transparent + * unions, a GCC extension. 
Here are a few things to keep in mind : + * - all members must have the same representation + * - calling conventions are inferred from the first member + */ + +#ifdef __LP64__ + +union atomic_ptr_32 { + int *i_ptr; + unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_32 { + const int *i_ptr; + const unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_val32 { + int i; + unsigned int ui; +} __attribute__((transparent_union)); + +#ifdef ATOMIC_HAVE_64B_OPS + +union atomic_ptr_64 { + void *ptr; + unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_64 { + const void *ptr; + const unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_val_64 { + void *ptr; + long l; + unsigned long ul; + long long ll; + unsigned long long ull; +} __attribute__((transparent_union)); + +#endif /* ATOMIC_HAVE_64B_OPS */ + +#else /* __LP64__ */ + +union atomic_ptr_32 { + void *ptr; + unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_32 { + const void *ptr; + const unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_val32 { + void *ptr; + int i; + unsigned int ui; + long l; + unsigned long ul; +} __attribute__((transparent_union)); + +#ifdef ATOMIC_HAVE_64B_OPS + +union atomic_ptr_64 { + long long *ll_ptr; + unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_64 { + const long long *ll_ptr; + const unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_val_64 { + long long ll; + unsigned long long ull; +} __attribute__((transparent_union)); + +#endif /* ATOMIC_HAVE_64B_OPS */ + +#endif /* __LP64__ */ + +#define atomic_ptr_aligned(ptr) P2ALIGNED((uintptr_t)(ptr), sizeof(ptr)) + +/* atomic_load */ + +static inline unsigned int +atomic_load_32(union atomic_constptr_32 ptr, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_load_n(ptr.ui_ptr, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_load_64(union atomic_constptr_64 ptr, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_load_n(ptr.ull_ptr, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_load_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_load_ul atomic_load_64 +#else /* __LP64__ */ +#define atomic_load_ul atomic_load_32 +#endif /* __LP64__ */ + +#define atomic_load_ptr atomic_load_ul + +/* atomic_store */ + +static inline void +atomic_store_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_store_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_store_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_store_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_store_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_store_ul atomic_store_64 +#else /* __LP64__ */ +#define atomic_store_ul atomic_store_32 +#endif /* __LP64__ */ + +#define atomic_store_ptr atomic_store_ul + +/* atomic_cas */ + +static inline unsigned int +atomic_cas_32(union atomic_ptr_32 ptr, union atomic_val32 oval, + union atomic_val32 nval, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return 
atomic_cas_n(ptr.ui_ptr, oval.ui, nval.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_cas_64(union atomic_ptr_64 ptr, union atomic_val_64 oval, + union atomic_val_64 nval, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_cas_n(ptr.ull_ptr, oval.ull, nval.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_cas_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_cas_ul atomic_cas_64 +#else /* __LP64__ */ +#define atomic_cas_ul atomic_cas_32 +#endif /* __LP64__ */ + +#define atomic_cas_ptr atomic_cas_ul + +/* atomic_swap */ + +static inline unsigned int +atomic_swap_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_swap_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_swap_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_swap_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_swap_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_swap_ul atomic_swap_64 +#else /* __LP64__ */ +#define atomic_swap_ul atomic_swap_32 +#endif /* __LP64__ */ + +#define atomic_swap_ptr atomic_swap_ul + +/* atomic_fetch_add */ + +static inline unsigned int +atomic_fetch_add_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_add_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_add_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_add_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_add_ul atomic_fetch_add_64 +#else /* __LP64__ */ +#define atomic_fetch_add_ul atomic_fetch_add_32 +#endif /* __LP64__ */ + +#define atomic_fetch_add_ptr atomic_fetch_add_ul + +/* atomic_fetch_sub */ + +static inline unsigned int +atomic_fetch_sub_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_sub_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_sub_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_sub_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_sub_ul atomic_fetch_sub_64 +#else /* __LP64__ */ +#define atomic_fetch_sub_ul atomic_fetch_sub_32 +#endif /* __LP64__ */ + +#define atomic_fetch_sub_ptr atomic_fetch_sub_ul + +/* atomic_fetch_and */ + +static inline unsigned int +atomic_fetch_and_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_and_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return 
atomic_fetch_and_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_fetch_and_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_and_ul atomic_fetch_and_64 +#else /* __LP64__ */ +#define atomic_fetch_and_ul atomic_fetch_and_32 +#endif /* __LP64__ */ + +#define atomic_fetch_and_ptr atomic_fetch_and_ul + +/* atomic_fetch_or */ + +static inline unsigned int +atomic_fetch_or_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_or_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_or_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_or_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_or_ul atomic_fetch_or_64 +#else /* __LP64__ */ +#define atomic_fetch_or_ul atomic_fetch_or_32 +#endif /* __LP64__ */ + +#define atomic_fetch_or_ptr atomic_fetch_or_ul + +/* atomic_fetch_xor */ + +static inline unsigned int +atomic_fetch_xor_32(union atomic_ptr_32 ptr, union atomic_val32 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_fetch_xor_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline unsigned long long +atomic_fetch_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, + int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_fetch_xor_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_xor_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_fetch_xor_ul atomic_fetch_xor_64 +#else /* __LP64__ */ +#define atomic_fetch_xor_ul atomic_fetch_xor_32 +#endif /* __LP64__ */ + +#define atomic_fetch_xor_ptr atomic_fetch_xor_ul + +/* atomic_add */ + +static inline void +atomic_add_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_add_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_add_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_add_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_add_ul atomic_add_64 +#else /* __LP64__ */ +#define atomic_add_ul atomic_add_32 +#endif /* __LP64__ */ + +#define atomic_add_ptr atomic_add_ul + +/* atomic_sub */ + +static inline void +atomic_sub_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_sub_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_sub_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_sub_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_sub_ul atomic_sub_64 +#else /* __LP64__ */ +#define atomic_sub_ul atomic_sub_32 +#endif /* __LP64__ */ + +#define atomic_sub_ptr atomic_sub_ul + +/* atomic_and */ + +static 
inline void +atomic_and_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_and_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_and_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_and_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_and_ul atomic_and_64 +#else /* __LP64__ */ +#define atomic_and_ul atomic_and_32 +#endif /* __LP64__ */ + +#define atomic_and_ptr atomic_and_ul + +/* atomic_or */ + +static inline void +atomic_or_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_or_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_or_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_or_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_or_ul atomic_or_64 +#else /* __LP64__ */ +#define atomic_or_ul atomic_or_32 +#endif /* __LP64__ */ + +#define atomic_or_ptr atomic_or_ul + +/* atomic_xor */ + +static inline void +atomic_xor_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ui_ptr)); + return atomic_xor_n(ptr.ui_ptr, val.ui, memorder); +} + +#ifdef ATOMIC_HAVE_64B_OPS +static inline void +atomic_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) +{ + assert(atomic_ptr_aligned(ptr.ull_ptr)); + return atomic_xor_n(ptr.ull_ptr, val.ull, memorder); +} +#else /* ATOMIC_HAVE_64B_OPS */ +#define atomic_xor_64 atomic_invalid_type +#endif /* ATOMIC_HAVE_64B_OPS */ + +#ifdef __LP64__ +#define atomic_xor_ul atomic_xor_64 +#else /* __LP64__ */ +#define atomic_xor_ul atomic_xor_32 +#endif /* __LP64__ */ + +#define atomic_xor_ptr atomic_xor_ul + +#endif /* KERN_ATOMIC_I_H */ -- cgit v1.2.3 From 5fe92212b38ea506e7d65ced630f77f9c4a9d944 Mon Sep 17 00:00:00 2001 From: Richard Braun Date: Thu, 26 Apr 2018 22:33:39 +0200 Subject: kern/atomic: minor rework Allow architectures to specifically override any of the overloaded functions. This removes the need from architecture-specific code to implement selection. --- arch/x86/machine/atomic.h | 44 +++---- kern/atomic_i.h | 298 +++++++++++++++++++--------------------------- kern/atomic_types.h | 103 ++++++++++++++++ 3 files changed, 242 insertions(+), 203 deletions(-) create mode 100644 kern/atomic_types.h (limited to 'arch/x86/machine') diff --git a/arch/x86/machine/atomic.h b/arch/x86/machine/atomic.h index 5da8568..e10d301 100644 --- a/arch/x86/machine/atomic.h +++ b/arch/x86/machine/atomic.h @@ -28,13 +28,18 @@ #include +#include #include -#ifndef __LP64__ +#ifdef __LP64__ + +/* Report that 64-bits operations are supported */ +#define ATOMIC_HAVE_64B_OPS + +#else /* __LP64__ */ /* - * XXX Clang seems to have trouble with 64-bit operations on 32-bit - * processors. + * XXX Clang doesn't provide any __atomic_xxx_8 functions on i386. 
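On such configurations, ATOMIC_HAVE_64B_OPS stays undefined and callers are expected to check for it before using 64-bit objects. A hedged sketch of what that looks like on the caller side follows; the structure and function are illustrative only.

#include <stdint.h>

#include <kern/atomic.h>

struct stats {
#ifdef ATOMIC_HAVE_64B_OPS
    uint64_t nr_events;
#else
    unsigned int nr_events;     /* fall back to a 32-bit counter */
#endif
};

static inline void
stats_record_event(struct stats *stats)
{
    /* The value type must strictly match the pointed-to type. */
#ifdef ATOMIC_HAVE_64B_OPS
    atomic_add(&stats->nr_events, (uint64_t)1, ATOMIC_RELAXED);
#else
    atomic_add(&stats->nr_events, 1U, ATOMIC_RELAXED);
#endif
}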
*/ #ifndef __clang__ @@ -42,7 +47,7 @@ #define ATOMIC_HAVE_64B_OPS /* - * On i386, the compiler generates either an FP-stack read/write, or an SSE2 + * On i386, GCC generates either an FP-stack read/write, or an SSE2 * store/load to implement these 64-bit atomic operations. Since that's not * feasible in the kernel, fall back to cmpxchg8b. * @@ -53,51 +58,38 @@ * Also note that this assumes the processor is at least an i586. */ -#define atomic_x86_select(ptr, op) \ -_Generic(*(ptr), \ - unsigned int: __atomic_ ## op ## _n, \ - unsigned long long: atomic_i386_ ## op ## _64) - static inline unsigned long long -atomic_i386_load_64(const unsigned long long *ptr, int memorder) +atomic_i386_load_64(union atomic_constptr_64 ptr, int memorder) { unsigned long long prev; prev = 0; - __atomic_compare_exchange_n((unsigned long long *)ptr, &prev, - 0, false, memorder, __ATOMIC_RELAXED); + __atomic_compare_exchange_n((unsigned long long *)(ptr.ull_ptr), + &prev, 0, false, memorder, __ATOMIC_RELAXED); return prev; } -#define atomic_load_n(ptr, memorder) \ -atomic_x86_select(ptr, load)(ptr, memorder) +#define atomic_load_64 atomic_i386_load_64 static inline void -atomic_i386_store_64(unsigned long long *ptr, unsigned long long val, +atomic_i386_store_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { unsigned long long prev; bool done; - prev = *ptr; + prev = *ptr.ull_ptr; do { - done = __atomic_compare_exchange_n(ptr, &prev, val, - false, memorder, - __ATOMIC_RELAXED); + done = __atomic_compare_exchange_n(ptr.ull_ptr, &prev, val.ull, + false, memorder, __ATOMIC_RELAXED); } while (!done); } -#define atomic_store_n(ptr, val, memorder) \ -atomic_x86_select(ptr, store)(ptr, val, memorder) +#define atomic_store_64 atomic_i386_store_64 #endif /* __clang__ */ -#else /* __LP64__ */ - -/* Report that 64-bits operations are supported */ -#define ATOMIC_HAVE_64B_OPS - #endif /* __LP64__ */ #endif /* X86_ATOMIC_H */ diff --git a/kern/atomic_i.h b/kern/atomic_i.h index 0087d03..df810a3 100644 --- a/kern/atomic_i.h +++ b/kern/atomic_i.h @@ -27,73 +27,10 @@ #include #include +#include #include #include -#ifndef atomic_load_n -#define atomic_load_n __atomic_load_n -#endif /* atomic_load_n */ - -#ifndef atomic_store_n -#define atomic_store_n __atomic_store_n -#endif /* atomic_store_n */ - -#ifndef atomic_cas_n -#define atomic_cas_n(ptr, oval, nval, memorder) \ -MACRO_BEGIN \ - typeof(*(ptr)) oval_; \ - \ - oval_ = (oval); \ - __atomic_compare_exchange_n(ptr, &oval_, (nval), false, \ - memorder, __ATOMIC_RELAXED); \ - oval_; \ -MACRO_END -#endif /* atomic_cas_n */ - -#ifndef atomic_swap_n -#define atomic_swap_n __atomic_exchange_n -#endif /* atomic_swap_n */ - -#ifndef atomic_fetch_add_n -#define atomic_fetch_add_n __atomic_fetch_add -#endif /* atomic_fetch_add_n */ - -#ifndef atomic_fetch_sub_n -#define atomic_fetch_sub_n __atomic_fetch_sub -#endif /* atomic_fetch_sub_n */ - -#ifndef atomic_fetch_and_n -#define atomic_fetch_and_n __atomic_fetch_and -#endif /* atomic_fetch_and_n */ - -#ifndef atomic_fetch_or_n -#define atomic_fetch_or_n __atomic_fetch_or -#endif /* atomic_fetch_or_n */ - -#ifndef atomic_fetch_xor_n -#define atomic_fetch_xor_n __atomic_fetch_xor -#endif /* atomic_fetch_xor_n */ - -#ifndef atomic_add_n -#define atomic_add_n (void)__atomic_add_fetch -#endif /* atomic_add_n */ - -#ifndef atomic_sub_n -#define atomic_sub_n (void)__atomic_sub_fetch -#endif /* atomic_sub_n */ - -#ifndef atomic_and_n -#define atomic_and_n (void)__atomic_and_fetch -#endif /* atomic_and_n */ - -#ifndef 
atomic_or_n -#define atomic_or_n (void)__atomic_or_fetch -#endif /* atomic_or_n */ - -#ifndef atomic_xor_n -#define atomic_xor_n (void)__atomic_xor_fetch -#endif /* atomic_xor_n */ - /* * This macro is used to select the appropriate function for the given * operation. The default expression is selected for pointer types. @@ -123,112 +60,31 @@ _Generic(*(ptr), \ void atomic_invalid_type(void); -/* - * After function selection, type genericity is achieved with transparent - * unions, a GCC extension. Here are a few things to keep in mind : - * - all members must have the same representation - * - calling conventions are inferred from the first member - */ - -#ifdef __LP64__ - -union atomic_ptr_32 { - int *i_ptr; - unsigned int *ui_ptr; -} __attribute__((transparent_union)); - -union atomic_constptr_32 { - const int *i_ptr; - const unsigned int *ui_ptr; -} __attribute__((transparent_union)); - -union atomic_val32 { - int i; - unsigned int ui; -} __attribute__((transparent_union)); - -#ifdef ATOMIC_HAVE_64B_OPS - -union atomic_ptr_64 { - void *ptr; - unsigned long long *ull_ptr; -} __attribute__((transparent_union)); - -union atomic_constptr_64 { - const void *ptr; - const unsigned long long *ull_ptr; -} __attribute__((transparent_union)); - -union atomic_val_64 { - void *ptr; - long l; - unsigned long ul; - long long ll; - unsigned long long ull; -} __attribute__((transparent_union)); - -#endif /* ATOMIC_HAVE_64B_OPS */ - -#else /* __LP64__ */ - -union atomic_ptr_32 { - void *ptr; - unsigned int *ui_ptr; -} __attribute__((transparent_union)); - -union atomic_constptr_32 { - const void *ptr; - const unsigned int *ui_ptr; -} __attribute__((transparent_union)); - -union atomic_val32 { - void *ptr; - int i; - unsigned int ui; - long l; - unsigned long ul; -} __attribute__((transparent_union)); - -#ifdef ATOMIC_HAVE_64B_OPS - -union atomic_ptr_64 { - long long *ll_ptr; - unsigned long long *ull_ptr; -} __attribute__((transparent_union)); - -union atomic_constptr_64 { - const long long *ll_ptr; - const unsigned long long *ull_ptr; -} __attribute__((transparent_union)); - -union atomic_val_64 { - long long ll; - unsigned long long ull; -} __attribute__((transparent_union)); - -#endif /* ATOMIC_HAVE_64B_OPS */ - -#endif /* __LP64__ */ - #define ATOMIC_ALIGN(ptr) MIN(sizeof(*(ptr)), sizeof(ptr)) #define atomic_ptr_aligned(ptr) P2ALIGNED((uintptr_t)(ptr), ATOMIC_ALIGN(ptr)) /* atomic_load */ +#ifndef atomic_load_32 static inline unsigned int atomic_load_32(union atomic_constptr_32 ptr, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_load_n(ptr.ui_ptr, memorder); + return __atomic_load_n(ptr.ui_ptr, memorder); } +#endif /* atomic_load_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_load_64 static inline unsigned long long atomic_load_64(union atomic_constptr_64 ptr, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_load_n(ptr.ull_ptr, memorder); + return __atomic_load_n(ptr.ull_ptr, memorder); } +#endif /* atomic_load_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_load_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -243,20 +99,26 @@ atomic_load_64(union atomic_constptr_64 ptr, int memorder) /* atomic_store */ +#ifndef atomic_store_32 static inline void atomic_store_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_store_n(ptr.ui_ptr, val.ui, memorder); + return __atomic_store_n(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_store_32 */ #ifdef 
ATOMIC_HAVE_64B_OPS + +#ifndef atomic_store_64 static inline void atomic_store_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_store_n(ptr.ull_ptr, val.ull, memorder); + return __atomic_store_n(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_store_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_store_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -271,6 +133,17 @@ atomic_store_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) /* atomic_cas */ +#define atomic_cas_n(ptr, oval, nval, memorder) \ +MACRO_BEGIN \ + typeof(*(ptr)) oval_; \ + \ + oval_ = (oval); \ + __atomic_compare_exchange_n(ptr, &oval_, (nval), false, \ + memorder, __ATOMIC_RELAXED); \ + oval_; \ +MACRO_END + +#ifndef atomic_cas_32 static inline unsigned int atomic_cas_32(union atomic_ptr_32 ptr, union atomic_val32 oval, union atomic_val32 nval, int memorder) @@ -278,8 +151,11 @@ atomic_cas_32(union atomic_ptr_32 ptr, union atomic_val32 oval, assert(atomic_ptr_aligned(ptr.ui_ptr)); return atomic_cas_n(ptr.ui_ptr, oval.ui, nval.ui, memorder); } +#endif /* atomic_cas_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_cas_64 static inline unsigned long long atomic_cas_64(union atomic_ptr_64 ptr, union atomic_val_64 oval, union atomic_val_64 nval, int memorder) @@ -287,6 +163,8 @@ atomic_cas_64(union atomic_ptr_64 ptr, union atomic_val_64 oval, assert(atomic_ptr_aligned(ptr.ull_ptr)); return atomic_cas_n(ptr.ull_ptr, oval.ull, nval.ull, memorder); } +#endif /* atomic_cas_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_cas_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -301,20 +179,26 @@ atomic_cas_64(union atomic_ptr_64 ptr, union atomic_val_64 oval, /* atomic_swap */ +#ifndef atomic_swap_32 static inline unsigned int atomic_swap_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_swap_n(ptr.ui_ptr, val.ui, memorder); + return __atomic_exchange_n(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_swap_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_swap_64 static inline unsigned long long atomic_swap_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_swap_n(ptr.ull_ptr, val.ull, memorder); + return __atomic_exchange_n(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_swap_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_swap_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -329,22 +213,28 @@ atomic_swap_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) /* atomic_fetch_add */ +#ifndef atomic_fetch_add_32 static inline unsigned int atomic_fetch_add_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_fetch_add_n(ptr.ui_ptr, val.ui, memorder); + return __atomic_fetch_add(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_fetch_add_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_fetch_add_64 static inline unsigned long long atomic_fetch_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_fetch_add_n(ptr.ull_ptr, val.ull, memorder); + return __atomic_fetch_add(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_fetch_add_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_fetch_add_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -359,22 +249,28 @@ 
atomic_fetch_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, /* atomic_fetch_sub */ +#ifndef atomic_fetch_sub_32 static inline unsigned int atomic_fetch_sub_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_fetch_sub_n(ptr.ui_ptr, val.ui, memorder); + return __atomic_fetch_sub(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_fetch_sub_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_fetch_sub_64 static inline unsigned long long atomic_fetch_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_fetch_sub_n(ptr.ull_ptr, val.ull, memorder); + return __atomic_fetch_sub(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_fetch_sub_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_fetch_sub_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -389,22 +285,28 @@ atomic_fetch_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, /* atomic_fetch_and */ +#ifndef atomic_fetch_and_32 static inline unsigned int atomic_fetch_and_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_fetch_and_n(ptr.ui_ptr, val.ui, memorder); + return __atomic_fetch_and(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_fetch_and_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_fetch_and_64 static inline unsigned long long atomic_fetch_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_fetch_and_n(ptr.ull_ptr, val.ull, memorder); + return __atomic_fetch_and(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_fetch_and_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_fetch_and_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -419,22 +321,28 @@ atomic_fetch_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, /* atomic_fetch_or */ +#ifndef atomic_fetch_or_32 static inline unsigned int atomic_fetch_or_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_fetch_or_n(ptr.ui_ptr, val.ui, memorder); + return __atomic_fetch_or(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_fetch_or_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_fetch_or_64 static inline unsigned long long atomic_fetch_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_fetch_or_n(ptr.ull_ptr, val.ull, memorder); + return __atomic_fetch_or(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_fetch_or_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_or_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -449,22 +357,28 @@ atomic_fetch_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, /* atomic_fetch_xor */ +#ifndef atomic_fetch_xor_32 static inline unsigned int atomic_fetch_xor_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_fetch_xor_n(ptr.ui_ptr, val.ui, memorder); + return __atomic_fetch_xor(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_fetch_xor_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_fetch_xor_64 static inline unsigned long long atomic_fetch_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_fetch_xor_n(ptr.ull_ptr, val.ull, memorder); + return __atomic_fetch_xor(ptr.ull_ptr, 
val.ull, memorder); } +#endif /* atomic_fetch_xor_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_xor_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -479,20 +393,26 @@ atomic_fetch_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, /* atomic_add */ +#ifndef atomic_add_32 static inline void atomic_add_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_add_n(ptr.ui_ptr, val.ui, memorder); + __atomic_add_fetch(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_add_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_add_64 static inline void atomic_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_add_n(ptr.ull_ptr, val.ull, memorder); + __atomic_add_fetch(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_add_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_add_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -507,20 +427,26 @@ atomic_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) /* atomic_sub */ +#ifndef atomic_sub_32 static inline void atomic_sub_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_sub_n(ptr.ui_ptr, val.ui, memorder); + __atomic_sub_fetch(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_sub_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_sub_64 static inline void atomic_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_sub_n(ptr.ull_ptr, val.ull, memorder); + __atomic_sub_fetch(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_sub_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_sub_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -535,20 +461,26 @@ atomic_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) /* atomic_and */ +#ifndef atomic_and_32 static inline void atomic_and_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_and_n(ptr.ui_ptr, val.ui, memorder); + __atomic_and_fetch(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_and_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_and_64 static inline void atomic_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_and_n(ptr.ull_ptr, val.ull, memorder); + __atomic_and_fetch(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_and_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_and_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ @@ -563,20 +495,26 @@ atomic_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) /* atomic_or */ +#ifndef atomic_or_32 static inline void atomic_or_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_or_n(ptr.ui_ptr, val.ui, memorder); + __atomic_or_fetch(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_or_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_or_64 static inline void atomic_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_or_n(ptr.ull_ptr, val.ull, memorder); + __atomic_or_fetch(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_or_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_or_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS 
*/ @@ -591,20 +529,26 @@ atomic_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) /* atomic_xor */ +#ifndef atomic_xor_32 static inline void atomic_xor_32(union atomic_ptr_32 ptr, union atomic_val32 val, int memorder) { assert(atomic_ptr_aligned(ptr.ui_ptr)); - return atomic_xor_n(ptr.ui_ptr, val.ui, memorder); + __atomic_xor_fetch(ptr.ui_ptr, val.ui, memorder); } +#endif /* atomic_xor_32 */ #ifdef ATOMIC_HAVE_64B_OPS + +#ifndef atomic_xor_64 static inline void atomic_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder) { assert(atomic_ptr_aligned(ptr.ull_ptr)); - return atomic_xor_n(ptr.ull_ptr, val.ull, memorder); + __atomic_xor_fetch(ptr.ull_ptr, val.ull, memorder); } +#endif /* atomic_xor_64 */ + #else /* ATOMIC_HAVE_64B_OPS */ #define atomic_xor_64 atomic_invalid_type #endif /* ATOMIC_HAVE_64B_OPS */ diff --git a/kern/atomic_types.h b/kern/atomic_types.h new file mode 100644 index 0000000..cdf1161 --- /dev/null +++ b/kern/atomic_types.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2018 Richard Braun. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * + * Isolated type definition used to avoid inclusion circular dependencies. + */ + +#ifndef KERN_ATOMIC_TYPES_H +#define KERN_ATOMIC_TYPES_H + +/* + * After function selection, type genericity is achieved with transparent + * unions, a GCC extension. 
Here are a few things to keep in mind : + * - all members must have the same representation + * - calling conventions are inferred from the first member + */ + +#ifdef __LP64__ + +union atomic_ptr_32 { + int *i_ptr; + unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_32 { + const int *i_ptr; + const unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_val32 { + int i; + unsigned int ui; +} __attribute__((transparent_union)); + +union atomic_ptr_64 { + void *ptr; + unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_64 { + const void *ptr; + const unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_val_64 { + void *ptr; + long l; + unsigned long ul; + long long ll; + unsigned long long ull; +} __attribute__((transparent_union)); + +#else /* __LP64__ */ + +union atomic_ptr_32 { + void *ptr; + unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_32 { + const void *ptr; + const unsigned int *ui_ptr; +} __attribute__((transparent_union)); + +union atomic_val32 { + void *ptr; + int i; + unsigned int ui; + long l; + unsigned long ul; +} __attribute__((transparent_union)); + +union atomic_ptr_64 { + long long *ll_ptr; + unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_constptr_64 { + const long long *ll_ptr; + const unsigned long long *ull_ptr; +} __attribute__((transparent_union)); + +union atomic_val_64 { + long long ll; + unsigned long long ull; +} __attribute__((transparent_union)); + +#endif /* __LP64__ */ + +#endif /* KERN_ATOMIC_TYPES_H */ -- cgit v1.2.3
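To tie the reworked interface together, here is a hedged, illustrative sketch of a simple test-and-test-and-set lock built on these operations. It is not the kernel's actual spinlock, merely a demonstration of the strictly typed values and explicit memory orders.

#include <kern/atomic.h>

struct example_lock {
    unsigned int locked;
};

static inline void
example_lock_acquire(struct example_lock *lock)
{
    /* atomic_cas returns the previously observed value: 0 means the lock
       was free and has just been taken. */
    while (atomic_cas(&lock->locked, 0U, 1U, ATOMIC_ACQUIRE) != 0) {
        /* Spin on a relaxed load before retrying the CAS. */
        while (atomic_load(&lock->locked, ATOMIC_RELAXED) != 0) {
            continue;
        }
    }
}

static inline void
example_lock_release(struct example_lock *lock)
{
    /* Release ordering publishes the writes made inside the critical
       section before the lock becomes available again. */
    atomic_store(&lock->locked, 0U, ATOMIC_RELEASE);
}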