Diffstat (limited to 'kern')
-rw-r--r--  kern/atomic.h                  | 151
-rw-r--r--  kern/atomic_i.h                | 535
-rw-r--r--  kern/atomic_types.h            | 103
-rw-r--r--  kern/clock.c                   |   2
-rw-r--r--  kern/mutex/mutex_adaptive_i.h  |   2
-rw-r--r--  kern/rtmutex.c                 |   1
-rw-r--r--  kern/rtmutex_i.h               |   4
-rw-r--r--  kern/spinlock.c                |   8
-rw-r--r--  kern/task.h                    |   4
-rw-r--r--  kern/thread.h                  |   4
10 files changed, 745 insertions, 69 deletions
diff --git a/kern/atomic.h b/kern/atomic.h
index d37a28b..27f2920 100644
--- a/kern/atomic.h
+++ b/kern/atomic.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018 Richard Braun.
  * Copyright (c) 2017 Agustina Arzille.
  *
  * This program is free software: you can redistribute it and/or modify
@@ -17,6 +18,12 @@
  *
  * Type-generic memory-model aware atomic operations.
  *
+ * For portability reasons, this interface restricts atomic operation
+ * sizes to 32-bit and 64-bit.
+ *
+ * Some configurations may not support 64-bit operations. Check if the
+ * ATOMIC_HAVE_64B_OPS macro is defined to find out.
+ *
  * TODO Replace mentions of "memory barriers" throughout the code with
  * C11 memory model terminology.
  */
@@ -24,45 +31,33 @@
 #ifndef KERN_ATOMIC_H
 #define KERN_ATOMIC_H
 
+#include <assert.h>
 #include <stdbool.h>
 
+#include <kern/atomic_i.h>
 #include <kern/macros.h>
-#include <machine/atomic.h>
 
 /*
  * Supported memory orders.
  */
-#define ATOMIC_RELAXED __ATOMIC_RELAXED
-#define ATOMIC_CONSUME __ATOMIC_CONSUME
-#define ATOMIC_ACQUIRE __ATOMIC_ACQUIRE
-#define ATOMIC_RELEASE __ATOMIC_RELEASE
-#define ATOMIC_ACQ_REL __ATOMIC_ACQ_REL
-#define ATOMIC_SEQ_CST __ATOMIC_SEQ_CST
-
-/*
- * Type-generic atomic operations.
- */
-#define atomic_fetch_add(ptr, val, mo) __atomic_fetch_add(ptr, val, mo)
-
-#define atomic_fetch_sub(ptr, val, mo) __atomic_fetch_sub(ptr, val, mo)
-
-#define atomic_fetch_and(ptr, val, mo) __atomic_fetch_and(ptr, val, mo)
-
-#define atomic_fetch_or(ptr, val, mo) __atomic_fetch_or(ptr, val, mo)
-
-#define atomic_fetch_xor(ptr, val, mo) __atomic_fetch_xor(ptr, val, mo)
-
-#define atomic_add(ptr, val, mo) (void)__atomic_add_fetch(ptr, val, mo)
-
-#define atomic_sub(ptr, val, mo) (void)__atomic_sub_fetch(ptr, val, mo)
-
-#define atomic_and(ptr, val, mo) (void)__atomic_and_fetch(ptr, val, mo)
-
-#define atomic_or(ptr, val, mo) (void)__atomic_or_fetch(ptr, val, mo)
-
-#define atomic_xor(ptr, val, mo) (void)__atomic_xor_fetch(ptr, val, mo)
+#define ATOMIC_RELAXED  __ATOMIC_RELAXED
+#define ATOMIC_CONSUME  __ATOMIC_CONSUME
+#define ATOMIC_ACQUIRE  __ATOMIC_ACQUIRE
+#define ATOMIC_RELEASE  __ATOMIC_RELEASE
+#define ATOMIC_ACQ_REL  __ATOMIC_ACQ_REL
+#define ATOMIC_SEQ_CST  __ATOMIC_SEQ_CST
+
+#define atomic_load(ptr, memorder)                                      \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, load)(ptr, memorder));          \
+MACRO_END
 
-#define atomic_swap(ptr, val, mo) __atomic_exchange_n(ptr, val, mo)
+#define atomic_store(ptr, val, memorder)                                \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    atomic_select(ptr, store)(ptr, val, memorder);                      \
+MACRO_END
 
 /*
  * For compare-and-swap, deviate a little from the standard, and only
@@ -73,37 +68,79 @@
  * because atomic CAS is typically used in a loop. However, if a different
  * code path is taken on failure (rather than retrying), then the user
  * should be aware that a memory fence might be necessary.
- *
- * Finally, although a local variable isn't strictly needed for the new
- * value, some compilers seem to have trouble when all parameters don't
- * have the same type.
  */
-#define atomic_cas(ptr, oval, nval, mo)                         \
-MACRO_BEGIN                                                     \
-    typeof(*(ptr)) oval_, nval_;                                \
-                                                                \
-    oval_ = (oval);                                             \
-    nval_ = (nval);                                             \
-    __atomic_compare_exchange_n(ptr, &oval_, nval_, false,      \
-                                mo, ATOMIC_RELAXED);            \
-    oval_;                                                      \
+#define atomic_cas(ptr, oval, nval, memorder)                           \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, cas)(ptr, oval, nval, memorder)); \
 MACRO_END
 
-/*
- * Some architectures may need specific definitions for loads and stores,
- * in order to prevent the compiler from emitting unsupported instructions.
- * As such, only define these if the architecture-specific part of the
- * module didn't already.
- */
+#define atomic_swap(ptr, val, memorder)                                 \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, swap)(ptr, val, memorder));     \
+MACRO_END
+
+#define atomic_fetch_add(ptr, val, memorder)                            \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, fetch_add)(ptr, val, memorder)); \
+MACRO_END
+
+#define atomic_fetch_sub(ptr, val, memorder)                            \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, fetch_sub)(ptr, val, memorder)); \
+MACRO_END
 
-#ifndef ATOMIC_ARCH_SPECIFIC_LOAD
-#define atomic_load(ptr, mo) __atomic_load_n(ptr, mo)
-#endif
+#define atomic_fetch_and(ptr, val, memorder)                            \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, fetch_and)(ptr, val, memorder)); \
+MACRO_END
+
+#define atomic_fetch_or(ptr, val, memorder)                             \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, fetch_or)(ptr, val, memorder)); \
+MACRO_END
+
+#define atomic_fetch_xor(ptr, val, memorder)                            \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    ((typeof(*(ptr)))atomic_select(ptr, fetch_xor)(ptr, val, memorder)); \
+MACRO_END
 
-#ifndef ATOMIC_ARCH_SPECIFIC_STORE
-#define atomic_store(ptr, val, mo) __atomic_store_n(ptr, val, mo)
-#endif
+#define atomic_add(ptr, val, memorder)                                  \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    atomic_select(ptr, add)(ptr, val, memorder);                        \
+MACRO_END
+
+#define atomic_sub(ptr, val, memorder)                                  \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    atomic_select(ptr, sub)(ptr, val, memorder);                        \
+MACRO_END
+
+#define atomic_and(ptr, val, memorder)                                  \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    atomic_select(ptr, and)(ptr, val, memorder);                        \
+MACRO_END
+
+#define atomic_or(ptr, val, memorder)                                   \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    atomic_select(ptr, or)(ptr, val, memorder);                         \
+MACRO_END
+
+#define atomic_xor(ptr, val, memorder)                                  \
+MACRO_BEGIN                                                             \
+    assert(atomic_ptr_aligned(ptr));                                    \
+    atomic_select(ptr, xor)(ptr, val, memorder);                        \
+MACRO_END
 
-#define atomic_fence(mo) __atomic_thread_fence(mo)
+#define atomic_fence(memorder) __atomic_thread_fence(memorder)
 
 #endif /* KERN_ATOMIC_H */
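Before the internal headers, a quick usage sketch of the reworked public interface (illustrative only; the variable and function below are not part of the commit). Operand types now drive dispatch, so value literals must match the type of the atomic variable:

    #include <kern/atomic.h>

    static unsigned int pending_flags;

    unsigned int
    flags_consume(void)
    {
        /* The operand type (unsigned int) selects the 32-bit
           implementation; a misaligned operand now trips the assertion. */
        return atomic_swap(&pending_flags, 0U, ATOMIC_ACQUIRE);
    }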
diff --git a/kern/atomic_i.h b/kern/atomic_i.h
new file mode 100644
index 0000000..03b5a32
--- /dev/null
+++ b/kern/atomic_i.h
@@ -0,0 +1,535 @@
+/*
+ * Copyright (c) 2018 Richard Braun.
+ * Copyright (c) 2017 Agustina Arzille.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Architecture-specific code may override any of the type-specific
+ * functions by defining the macro of the same name.
+ */
+
+#ifndef KERN_ATOMIC_I_H
+#define KERN_ATOMIC_I_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <kern/atomic_types.h>
+#include <kern/macros.h>
+#include <machine/atomic.h>
+
+#define ATOMIC_ALIGN(ptr) MIN(sizeof(*(ptr)), sizeof(ptr))
+#define atomic_ptr_aligned(ptr) P2ALIGNED((uintptr_t)(ptr), ATOMIC_ALIGN(ptr))
+
+/*
+ * This macro is used to select the appropriate function for the given
+ * operation. The default expression is selected for pointer types.
+ * In order to avoid confusing errors, all built-in types are explicitly
+ * listed, so that unsupported ones don't select pointer operations.
+ * Instead, they select a function with an explicit name indicating
+ * an invalid type.
+ */
+#define atomic_select(ptr, op)                          \
+_Generic(*(ptr),                                        \
+         float: atomic_invalid_type,                    \
+         double: atomic_invalid_type,                   \
+         long double: atomic_invalid_type,              \
+         bool: atomic_invalid_type,                     \
+         char: atomic_invalid_type,                     \
+         signed char: atomic_invalid_type,              \
+         unsigned char: atomic_invalid_type,            \
+         short: atomic_invalid_type,                    \
+         unsigned short: atomic_invalid_type,           \
+         int: atomic_ ## op ## _32,                     \
+         unsigned int: atomic_ ## op ## _32,            \
+         long: atomic_ ## op ## _ul,                    \
+         unsigned long: atomic_ ## op ## _ul,           \
+         long long: atomic_ ## op ## _64,               \
+         unsigned long long: atomic_ ## op ## _64,      \
+         default: atomic_ ## op ## _ptr)
+
+void atomic_invalid_type(void);
+
+/* atomic_load */
+
+#ifndef atomic_load_32
+static inline unsigned int
+atomic_load_32(union atomic_constptr_32 ptr, int memorder)
+{
+    return __atomic_load_n(ptr.ui_ptr, memorder);
+}
+#endif /* atomic_load_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_load_64
+static inline unsigned long long
+atomic_load_64(union atomic_constptr_64 ptr, int memorder)
+{
+    return __atomic_load_n(ptr.ull_ptr, memorder);
+}
+#endif /* atomic_load_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_load_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_load_ul atomic_load_64
+#else /* __LP64__ */
+#define atomic_load_ul atomic_load_32
+#endif /* __LP64__ */
+
+#define atomic_load_ptr atomic_load_ul
+
+/* atomic_store */
+
+#ifndef atomic_store_32
+static inline void
+atomic_store_32(union atomic_ptr_32 ptr, union atomic_val_32 val, int memorder)
+{
+    __atomic_store_n(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_store_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_store_64
+static inline void
+atomic_store_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder)
+{
+    __atomic_store_n(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_store_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_store_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_store_ul atomic_store_64
+#else /* __LP64__ */
+#define atomic_store_ul atomic_store_32
+#endif /* __LP64__ */
+
+#define atomic_store_ptr atomic_store_ul
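To make the dispatch concrete, here is a sketch of how atomic_select resolves (not part of the commit; the function and variables are illustrative):

    #include <kern/atomic.h>

    void
    dispatch_example(void)
    {
        unsigned int word = 0;

        /* _Generic on *(&word) matches the unsigned int association,
           so this expands to a call to atomic_load_32(). */
        unsigned int v = atomic_load(&word, ATOMIC_RELAXED);
        (void)v;

        /* A sub-word type would select atomic_invalid_type, which takes
           no arguments and is never defined, so the build fails with the
           offending name in the error instead of silently using the
           pointer variant:
        unsigned short half = 0;
        atomic_load(&half, ATOMIC_RELAXED);
        */
    }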
+
+/* atomic_cas */
+
+#define atomic_cas_n(ptr, oval, nval, memorder)                 \
+MACRO_BEGIN                                                     \
+    typeof(*(ptr)) oval_;                                       \
+                                                                \
+    oval_ = (oval);                                             \
+    __atomic_compare_exchange_n(ptr, &oval_, (nval), false,     \
+                                memorder, __ATOMIC_RELAXED);    \
+    oval_;                                                      \
+MACRO_END
+
+#ifndef atomic_cas_32
+static inline unsigned int
+atomic_cas_32(union atomic_ptr_32 ptr, union atomic_val_32 oval,
+              union atomic_val_32 nval, int memorder)
+{
+    return atomic_cas_n(ptr.ui_ptr, oval.ui, nval.ui, memorder);
+}
+#endif /* atomic_cas_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_cas_64
+static inline unsigned long long
+atomic_cas_64(union atomic_ptr_64 ptr, union atomic_val_64 oval,
+              union atomic_val_64 nval, int memorder)
+{
+    return atomic_cas_n(ptr.ull_ptr, oval.ull, nval.ull, memorder);
+}
+#endif /* atomic_cas_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_cas_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_cas_ul atomic_cas_64
+#else /* __LP64__ */
+#define atomic_cas_ul atomic_cas_32
+#endif /* __LP64__ */
+
+#define atomic_cas_ptr atomic_cas_ul
+
+/* atomic_swap */
+
+#ifndef atomic_swap_32
+static inline unsigned int
+atomic_swap_32(union atomic_ptr_32 ptr, union atomic_val_32 val, int memorder)
+{
+    return __atomic_exchange_n(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_swap_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_swap_64
+static inline unsigned long long
+atomic_swap_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder)
+{
+    return __atomic_exchange_n(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_swap_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_swap_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_swap_ul atomic_swap_64
+#else /* __LP64__ */
+#define atomic_swap_ul atomic_swap_32
+#endif /* __LP64__ */
+
+#define atomic_swap_ptr atomic_swap_ul
+
+/* atomic_fetch_add */
+
+#ifndef atomic_fetch_add_32
+static inline unsigned int
+atomic_fetch_add_32(union atomic_ptr_32 ptr, union atomic_val_32 val,
+                    int memorder)
+{
+    return __atomic_fetch_add(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_fetch_add_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_fetch_add_64
+static inline unsigned long long
+atomic_fetch_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val,
+                    int memorder)
+{
+    return __atomic_fetch_add(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_fetch_add_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_fetch_add_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_fetch_add_ul atomic_fetch_add_64
+#else /* __LP64__ */
+#define atomic_fetch_add_ul atomic_fetch_add_32
+#endif /* __LP64__ */
+
+#define atomic_fetch_add_ptr atomic_fetch_add_ul
+
+/* atomic_fetch_sub */
+
+#ifndef atomic_fetch_sub_32
+static inline unsigned int
+atomic_fetch_sub_32(union atomic_ptr_32 ptr, union atomic_val_32 val,
+                    int memorder)
+{
+    return __atomic_fetch_sub(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_fetch_sub_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_fetch_sub_64
+static inline unsigned long long
+atomic_fetch_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val,
+                    int memorder)
+{
+    return __atomic_fetch_sub(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_fetch_sub_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_fetch_sub_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_fetch_sub_ul atomic_fetch_sub_64
+#else /* __LP64__ */
+#define atomic_fetch_sub_ul atomic_fetch_sub_32
+#endif /* __LP64__ */
+
+#define atomic_fetch_sub_ptr atomic_fetch_sub_ul
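Since atomic_cas returns the previously observed value rather than a success flag, callers detect success by comparing that value against the expected one. A sketch of the usual retry loop, here implementing an increment-unless-zero that plain fetch_add cannot express (illustrative, not from the commit):

    #include <stdbool.h>

    #include <kern/atomic.h>

    static bool
    ref_get_unless_zero(unsigned long *nr_refs)
    {
        unsigned long oval, prev;

        do {
            oval = atomic_load(nr_refs, ATOMIC_RELAXED);

            if (oval == 0) {
                return false;
            }

            prev = atomic_cas(nr_refs, oval, oval + 1UL, ATOMIC_ACQUIRE);
        } while (prev != oval);

        return true;
    }

On failure the loop simply retries; as the comment in atomic.h warns, a code path that gives up instead of retrying may need an explicit fence.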
+
+/* atomic_fetch_and */
+
+#ifndef atomic_fetch_and_32
+static inline unsigned int
+atomic_fetch_and_32(union atomic_ptr_32 ptr, union atomic_val_32 val,
+                    int memorder)
+{
+    return __atomic_fetch_and(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_fetch_and_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_fetch_and_64
+static inline unsigned long long
+atomic_fetch_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val,
+                    int memorder)
+{
+    return __atomic_fetch_and(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_fetch_and_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_fetch_and_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_fetch_and_ul atomic_fetch_and_64
+#else /* __LP64__ */
+#define atomic_fetch_and_ul atomic_fetch_and_32
+#endif /* __LP64__ */
+
+#define atomic_fetch_and_ptr atomic_fetch_and_ul
+
+/* atomic_fetch_or */
+
+#ifndef atomic_fetch_or_32
+static inline unsigned int
+atomic_fetch_or_32(union atomic_ptr_32 ptr, union atomic_val_32 val,
+                   int memorder)
+{
+    return __atomic_fetch_or(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_fetch_or_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_fetch_or_64
+static inline unsigned long long
+atomic_fetch_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val,
+                   int memorder)
+{
+    return __atomic_fetch_or(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_fetch_or_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_fetch_or_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_fetch_or_ul atomic_fetch_or_64
+#else /* __LP64__ */
+#define atomic_fetch_or_ul atomic_fetch_or_32
+#endif /* __LP64__ */
+
+#define atomic_fetch_or_ptr atomic_fetch_or_ul
+
+/* atomic_fetch_xor */
+
+#ifndef atomic_fetch_xor_32
+static inline unsigned int
+atomic_fetch_xor_32(union atomic_ptr_32 ptr, union atomic_val_32 val,
+                    int memorder)
+{
+    return __atomic_fetch_xor(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_fetch_xor_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_fetch_xor_64
+static inline unsigned long long
+atomic_fetch_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val,
+                    int memorder)
+{
+    return __atomic_fetch_xor(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_fetch_xor_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_fetch_xor_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_fetch_xor_ul atomic_fetch_xor_64
+#else /* __LP64__ */
+#define atomic_fetch_xor_ul atomic_fetch_xor_32
+#endif /* __LP64__ */
+
+#define atomic_fetch_xor_ptr atomic_fetch_xor_ul
+
+/* atomic_add */
+
+#ifndef atomic_add_32
+static inline void
+atomic_add_32(union atomic_ptr_32 ptr, union atomic_val_32 val, int memorder)
+{
+    __atomic_add_fetch(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_add_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_add_64
+static inline void
+atomic_add_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder)
+{
+    __atomic_add_fetch(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_add_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_add_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_add_ul atomic_add_64
+#else /* __LP64__ */
+#define atomic_add_ul atomic_add_32
+#endif /* __LP64__ */
+
+#define atomic_add_ptr atomic_add_ul
+
+/* atomic_sub */
+
+#ifndef atomic_sub_32
+static inline void
+atomic_sub_32(union atomic_ptr_32 ptr, union atomic_val_32 val, int memorder)
+{
+    __atomic_sub_fetch(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_sub_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_sub_64
+static inline void
+atomic_sub_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder)
+{
+    __atomic_sub_fetch(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_sub_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_sub_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_sub_ul atomic_sub_64
+#else /* __LP64__ */
+#define atomic_sub_ul atomic_sub_32
+#endif /* __LP64__ */
+
+#define atomic_sub_ptr atomic_sub_ul
+
+/* atomic_and */
+
+#ifndef atomic_and_32
+static inline void
+atomic_and_32(union atomic_ptr_32 ptr, union atomic_val_32 val, int memorder)
+{
+    __atomic_and_fetch(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_and_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_and_64
+static inline void
+atomic_and_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder)
+{
+    __atomic_and_fetch(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_and_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_and_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_and_ul atomic_and_64
+#else /* __LP64__ */
+#define atomic_and_ul atomic_and_32
+#endif /* __LP64__ */
+
+#define atomic_and_ptr atomic_and_ul
+
+/* atomic_or */
+
+#ifndef atomic_or_32
+static inline void
+atomic_or_32(union atomic_ptr_32 ptr, union atomic_val_32 val, int memorder)
+{
+    __atomic_or_fetch(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_or_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_or_64
+static inline void
+atomic_or_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder)
+{
+    __atomic_or_fetch(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_or_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_or_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_or_ul atomic_or_64
+#else /* __LP64__ */
+#define atomic_or_ul atomic_or_32
+#endif /* __LP64__ */
+
+#define atomic_or_ptr atomic_or_ul
+
+/* atomic_xor */
+
+#ifndef atomic_xor_32
+static inline void
+atomic_xor_32(union atomic_ptr_32 ptr, union atomic_val_32 val, int memorder)
+{
+    __atomic_xor_fetch(ptr.ui_ptr, val.ui, memorder);
+}
+#endif /* atomic_xor_32 */
+
+#ifdef ATOMIC_HAVE_64B_OPS
+
+#ifndef atomic_xor_64
+static inline void
+atomic_xor_64(union atomic_ptr_64 ptr, union atomic_val_64 val, int memorder)
+{
+    __atomic_xor_fetch(ptr.ull_ptr, val.ull, memorder);
+}
+#endif /* atomic_xor_64 */
+
+#else /* ATOMIC_HAVE_64B_OPS */
+#define atomic_xor_64 atomic_invalid_type
+#endif /* ATOMIC_HAVE_64B_OPS */
+
+#ifdef __LP64__
+#define atomic_xor_ul atomic_xor_64
+#else /* __LP64__ */
+#define atomic_xor_ul atomic_xor_32
+#endif /* __LP64__ */
+
+#define atomic_xor_ptr atomic_xor_ul
+
+#endif /* KERN_ATOMIC_I_H */
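The override hook described in the header comment works by definition order: machine/atomic.h is included first, and any type-specific function it provides (together with a macro of the same name) suppresses the generic fallback above. A hypothetical 32-bit port whose only atomic 64-bit access is a compare-and-swap might supply something like this (a sketch, not actual port code):

    /* In machine/atomic.h. */
    #define ATOMIC_HAVE_64B_OPS

    static inline unsigned long long
    atomic_load_64(union atomic_constptr_64 ptr, int memorder)
    {
        unsigned long long oval = 0;

        /* A CAS that swaps 0 for 0 writes nothing back but returns the
           current value, yielding an atomic 64-bit load. The const is
           cast away because the CAS builtin takes a mutable pointer. */
        __atomic_compare_exchange_n((unsigned long long *)ptr.ull_ptr,
                                    &oval, 0, false, memorder, memorder);
        return oval;
    }

    #define atomic_load_64 atomic_load_64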
diff --git a/kern/atomic_types.h b/kern/atomic_types.h
new file mode 100644
index 0000000..cdf1161
--- /dev/null
+++ b/kern/atomic_types.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2018 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Isolated type definitions used to avoid circular include dependencies.
+ */
+
+#ifndef KERN_ATOMIC_TYPES_H
+#define KERN_ATOMIC_TYPES_H
+
+/*
+ * After function selection, type genericity is achieved with transparent
+ * unions, a GCC extension. Here are a few things to keep in mind:
+ *  - all members must have the same representation
+ *  - calling conventions are inferred from the first member
+ */
+
+#ifdef __LP64__
+
+union atomic_ptr_32 {
+    int *i_ptr;
+    unsigned int *ui_ptr;
+} __attribute__((transparent_union));
+
+union atomic_constptr_32 {
+    const int *i_ptr;
+    const unsigned int *ui_ptr;
+} __attribute__((transparent_union));
+
+union atomic_val_32 {
+    int i;
+    unsigned int ui;
+} __attribute__((transparent_union));
+
+union atomic_ptr_64 {
+    void *ptr;
+    unsigned long long *ull_ptr;
+} __attribute__((transparent_union));
+
+union atomic_constptr_64 {
+    const void *ptr;
+    const unsigned long long *ull_ptr;
+} __attribute__((transparent_union));
+
+union atomic_val_64 {
+    void *ptr;
+    long l;
+    unsigned long ul;
+    long long ll;
+    unsigned long long ull;
+} __attribute__((transparent_union));
+
+#else /* __LP64__ */
+
+union atomic_ptr_32 {
+    void *ptr;
+    unsigned int *ui_ptr;
+} __attribute__((transparent_union));
+
+union atomic_constptr_32 {
+    const void *ptr;
+    const unsigned int *ui_ptr;
+} __attribute__((transparent_union));
+
+union atomic_val_32 {
+    void *ptr;
+    int i;
+    unsigned int ui;
+    long l;
+    unsigned long ul;
+} __attribute__((transparent_union));
+
+union atomic_ptr_64 {
+    long long *ll_ptr;
+    unsigned long long *ull_ptr;
+} __attribute__((transparent_union));
+
+union atomic_constptr_64 {
+    const long long *ll_ptr;
+    const unsigned long long *ull_ptr;
+} __attribute__((transparent_union));
+
+union atomic_val_64 {
+    long long ll;
+    unsigned long long ull;
+} __attribute__((transparent_union));
+
+#endif /* __LP64__ */
+
+#endif /* KERN_ATOMIC_TYPES_H */
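A short illustration of what the transparent unions buy (not in the commit; the names are made up): the compiler implicitly converts an argument of any member type to the union, so one function body serves signed and unsigned variants without casts at call sites.

    #include <kern/atomic_types.h>

    static inline unsigned int
    example_read_32(union atomic_constptr_32 ptr)
    {
        /* The argument arrives with the first member's calling
           convention; both members alias the same bits. */
        return *ptr.ui_ptr;
    }

    void
    example_caller(void)
    {
        int i = -1;
        unsigned int ui = 1;

        example_read_32(&i);    /* int * matches a member, no cast */
        example_read_32(&ui);   /* so does unsigned int * */
    }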
diff --git a/kern/clock.c b/kern/clock.c
index 27fb9a2..5c48bb9 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -72,7 +72,7 @@ void clock_tick_intr(void)
     if (cpu_id() == 0) {
 #ifdef ATOMIC_HAVE_64B_OPS
-        atomic_add(&clock_global_time.ticks, 1, ATOMIC_RELAXED);
+        atomic_add(&clock_global_time.ticks, 1ULL, ATOMIC_RELAXED);
 #else /* ATOMIC_HAVE_64B_OPS */
diff --git a/kern/mutex/mutex_adaptive_i.h b/kern/mutex/mutex_adaptive_i.h
index b123251..e29fdb4 100644
--- a/kern/mutex/mutex_adaptive_i.h
+++ b/kern/mutex/mutex_adaptive_i.h
@@ -40,7 +40,7 @@
  * to be unlocked, potentially spinning on the owner. It forces threads
  * trying to lock the mutex as well as the owner to take the slow path.
  */
-#define MUTEX_ADAPTIVE_CONTENDED 0x1
+#define MUTEX_ADAPTIVE_CONTENDED 0x1UL
 
 static inline void
 mutex_adaptive_init(struct mutex *mutex)
diff --git a/kern/rtmutex.c b/kern/rtmutex.c
index 6567b06..10482d5 100644
--- a/kern/rtmutex.c
+++ b/kern/rtmutex.c
@@ -26,6 +26,7 @@
 #include <kern/rtmutex.h>
 #include <kern/rtmutex_i.h>
 #include <kern/rtmutex_types.h>
+#include <kern/syscnt.h>
 #include <kern/thread.h>
 #include <kern/turnstile.h>
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 64ff69a..373c180 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -41,8 +41,8 @@
  * the turnstile wait function so that only the highest priority thread
  * may lock the mutex.
  */
-#define RTMUTEX_CONTENDED 0x1
-#define RTMUTEX_FORCE_WAIT 0x2
+#define RTMUTEX_CONTENDED ((uintptr_t)0x1)
+#define RTMUTEX_FORCE_WAIT ((uintptr_t)0x2)
 
 #define RTMUTEX_OWNER_MASK (~((uintptr_t)(RTMUTEX_FORCE_WAIT   \
                                           | RTMUTEX_CONTENDED)))
diff --git a/kern/spinlock.c b/kern/spinlock.c
index 71e60cb..5bff42f 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -102,7 +102,7 @@ static_assert(SPINLOCK_BITS <= (CHAR_BIT * sizeof(uint32_t)),
 
 struct spinlock_qnode {
     alignas(CPU_L1_SIZE) struct spinlock_qnode *next;
-    bool locked;
+    int locked;
 };
 
 /* TODO NMI support */
@@ -194,13 +194,13 @@ spinlock_qnode_set_next(struct spinlock_qnode *qnode, struct spinlock_qnode *nex
 static void
 spinlock_qnode_set_locked(struct spinlock_qnode *qnode)
 {
-    qnode->locked = true;
+    qnode->locked = 1;
 }
 
 static void
 spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
 {
-    bool locked;
+    int locked;
 
     for (;;) {
         locked = atomic_load(&qnode->locked, ATOMIC_ACQUIRE);
@@ -216,7 +216,7 @@ spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
 static void
 spinlock_qnode_clear_locked(struct spinlock_qnode *qnode)
 {
-    atomic_store(&qnode->locked, false, ATOMIC_RELEASE);
+    atomic_store(&qnode->locked, 0, ATOMIC_RELEASE);
 }
 
 static void
diff --git a/kern/task.h b/kern/task.h
index d6e9eb4..12e29ac 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -55,7 +55,7 @@ task_ref(struct task *task)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_add(&task->nr_refs, 1, ATOMIC_RELAXED);
+    nr_refs = atomic_fetch_add(&task->nr_refs, 1UL, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned long)-1);
 }
 
@@ -64,7 +64,7 @@ task_unref(struct task *task)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_sub(&task->nr_refs, 1, ATOMIC_ACQ_REL);
+    nr_refs = atomic_fetch_sub(&task->nr_refs, 1UL, ATOMIC_ACQ_REL);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
diff --git a/kern/thread.h b/kern/thread.h
index b97dbed..787ccf5 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -296,7 +296,7 @@ thread_ref(struct thread *thread)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED);
+    nr_refs = atomic_fetch_add(&thread->nr_refs, 1UL, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned long)-1);
 }
 
@@ -305,7 +305,7 @@ thread_unref(struct thread *thread)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_ACQ_REL);
+    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1UL, ATOMIC_ACQ_REL);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
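The literal and type changes in clock.c, spinlock.c, task.h and thread.h above all follow from the new dispatch rules: _Generic matches the operand type exactly, sub-word types such as bool are rejected outright, and the transparent value unions only accept their member types, so a plain int literal no longer converts implicitly for long or long long operands. In short (illustrative):

    #include <kern/atomic.h>

    static unsigned long nr_refs;

    void
    literal_types(void)
    {
        /* OK: 1UL is unsigned long, a member of the selected value union. */
        atomic_fetch_add(&nr_refs, 1UL, ATOMIC_RELAXED);

        /* Rejected on 64-bit builds: plain int is not a member of the
           64-bit value union, so the call does not type-check:
        atomic_fetch_add(&nr_refs, 1, ATOMIC_RELAXED);
        */
    }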