/*
 * Copyright (c) 2017 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Type-generic memory-model aware atomic operations.
 */

#ifndef _KERN_ATOMIC_H
#define _KERN_ATOMIC_H

#include <stdbool.h>

#include <kern/macros.h>
#include <machine/atomic.h>

/*
 * Supported memory orders.
 */
#define ATOMIC_RELAXED   __ATOMIC_RELAXED
#define ATOMIC_ACQUIRE   __ATOMIC_ACQUIRE
#define ATOMIC_RELEASE   __ATOMIC_RELEASE
#define ATOMIC_ACQ_REL   __ATOMIC_ACQ_REL
#define ATOMIC_SEQ_CST   __ATOMIC_SEQ_CST

/*
 * Type-generic atomic operations.
 */
#define atomic_fetch_add(ptr, val, mo)  __atomic_fetch_add(ptr, val, mo)

#define atomic_fetch_sub(ptr, val, mo)  __atomic_fetch_sub(ptr, val, mo)

#define atomic_fetch_and(ptr, val, mo)  __atomic_fetch_and(ptr, val, mo)

#define atomic_fetch_or(ptr, val, mo)   __atomic_fetch_or(ptr, val, mo)

#define atomic_fetch_xor(ptr, val, mo)  __atomic_fetch_xor(ptr, val, mo)

#define atomic_add(ptr, val, mo)        (void)__atomic_add_fetch(ptr, val, mo)

#define atomic_sub(ptr, val, mo)        (void)__atomic_sub_fetch(ptr, val, mo)

#define atomic_and(ptr, val, mo)        (void)__atomic_and_fetch(ptr, val, mo)

#define atomic_or(ptr, val, mo)         (void)__atomic_or_fetch(ptr, val, mo)

#define atomic_xor(ptr, val, mo)        (void)__atomic_xor_fetch(ptr, val, mo)

#define atomic_swap(ptr, val, mo)       __atomic_exchange_n(ptr, val, mo)
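
/*
 * Usage sketch (illustrative only, not part of the interface): the
 * fetch_* operations return the value the object held before the
 * operation, whereas the plain operations discard it. The names used
 * below (nr_refs, obj_ref, obj_unref) are hypothetical.
 *
 *  static unsigned long nr_refs;
 *
 *  static void
 *  obj_ref(void)
 *  {
 *      atomic_add(&nr_refs, 1, ATOMIC_RELAXED);
 *  }
 *
 *  static bool
 *  obj_unref(void)
 *  {
 *      return atomic_fetch_sub(&nr_refs, 1, ATOMIC_ACQ_REL) == 1;
 *  }
 */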

/*
 * For compare-and-swap, this interface deviates slightly from the
 * standard: it returns the value observed before the comparison instead
 * of a boolean, leaving it up to the user to determine whether the swap
 * was actually performed, by comparing the returned value with the
 * expected one.
 *
 * Also, note that the memory order on failure is relaxed. This is
 * because atomic CAS is typically used in a retry loop. However, if a
 * different code path is taken on failure (rather than retrying), the
 * user should be aware that a memory fence might be necessary.
 *
 * Finally, although a local variable isn't strictly needed for the new
 * value, some compilers seem to have trouble when all parameters don't
 * have the same type.
 */
#define atomic_cas(ptr, oval, nval, mo)                           \
MACRO_BEGIN                                                       \
    typeof(*(ptr)) ___oval, ___nval;                              \
                                                                  \
    ___oval = (oval);                                             \
    ___nval = (nval);                                             \
    __atomic_compare_exchange_n(ptr, &___oval, ___nval, false,    \
                                mo, ATOMIC_RELAXED);              \
    ___oval;                                                      \
MACRO_END
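
/*
 * Usage sketch (illustrative only): a typical CAS retry loop, relying on
 * the return-previous-value convention described above. The names used
 * below (counter, counter_add) are hypothetical.
 *
 *  static unsigned long counter;
 *
 *  static void
 *  counter_add(unsigned long delta)
 *  {
 *      unsigned long oval, prev;
 *
 *      do {
 *          oval = atomic_load(&counter, ATOMIC_RELAXED);
 *          prev = atomic_cas(&counter, oval, oval + delta, ATOMIC_ACQ_REL);
 *      } while (prev != oval);
 *  }
 */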

/*
 * Some architectures may need specific definitions for loads and stores,
 * in order to prevent the compiler from emitting unsupported instructions.
 * As such, these are only defined here if the architecture-specific part
 * of the module didn't already provide them.
 */

#ifndef ATOMIC_ARCH_SPECIFIC_LOAD
#define atomic_load(ptr, mo) __atomic_load_n(ptr, mo)
#endif

#ifndef ATOMIC_ARCH_SPECIFIC_STORE
#define atomic_store(ptr, val, mo) __atomic_store_n(ptr, val, mo)
#endif
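
/*
 * Usage sketch (illustrative only): release/acquire pairing with
 * atomic_store and atomic_load to publish data to another processor.
 * The names used below (data, data_ready, compute, consume) are
 * hypothetical.
 *
 *  static unsigned long data;
 *  static unsigned long data_ready;
 *
 *  Producer:
 *      data = compute();
 *      atomic_store(&data_ready, 1, ATOMIC_RELEASE);
 *
 *  Consumer:
 *      if (atomic_load(&data_ready, ATOMIC_ACQUIRE)) {
 *          consume(data);
 *      }
 */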

/*
 * Common shortcuts.
 */

#define atomic_cas_acquire(ptr, oval, nval) \
    atomic_cas(ptr, oval, nval, ATOMIC_ACQUIRE)

#define atomic_cas_release(ptr, oval, nval) \
    atomic_cas(ptr, oval, nval, ATOMIC_RELEASE)

#define atomic_cas_seq_cst(ptr, oval, nval) \
    atomic_cas(ptr, oval, nval, ATOMIC_SEQ_CST)

#define atomic_swap_acquire(ptr, val)   atomic_swap(ptr, val, ATOMIC_ACQUIRE)
#define atomic_swap_release(ptr, val)   atomic_swap(ptr, val, ATOMIC_RELEASE)
#define atomic_swap_seq_cst(ptr, val)   atomic_swap(ptr, val, ATOMIC_SEQ_CST)

#define atomic_fetch_add_acq_rel(ptr, val) \
    atomic_fetch_add(ptr, val, ATOMIC_ACQ_REL)

#define atomic_fetch_sub_acq_rel(ptr, val) \
    atomic_fetch_sub(ptr, val, ATOMIC_ACQ_REL)

#define atomic_or_acq_rel(ptr, val)    atomic_or(ptr, val, ATOMIC_ACQ_REL)
#define atomic_and_acq_rel(ptr, val)   atomic_and(ptr, val, ATOMIC_ACQ_REL)

#endif /* _KERN_ATOMIC_H */