/*
 * Copyright (c) 2018 Richard Braun.
 * Copyright (c) 2017 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * Type-generic, memory-model-aware atomic operations.
 *
 * For portability reasons, this interface restricts atomic operation
 * sizes to 32-bit and 64-bit widths.
 *
 * Some configurations may not support 64-bit operations; check whether
 * the ATOMIC_HAVE_64B_OPS macro is defined to find out.
 *
 * TODO Replace mentions of "memory barriers" throughout the code with
 * C11 memory model terminology.
 */

#ifndef KERN_ATOMIC_H
#define KERN_ATOMIC_H

#include <assert.h>
#include <stdbool.h>

#include <kern/atomic_i.h>
#include <kern/macros.h>
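
/*
 * Usage sketch (illustrative only): code that relies on 64-bit atomics
 * should guard on ATOMIC_HAVE_64B_OPS. The stats_counter variable below
 * is a hypothetical example, not part of this interface.
 *
 *   static uint64_t stats_counter;
 *
 *   #ifdef ATOMIC_HAVE_64B_OPS
 *   atomic_add(&stats_counter, 1, ATOMIC_RELAXED);
 *   #else
 *   (fall back to a lock-protected update)
 *   #endif
 */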

/*
 * Supported memory orders.
 */
#define ATOMIC_RELAXED  __ATOMIC_RELAXED
#define ATOMIC_CONSUME  __ATOMIC_CONSUME
#define ATOMIC_ACQUIRE  __ATOMIC_ACQUIRE
#define ATOMIC_RELEASE  __ATOMIC_RELEASE
#define ATOMIC_ACQ_REL  __ATOMIC_ACQ_REL
#define ATOMIC_SEQ_CST  __ATOMIC_SEQ_CST

#define atomic_load(ptr, memorder)                                          \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, load)(ptr, memorder));              \
MACRO_END

#define atomic_store(ptr, val, memorder)                                    \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    atomic_select(ptr, store)(ptr, val, memorder);                          \
MACRO_END
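
/*
 * Usage sketch (illustrative only): release/acquire message passing.
 * The data and ready variables are placeholders, not part of this
 * interface.
 *
 *   Producer:
 *       data = 123;
 *       atomic_store(&ready, 1, ATOMIC_RELEASE);
 *
 *   Consumer:
 *       while (!atomic_load(&ready, ATOMIC_ACQUIRE))
 *           continue;
 *       assert(data == 123);
 *
 * The release store pairs with the acquire load, so that all writes made
 * before the store are visible once the load observes the new value.
 */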

/*
 * For compare-and-swap, this interface deviates slightly from the
 * standard: it returns only the value observed before the comparison,
 * leaving it to the caller to determine whether the swap was actually
 * performed. A typical retry loop is sketched below the macro.
 *
 * Also note that the memory order on failure is relaxed, because atomic
 * CAS is typically used in a loop. If a different code path is taken on
 * failure (rather than retrying), the caller should be aware that an
 * explicit memory fence might be necessary.
 */
#define atomic_cas(ptr, oval, nval, memorder)                               \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, cas)(ptr, oval, nval, memorder));   \
MACRO_END
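
/*
 * Usage sketch (illustrative only): a typical CAS retry loop. The
 * counter variable is a placeholder, not part of this interface.
 *
 *   unsigned long prev, oval;
 *
 *   prev = atomic_load(&counter, ATOMIC_RELAXED);
 *
 *   do {
 *       oval = prev;
 *       prev = atomic_cas(&counter, oval, oval + 1, ATOMIC_RELAXED);
 *   } while (prev != oval);
 *
 * Since atomic_cas() returns the value seen before the comparison, the
 * swap is known to have succeeded when that value matches the expected
 * one.
 */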

#define atomic_swap(ptr, val, memorder)                                     \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, swap)(ptr, val, memorder));         \
MACRO_END

#define atomic_fetch_add(ptr, val, memorder)                                \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_add)(ptr, val, memorder));    \
MACRO_END

#define atomic_fetch_sub(ptr, val, memorder)                                \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_sub)(ptr, val, memorder));    \
MACRO_END
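
/*
 * Usage sketch (illustrative only): releasing a reference on a
 * hypothetical reference-counted object. The nr_refs member and
 * obj_destroy() function are placeholders, not part of this interface.
 *
 *   unsigned long nr_refs;
 *
 *   nr_refs = atomic_fetch_sub(&obj->nr_refs, 1, ATOMIC_ACQ_REL);
 *   assert(nr_refs != 0);
 *
 *   if (nr_refs == 1) {
 *       obj_destroy(obj);
 *   }
 *
 * The fetch_ variants return the value held before the operation, which
 * is what makes the last-reference check possible.
 */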

#define atomic_fetch_and(ptr, val, memorder)                                \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_and)(ptr, val, memorder));    \
MACRO_END

#define atomic_fetch_or(ptr, val, memorder)                                 \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_or)(ptr, val, memorder));     \
MACRO_END

#define atomic_fetch_xor(ptr, val, memorder)                                \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    ((typeof(*(ptr)))atomic_select(ptr, fetch_xor)(ptr, val, memorder));    \
MACRO_END

#define atomic_add(ptr, val, memorder)                                      \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    atomic_select(ptr, add)(ptr, val, memorder);                            \
MACRO_END

#define atomic_sub(ptr, val, memorder)                                      \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    atomic_select(ptr, sub)(ptr, val, memorder);                            \
MACRO_END

#define atomic_and(ptr, val, memorder)                                      \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    atomic_select(ptr, and)(ptr, val, memorder);                            \
MACRO_END

#define atomic_or(ptr, val, memorder)                                       \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    atomic_select(ptr, or)(ptr, val, memorder);                             \
MACRO_END

#define atomic_xor(ptr, val, memorder)                                      \
MACRO_BEGIN                                                                 \
    assert(atomic_ptr_aligned(ptr));                                        \
    atomic_select(ptr, xor)(ptr, val, memorder);                            \
MACRO_END
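
/*
 * Usage sketch (illustrative only): the non-fetch variants above are
 * meant for cases where the previous value is not needed, such as
 * setting a flag bit. The flags variable and PENDING_FLAG constant are
 * placeholders, not part of this interface.
 *
 *   atomic_or(&flags, PENDING_FLAG, ATOMIC_RELEASE);
 */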

#define atomic_fence(memorder) __atomic_thread_fence(memorder)
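
/*
 * Usage sketch (illustrative only): a release fence can order a batch of
 * relaxed stores before a relaxed flag store, avoiding a release
 * operation on every access. The a, b and ready variables are
 * placeholders, not part of this interface.
 *
 *   atomic_store(&a, 1, ATOMIC_RELAXED);
 *   atomic_store(&b, 2, ATOMIC_RELAXED);
 *   atomic_fence(ATOMIC_RELEASE);
 *   atomic_store(&ready, 1, ATOMIC_RELAXED);
 *
 * A consumer would pair this with an acquire fence after observing the
 * flag with a relaxed load.
 */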

#endif /* KERN_ATOMIC_H */