summaryrefslogtreecommitdiff
path: root/kern/spinlock.c
diff options
context:
space:
mode:
authorRichard Braun <rbraun@sceen.net>2018-04-21 10:42:11 +0200
committerRichard Braun <rbraun@sceen.net>2018-04-21 10:49:07 +0200
commitec4ddf8b95585cc3e340dd06df109f83a23b2d77 (patch)
treed70e9c7a52c6c5f60e8d971b0eeb4b275d80bb33 /kern/spinlock.c
parent89ebd57e0bb3a63f2f794ce41dd4c831b4aeb35c (diff)
Fix atomic operations argument types
In preparation for the rework of atomic operations, all atomic function calls are fixed to use fully supported, compatible types. This means that atomic operations are restricted to 32-bit and 64-bit, and that value types must be strictly compatible with pointer types.
Diffstat (limited to 'kern/spinlock.c')
-rw-r--r--kern/spinlock.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/kern/spinlock.c b/kern/spinlock.c
index 71e60cb6..5bff42f9 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -102,7 +102,7 @@ static_assert(SPINLOCK_BITS <= (CHAR_BIT * sizeof(uint32_t)),
struct spinlock_qnode {
alignas(CPU_L1_SIZE) struct spinlock_qnode *next;
- bool locked;
+ int locked;
};
/* TODO NMI support */
@@ -194,13 +194,13 @@ spinlock_qnode_set_next(struct spinlock_qnode *qnode, struct spinlock_qnode *nex
static void
spinlock_qnode_set_locked(struct spinlock_qnode *qnode)
{
- qnode->locked = true;
+ qnode->locked = 1;
}
static void
spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
{
- bool locked;
+ int locked;
for (;;) {
locked = atomic_load(&qnode->locked, ATOMIC_ACQUIRE);
@@ -216,7 +216,7 @@ spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
static void
spinlock_qnode_clear_locked(struct spinlock_qnode *qnode)
{
- atomic_store(&qnode->locked, false, ATOMIC_RELEASE);
+ atomic_store(&qnode->locked, 0, ATOMIC_RELEASE);
}
static void