author    Richard Braun <rbraun@sceen.net>    2018-04-21 10:42:11 +0200
committer Richard Braun <rbraun@sceen.net>    2018-04-21 10:49:07 +0200
commit    ec4ddf8b95585cc3e340dd06df109f83a23b2d77
tree      d70e9c7a52c6c5f60e8d971b0eeb4b275d80bb33
parent    89ebd57e0bb3a63f2f794ce41dd4c831b4aeb35c
Fix atomic operations argument types
In preparation for the rework of atomic operations, all atomic function calls are fixed to use fully supported, compatible types. This means that atomic operations are restricted to 32-bit and 64-bit sizes, and that value types must be strictly compatible with pointer types.
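As a standalone illustration (not the kernel's actual atomic.h macros, which this patch prepares to rework), the sketch below shows the kind of strict operand type check such a calling convention allows, using the GCC __atomic builtins directly. The ex_atomic_fetch_add wrapper and its check are hypothetical: the point is that 1UL against an unsigned long object passes, while a plain int literal would not.

#include <stdio.h>

/* Hypothetical wrapper: reject value operands whose type does not
 * strictly match the type of the object the pointer refers to. */
#define ex_atomic_fetch_add(ptr, val, mo)                                   \
    __extension__ ({                                                        \
        _Static_assert(__builtin_types_compatible_p(__typeof__(*(ptr)),     \
                                                    __typeof__(val)),       \
                       "atomic operand type must match the object type");   \
        __atomic_fetch_add((ptr), (val), (mo));                             \
    })

static unsigned long nr_refs;

int
main(void)
{
    /* OK: 1UL has type unsigned long, the same as *(&nr_refs). */
    unsigned long prev = ex_atomic_fetch_add(&nr_refs, 1UL, __ATOMIC_RELAXED);

    /* ex_atomic_fetch_add(&nr_refs, 1, __ATOMIC_RELAXED) would fail to
     * compile here: 1 has type int, which is not compatible with
     * unsigned long. */
    printf("previous value: %lu\n", prev);
    return 0;
}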
-rw-r--r--  arch/x86/machine/trap.c  |  2
-rw-r--r--  kern/clock.c             |  2
-rw-r--r--  kern/rtmutex_i.h         |  4
-rw-r--r--  kern/spinlock.c          |  8
-rw-r--r--  kern/task.h              |  4
-rw-r--r--  kern/thread.h            |  4
6 files changed, 12 insertions, 12 deletions
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
index 878f20c..534b3f6 100644
--- a/arch/x86/machine/trap.c
+++ b/arch/x86/machine/trap.c
@@ -91,7 +91,7 @@ static void __init
trap_handler_init(struct trap_handler *handler, int flags, trap_handler_fn_t fn)
{
handler->flags = flags;
- atomic_store(&handler->fn, fn, ATOMIC_RELAXED);
+ handler->fn = fn;
}
static void __init
diff --git a/kern/clock.c b/kern/clock.c
index 27fb9a2..5c48bb9 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -72,7 +72,7 @@ void clock_tick_intr(void)
if (cpu_id() == 0) {
#ifdef ATOMIC_HAVE_64B_OPS
- atomic_add(&clock_global_time.ticks, 1, ATOMIC_RELAXED);
+ atomic_add(&clock_global_time.ticks, 1ULL, ATOMIC_RELAXED);
#else /* ATOMIC_HAVE_64B_OPS */
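For reference, a standalone sketch of the guarded 64-bit increment above, with illustrative names and the GCC builtins in place of the kernel's atomic wrappers. The preprocessor guard is only a rough analogue of ATOMIC_HAVE_64B_OPS, and the fallback branch is a placeholder for whatever the kernel really does without 64-bit atomics.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t ex_ticks;   /* stand-in for clock_global_time.ticks */

static void
ex_tick(void)
{
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
    /* 64-bit atomics available: the 1ULL operand matches the 64-bit
     * counter type, as the stricter calling convention requires. */
    __atomic_add_fetch(&ex_ticks, 1ULL, __ATOMIC_RELAXED);
#else
    /* Placeholder for the real fallback path used when 64-bit atomic
     * operations are not available. */
    ex_ticks++;
#endif
}

int
main(void)
{
    ex_tick();
    printf("ticks: %" PRIu64 "\n", ex_ticks);
    return 0;
}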
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 64ff69a..373c180 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -41,8 +41,8 @@
* the turnstile wait function so that only the highest priority thread
* may lock the mutex.
*/
-#define RTMUTEX_CONTENDED 0x1
-#define RTMUTEX_FORCE_WAIT 0x2
+#define RTMUTEX_CONTENDED ((uintptr_t)0x1)
+#define RTMUTEX_FORCE_WAIT ((uintptr_t)0x2)
#define RTMUTEX_OWNER_MASK (~((uintptr_t)(RTMUTEX_FORCE_WAIT \
| RTMUTEX_CONTENDED)))
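The casts matter because, as the RTMUTEX_OWNER_MASK definition suggests, the rt-mutex word packs the owner thread pointer together with these flag bits and is manipulated with uintptr_t-typed atomic operations. A standalone sketch with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define EX_CONTENDED   ((uintptr_t)0x1)
#define EX_FORCE_WAIT  ((uintptr_t)0x2)
#define EX_OWNER_MASK  (~(EX_FORCE_WAIT | EX_CONTENDED))

struct ex_thread {
    int priority;
};

int
main(void)
{
    static struct ex_thread owner_thread;   /* aligned, so low bits are free */
    uintptr_t word = (uintptr_t)&owner_thread;

    /* Set the contended bit; the operand has the same type as the word,
     * so the atomic call satisfies the stricter typing rules. */
    __atomic_fetch_or(&word, EX_CONTENDED, __ATOMIC_ACQUIRE);

    /* Masking the flag bits off recovers the owner pointer. */
    struct ex_thread *owner = (struct ex_thread *)(word & EX_OWNER_MASK);
    printf("owner recovered: %d\n", owner == &owner_thread);
    return 0;
}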
diff --git a/kern/spinlock.c b/kern/spinlock.c
index 71e60cb..5bff42f 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -102,7 +102,7 @@ static_assert(SPINLOCK_BITS <= (CHAR_BIT * sizeof(uint32_t)),
struct spinlock_qnode {
alignas(CPU_L1_SIZE) struct spinlock_qnode *next;
- bool locked;
+ int locked;
};
/* TODO NMI support */
@@ -194,13 +194,13 @@ spinlock_qnode_set_next(struct spinlock_qnode *qnode, struct spinlock_qnode *nex
static void
spinlock_qnode_set_locked(struct spinlock_qnode *qnode)
{
- qnode->locked = true;
+ qnode->locked = 1;
}
static void
spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
{
- bool locked;
+ int locked;
for (;;) {
locked = atomic_load(&qnode->locked, ATOMIC_ACQUIRE);
@@ -216,7 +216,7 @@ spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
static void
spinlock_qnode_clear_locked(struct spinlock_qnode *qnode)
{
- atomic_store(&qnode->locked, false, ATOMIC_RELEASE);
+ atomic_store(&qnode->locked, 0, ATOMIC_RELEASE);
}
static void
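Putting the pieces together, a standalone sketch of the qnode wait/clear pattern after the bool to int change, with illustrative names and the GCC builtins instead of the kernel's atomic wrappers; an int flag keeps the atomic accesses on a fully supported 32-bit type.

#include <stdio.h>

struct ex_qnode {
    struct ex_qnode *next;
    int locked;             /* was bool; int is a supported 32-bit type */
};

static void
ex_qnode_wait_locked(const struct ex_qnode *qnode)
{
    for (;;) {
        int locked = __atomic_load_n(&qnode->locked, __ATOMIC_ACQUIRE);

        if (!locked) {
            break;
        }
        /* A real implementation would relax/pause the CPU here. */
    }
}

static void
ex_qnode_clear_locked(struct ex_qnode *qnode)
{
    __atomic_store_n(&qnode->locked, 0, __ATOMIC_RELEASE);
}

int
main(void)
{
    struct ex_qnode qnode = { .next = NULL, .locked = 0 };

    ex_qnode_clear_locked(&qnode);
    ex_qnode_wait_locked(&qnode);   /* returns immediately, flag is clear */
    printf("qnode released\n");
    return 0;
}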
diff --git a/kern/task.h b/kern/task.h
index d6e9eb4..12e29ac 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -55,7 +55,7 @@ task_ref(struct task *task)
{
unsigned long nr_refs;
- nr_refs = atomic_fetch_add(&task->nr_refs, 1, ATOMIC_RELAXED);
+ nr_refs = atomic_fetch_add(&task->nr_refs, 1UL, ATOMIC_RELAXED);
assert(nr_refs != (unsigned long)-1);
}
@@ -64,7 +64,7 @@ task_unref(struct task *task)
{
unsigned long nr_refs;
- nr_refs = atomic_fetch_sub(&task->nr_refs, 1, ATOMIC_ACQ_REL);
+ nr_refs = atomic_fetch_sub(&task->nr_refs, 1UL, ATOMIC_ACQ_REL);
assert(nr_refs != 0);
if (nr_refs == 1) {
diff --git a/kern/thread.h b/kern/thread.h
index eba9bf2..4bead75 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -288,7 +288,7 @@ thread_ref(struct thread *thread)
{
unsigned long nr_refs;
- nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED);
+ nr_refs = atomic_fetch_add(&thread->nr_refs, 1UL, ATOMIC_RELAXED);
assert(nr_refs != (unsigned long)-1);
}
@@ -297,7 +297,7 @@ thread_unref(struct thread *thread)
{
unsigned long nr_refs;
- nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_ACQ_REL);
+ nr_refs = atomic_fetch_sub(&thread->nr_refs, 1UL, ATOMIC_ACQ_REL);
assert(nr_refs != 0);
if (nr_refs == 1) {
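The same pattern appears in task.h above. As a standalone sketch with hypothetical names: the reference counter is an unsigned long, so both operands become 1UL, the decrement uses acquire-release ordering, and the caller that sees the count drop from 1 to 0 is the one responsible for destroying the object.

#include <assert.h>
#include <stdio.h>

struct ex_obj {
    unsigned long nr_refs;
};

static void
ex_obj_ref(struct ex_obj *obj)
{
    unsigned long nr_refs;

    nr_refs = __atomic_fetch_add(&obj->nr_refs, 1UL, __ATOMIC_RELAXED);
    assert(nr_refs != (unsigned long)-1);   /* counter must not wrap */
}

static void
ex_obj_unref(struct ex_obj *obj)
{
    unsigned long nr_refs;

    nr_refs = __atomic_fetch_sub(&obj->nr_refs, 1UL, __ATOMIC_ACQ_REL);
    assert(nr_refs != 0);                   /* no unref without a ref */

    if (nr_refs == 1) {
        printf("last reference dropped, object may be destroyed\n");
    }
}

int
main(void)
{
    struct ex_obj obj = { .nr_refs = 1 };

    ex_obj_ref(&obj);
    ex_obj_unref(&obj);
    ex_obj_unref(&obj);     /* prints the destruction message */
    return 0;
}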