 arch/x86/machine/trap.c | 2 +-
 kern/clock.c            | 2 +-
 kern/rtmutex_i.h        | 4 ++--
 kern/spinlock.c         | 8 ++++----
 kern/task.h             | 4 ++--
 kern/thread.h           | 4 ++--
 6 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/machine/trap.c b/arch/x86/machine/trap.c
index 878f20c9..534b3f6f 100644
--- a/arch/x86/machine/trap.c
+++ b/arch/x86/machine/trap.c
@@ -91,7 +91,7 @@ static void __init
 trap_handler_init(struct trap_handler *handler, int flags, trap_handler_fn_t fn)
 {
     handler->flags = flags;
-    atomic_store(&handler->fn, fn, ATOMIC_RELAXED);
+    handler->fn = fn;
 }
 
 static void __init
diff --git a/kern/clock.c b/kern/clock.c
index 27fb9a23..5c48bb9d 100644
--- a/kern/clock.c
+++ b/kern/clock.c
@@ -72,7 +72,7 @@ void clock_tick_intr(void)
 
     if (cpu_id() == 0) {
 #ifdef ATOMIC_HAVE_64B_OPS
-        atomic_add(&clock_global_time.ticks, 1, ATOMIC_RELAXED);
+        atomic_add(&clock_global_time.ticks, 1ULL, ATOMIC_RELAXED);
 
 #else /* ATOMIC_HAVE_64B_OPS */
diff --git a/kern/rtmutex_i.h b/kern/rtmutex_i.h
index 64ff69a0..373c180f 100644
--- a/kern/rtmutex_i.h
+++ b/kern/rtmutex_i.h
@@ -41,8 +41,8 @@
  * the turnstile wait function so that only the highest priority thread
  * may lock the mutex.
  */
-#define RTMUTEX_CONTENDED   0x1
-#define RTMUTEX_FORCE_WAIT  0x2
+#define RTMUTEX_CONTENDED   ((uintptr_t)0x1)
+#define RTMUTEX_FORCE_WAIT  ((uintptr_t)0x2)
 
 #define RTMUTEX_OWNER_MASK  (~((uintptr_t)(RTMUTEX_FORCE_WAIT \
                                            | RTMUTEX_CONTENDED)))
diff --git a/kern/spinlock.c b/kern/spinlock.c
index 71e60cb6..5bff42f9 100644
--- a/kern/spinlock.c
+++ b/kern/spinlock.c
@@ -102,7 +102,7 @@ static_assert(SPINLOCK_BITS <= (CHAR_BIT * sizeof(uint32_t)),
 
 struct spinlock_qnode {
     alignas(CPU_L1_SIZE) struct spinlock_qnode *next;
-    bool locked;
+    int locked;
 };
 
 /* TODO NMI support */
@@ -194,13 +194,13 @@ spinlock_qnode_set_next(struct spinlock_qnode *qnode, struct spinlock_qnode *nex
 static void
 spinlock_qnode_set_locked(struct spinlock_qnode *qnode)
 {
-    qnode->locked = true;
+    qnode->locked = 1;
 }
 
 static void
 spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
 {
-    bool locked;
+    int locked;
 
     for (;;) {
         locked = atomic_load(&qnode->locked, ATOMIC_ACQUIRE);
@@ -216,7 +216,7 @@ spinlock_qnode_wait_locked(const struct spinlock_qnode *qnode)
 static void
 spinlock_qnode_clear_locked(struct spinlock_qnode *qnode)
 {
-    atomic_store(&qnode->locked, false, ATOMIC_RELEASE);
+    atomic_store(&qnode->locked, 0, ATOMIC_RELEASE);
 }
 
 static void
diff --git a/kern/task.h b/kern/task.h
index d6e9eb44..12e29ac8 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -55,7 +55,7 @@ task_ref(struct task *task)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_add(&task->nr_refs, 1, ATOMIC_RELAXED);
+    nr_refs = atomic_fetch_add(&task->nr_refs, 1UL, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned long)-1);
 }
 
@@ -64,7 +64,7 @@ task_unref(struct task *task)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_sub(&task->nr_refs, 1, ATOMIC_ACQ_REL);
+    nr_refs = atomic_fetch_sub(&task->nr_refs, 1UL, ATOMIC_ACQ_REL);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
diff --git a/kern/thread.h b/kern/thread.h
index eba9bf2c..4bead755 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -288,7 +288,7 @@ thread_ref(struct thread *thread)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_add(&thread->nr_refs, 1, ATOMIC_RELAXED);
+    nr_refs = atomic_fetch_add(&thread->nr_refs, 1UL, ATOMIC_RELAXED);
     assert(nr_refs != (unsigned long)-1);
 }
 
@@ -297,7 +297,7 @@ thread_unref(struct thread *thread)
 {
     unsigned long nr_refs;
 
-    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1, ATOMIC_ACQ_REL);
+    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1UL, ATOMIC_ACQ_REL);
     assert(nr_refs != 0);
 
     if (nr_refs == 1) {
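
Most of these changes share one theme: the operand passed to an atomic operation now has exactly the type of the atomic variable — 1UL for the unsigned long reference counters, 1ULL for the 64-bit tick counter, and uintptr_t casts for the rtmutex flag bits that get combined into an owner word. A plausible motivation (the diff itself does not state one) is that atomic wrappers built on compiler builtins can type-check their arguments, turning a mismatched operand into a compile-time error instead of a silent conversion. The stand-alone sketch below uses portable C11 atomics rather than the kernel's own atomic.h macros — checked_fetch_add and ATOMIC_SIZE_CHECK are hypothetical names — to show how such a check can work and why the literal suffix matters on LP64 targets:

#include <stdatomic.h>
#include <stdio.h>

/* Fail compilation when the operand's size differs from the target's:
 * on a mismatch the array gets length -1, a constant-expression error. */
#define ATOMIC_SIZE_CHECK(ptr, val) \
    ((void)sizeof(char[1 - 2 * (sizeof(*(ptr)) != sizeof(val))]))

/* Hypothetical type-checked wrapper around C11 atomic_fetch_add. */
#define checked_fetch_add(ptr, val, mo) \
    (ATOMIC_SIZE_CHECK(ptr, val),       \
     atomic_fetch_add_explicit((ptr), (val), (mo)))

int
main(void)
{
    _Atomic unsigned long nr_refs = 0;

    /* OK on every target: the operand has the counter's exact type. */
    checked_fetch_add(&nr_refs, 1UL, memory_order_relaxed);

    /* Would not compile on LP64 targets, where sizeof(int) == 4 but
     * sizeof(unsigned long) == 8:
     *
     * checked_fetch_add(&nr_refs, 1, memory_order_relaxed);
     */

    printf("nr_refs = %lu\n", nr_refs);
    return 0;
}

With a plain 1 the operand is an int, 4 bytes against the counter's 8 on LP64, and the negative array size aborts the build; on 32-bit targets both are 4 bytes and the mismatch would go unnoticed, which is exactly why spelling out 1UL at every call site is the more robust habit.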
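
The spinlock change is different in kind: it widens the MCS queue node's locked flag from bool to int. A likely reason — an assumption, not stated in the diff — is that word-sized operands are the lowest common denominator that every supported architecture can access atomically, whereas bool is typically a single byte. Here is a minimal C11 sketch of the same acquire/release handoff on an int flag (struct qnode and both function names are illustrative, not the kernel's):

#include <stdatomic.h>

struct qnode {
    _Atomic int locked;     /* int rather than bool: word-sized accesses
                             * are assumed available on every target's
                             * atomic instructions. */
};

/* Spin until the predecessor hands over the lock. The acquire load pairs
 * with the release store in qnode_clear_locked(). */
static void
qnode_wait_locked(struct qnode *qnode)
{
    while (atomic_load_explicit(&qnode->locked, memory_order_acquire)) {
        /* busy-wait */
    }
}

/* Hand the lock to the spinning successor. */
static void
qnode_clear_locked(struct qnode *qnode)
{
    atomic_store_explicit(&qnode->locked, 0, memory_order_release);
}

The acquire/release pairing is what makes the handoff safe: every write the unlocking CPU performed inside its critical section is guaranteed visible to the spinning CPU once it observes locked == 0.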