author     Richard Braun <rbraun@sceen.net>    2017-09-02 16:06:54 +0200
committer  Richard Braun <rbraun@sceen.net>    2017-09-02 16:06:54 +0200
commit     7ded1b60d46d8582be0d86cd7176048bcfe30ccf (patch)
tree       3b1ed96ad276c6aeff56c58380e59e643559e9c4
parent     1469ce26e2e58082fd4bbb343cad60f2d696a641 (diff)
kern/thread: new preemption control macros
These new macros take care of disabling/restoring interrupts in the
appropriate order.
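For reference, a minimal sketch of how a caller is expected to use the new helpers from kern/thread.h; the function example_defer_work and its body are hypothetical and only illustrate the intended pairing (preemption is disabled before interrupts are saved, and interrupts are restored before preemption is re-enabled), following the pattern of callers converted by this patch:

#include <kern/thread.h>

/*
 * Hypothetical caller (illustration only). The helpers keep the ordering
 * right: preemption off first, then interrupts saved/disabled; on the way
 * out, interrupts restored first, then preemption re-enabled.
 */
static void
example_defer_work(void)
{
    unsigned long flags;

    thread_preempt_disable_intr_save(&flags);

    /* ... touch per-CPU state that must not be preempted or interrupted ... */

    thread_preempt_enable_intr_restore(flags);
}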
-rw-r--r--  kern/llsync.c    6
-rw-r--r--  kern/spinlock.h  13
-rw-r--r--  kern/sref.c      6
-rw-r--r--  kern/thread.c    18
-rw-r--r--  kern/thread.h    26
-rw-r--r--  kern/work.c      6
6 files changed, 36 insertions, 39 deletions
diff --git a/kern/llsync.c b/kern/llsync.c
index b4d09b5a..34a3fa0c 100644
--- a/kern/llsync.c
+++ b/kern/llsync.c
@@ -301,12 +301,10 @@ llsync_defer(struct work *work)
     struct llsync_cpu_data *cpu_data;
     unsigned long flags;
 
-    thread_preempt_disable();
-    cpu_intr_save(&flags);
+    thread_preempt_disable_intr_save(&flags);
     cpu_data = llsync_get_cpu_data();
     work_queue_push(&cpu_data->queue0, work);
-    cpu_intr_restore(flags);
-    thread_preempt_enable();
+    thread_preempt_enable_intr_restore(flags);
 }
 
 static void
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 50ac5401..49e6d558 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -33,7 +33,6 @@
 #include <kern/spinlock_i.h>
 #include <kern/spinlock_types.h>
 #include <kern/thread.h>
-#include <machine/cpu.h>
 
 struct spinlock;
 
@@ -121,13 +120,11 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
 {
     int error;
 
-    thread_preempt_disable();
-    cpu_intr_save(flags);
+    thread_preempt_disable_intr_save(flags);
     error = spinlock_lock_fast(lock);
 
     if (unlikely(error)) {
-        cpu_intr_restore(*flags);
-        thread_preempt_enable();
+        thread_preempt_enable_intr_restore(*flags);
     }
 
     return error;
@@ -147,8 +144,7 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
 static inline void
 spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
 {
-    thread_preempt_disable();
-    cpu_intr_save(flags);
+    thread_preempt_disable_intr_save(flags);
     spinlock_lock_common(lock);
 }
 
@@ -165,8 +161,7 @@ static inline void
 spinlock_unlock_intr_restore(struct spinlock *lock, unsigned long flags)
 {
     spinlock_unlock_common(lock);
-    cpu_intr_restore(flags);
-    thread_preempt_enable();
+    thread_preempt_enable_intr_restore(flags);
 }
 
 /*
diff --git a/kern/sref.c b/kern/sref.c
index 95daeb0f..dd81ca62 100644
--- a/kern/sref.c
+++ b/kern/sref.c
@@ -798,15 +798,13 @@ sref_manage(void *arg)
     cache = arg;
 
     for (;;) {
-        thread_preempt_disable();
-        cpu_intr_save(&flags);
+        thread_preempt_disable_intr_save(&flags);
 
         while (!sref_cache_is_dirty(cache)) {
             thread_sleep(NULL, cache, "sref");
         }
 
-        cpu_intr_restore(flags);
-        thread_preempt_enable();
+        thread_preempt_enable_intr_restore(flags);
 
         sref_cache_flush(cache, &queue);
         sref_review(&queue);
diff --git a/kern/thread.c b/kern/thread.c
index 5d8ac11d..9ee9b99e 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1277,8 +1277,7 @@ thread_sched_fs_balance_scan(struct thread_runq *runq,
 
     remote_runq = NULL;
 
-    thread_preempt_disable();
-    cpu_intr_save(&flags);
+    thread_preempt_disable_intr_save(&flags);
 
     cpumap_for_each(&thread_active_runqs, i) {
         tmp = percpu_ptr(thread_runq, i);
@@ -1312,8 +1311,7 @@
         spinlock_unlock(&remote_runq->lock);
     }
 
-    cpu_intr_restore(flags);
-    thread_preempt_enable();
+    thread_preempt_enable_intr_restore(flags);
 
     return remote_runq;
 }
@@ -1444,8 +1442,7 @@ thread_sched_fs_balance(struct thread_runq *runq, unsigned long *flags)
     remote_runq = thread_sched_fs_balance_scan(runq, highest_round);
 
     if (remote_runq != NULL) {
-        thread_preempt_disable();
-        cpu_intr_save(flags);
+        thread_preempt_disable_intr_save(flags);
         thread_runq_double_lock(runq, remote_runq);
         nr_migrations = thread_sched_fs_balance_migrate(runq, remote_runq,
                                                         highest_round);
@@ -1472,8 +1469,7 @@ thread_sched_fs_balance(struct thread_runq *runq, unsigned long *flags)
             continue;
         }
 
-        thread_preempt_disable();
-        cpu_intr_save(flags);
+        thread_preempt_disable_intr_save(flags);
         thread_runq_double_lock(runq, remote_runq);
         nr_migrations = thread_sched_fs_balance_migrate(runq, remote_runq,
                                                         highest_round);
@@ -2452,8 +2448,7 @@ thread_wakeup_common(struct thread *thread, int error)
         thread_unlock_runq(runq, flags);
     }
 
-    thread_preempt_disable();
-    cpu_intr_save(&flags);
+    thread_preempt_disable_intr_save(&flags);
 
     if (!thread->pinned) {
         runq = thread_get_real_sched_ops(thread)->select_runq(thread);
@@ -2469,8 +2464,7 @@ thread_wakeup_common(struct thread *thread, int error)
     thread->wakeup_error = error;
     thread_runq_wakeup(runq, thread);
     spinlock_unlock(&runq->lock);
-    cpu_intr_restore(flags);
-    thread_preempt_enable();
+    thread_preempt_enable_intr_restore(flags);
 
     return 0;
 }
diff --git a/kern/thread.h b/kern/thread.h
index 1a811685..07973431 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -570,6 +570,17 @@ thread_preempt_enabled(void)
 }
 
 static inline void
+thread_preempt_disable(void)
+{
+    struct thread *thread;
+
+    thread = thread_self();
+    thread->preempt++;
+    assert(thread->preempt != 0);
+    barrier();
+}
+
+static inline void
 thread_preempt_enable_no_resched(void)
 {
     struct thread *thread;
@@ -600,14 +611,17 @@ thread_preempt_enable(void)
 }
 
 static inline void
-thread_preempt_disable(void)
+thread_preempt_disable_intr_save(unsigned long *flags)
 {
-    struct thread *thread;
+    thread_preempt_disable();
+    cpu_intr_save(flags);
+}
 
-    thread = thread_self();
-    thread->preempt++;
-    assert(thread->preempt != 0);
-    barrier();
+static inline void
+thread_preempt_enable_intr_restore(unsigned long flags)
+{
+    cpu_intr_restore(flags);
+    thread_preempt_enable();
 }
 
 /*
diff --git a/kern/work.c b/kern/work.c
index 46475229..c0eb0bb1 100644
--- a/kern/work.c
+++ b/kern/work.c
@@ -217,8 +217,7 @@ work_pool_acquire(struct work_pool *pool, unsigned long *flags)
     if (pool->flags & WORK_PF_GLOBAL) {
         spinlock_lock_intr_save(&pool->lock, flags);
     } else {
-        thread_preempt_disable();
-        cpu_intr_save(flags);
+        thread_preempt_disable_intr_save(flags);
     }
 }
 
@@ -228,8 +227,7 @@ work_pool_release(struct work_pool *pool, unsigned long flags)
     if (pool->flags & WORK_PF_GLOBAL) {
         spinlock_unlock_intr_restore(&pool->lock, flags);
     } else {
-        cpu_intr_restore(flags);
-        thread_preempt_enable();
+        thread_preempt_enable_intr_restore(flags);
     }
 }