author     Richard Braun <rbraun@sceen.net>  2013-05-15 22:34:54 +0200
committer  Richard Braun <rbraun@sceen.net>  2013-05-15 22:34:54 +0200
commit     cf572e25ec9bf2d56c1fef5eeced991dff72db5d (patch)
tree       b09509d8fc69cd90c2a37052e3f9c40780d7c211
parent     5abf84415cb501603ac059f91f37b3d030b24454 (diff)
x86/cpu: pass flags by address to cpu_intr_save
Not a necessary change, but done for consistency.
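
In practice, the change replaces the by-value return with an output
parameter. A minimal before/after sketch of the calling convention (the
surrounding caller is hypothetical; cpu_intr_restore() is the counterpart
visible in the cpu.h context below):

    unsigned long flags;

    /* Before: interrupt flags returned by value */
    flags = cpu_intr_save();

    /* After: flags stored through a pointer, matching the *flags
       convention already used by the spinlock_*_intr_save() functions */
    cpu_intr_save(&flags);

    /* ... code running with interrupts disabled ... */

    cpu_intr_restore(flags);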
-rw-r--r--  arch/x86/machine/cpu.h  10
-rw-r--r--  kern/spinlock.h          4
-rw-r--r--  kern/thread.c            8
3 files changed, 9 insertions, 13 deletions
diff --git a/arch/x86/machine/cpu.h b/arch/x86/machine/cpu.h
index edc98801..ab087d86 100644
--- a/arch/x86/machine/cpu.h
+++ b/arch/x86/machine/cpu.h
@@ -367,15 +367,11 @@ cpu_intr_restore(unsigned long eflags)
  *
  * Implies a compiler barrier.
  */
-static __always_inline unsigned long
-cpu_intr_save(void)
+static __always_inline void
+cpu_intr_save(unsigned long *eflags)
 {
-    unsigned long eflags;
-
-    eflags = cpu_get_eflags();
+    *eflags = cpu_get_eflags();
     cpu_intr_disable();
-
-    return eflags;
 }
 
 /*
diff --git a/kern/spinlock.h b/kern/spinlock.h
index 01329f1d..4c983471 100644
--- a/kern/spinlock.h
+++ b/kern/spinlock.h
@@ -83,7 +83,7 @@ spinlock_trylock_intr_save(struct spinlock *lock, unsigned long *flags)
     int busy;
 
     thread_preempt_disable();
-    *flags = cpu_intr_save();
+    cpu_intr_save(flags);
     busy = spinlock_tryacquire(lock);
 
     if (busy) {
@@ -98,7 +98,7 @@ static inline void
 spinlock_lock_intr_save(struct spinlock *lock, unsigned long *flags)
 {
     thread_preempt_disable();
-    *flags = cpu_intr_save();
+    cpu_intr_save(flags);
     spinlock_acquire(lock);
 }
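
For context, a typical caller of the updated spinlock interface would look
like the sketch below. The sketch assumes spinlock_unlock_intr_restore() as
the matching release primitive; it is not part of this diff:

    struct spinlock lock;
    unsigned long flags;

    spinlock_lock_intr_save(&lock, &flags);
    /* critical section: runs with preemption and interrupts disabled */
    spinlock_unlock_intr_restore(&lock, flags);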
diff --git a/kern/thread.c b/kern/thread.c
index 9db750b2..ecabbe54 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1047,7 +1047,7 @@ thread_sched_ts_balance_scan(struct thread_runq *runq,
     remote_runq = NULL;
 
     thread_preempt_disable();
-    flags = cpu_intr_save();
+    cpu_intr_save(&flags);
 
     bitmap_for_each(thread_active_runqs, nr_runqs, i) {
         tmp = &thread_runqs[i];
@@ -1196,7 +1196,7 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
 
     if (remote_runq != NULL) {
         thread_preempt_disable();
-        *flags = cpu_intr_save();
+        cpu_intr_save(flags);
         thread_runq_double_lock(runq, remote_runq);
         nr_migrations = thread_sched_ts_balance_migrate(runq, remote_runq,
                                                         highest_round);
@@ -1222,7 +1222,7 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
             continue;
 
         thread_preempt_disable();
-        *flags = cpu_intr_save();
+        cpu_intr_save(flags);
         thread_runq_double_lock(runq, remote_runq);
         nr_migrations = thread_sched_ts_balance_migrate(runq, remote_runq,
                                                         highest_round);
@@ -1835,7 +1835,7 @@ thread_wakeup(struct thread *thread)
     }
 
     thread_preempt_disable();
-    flags = cpu_intr_save();
+    cpu_intr_save(&flags);
 
     /* The returned run queue is locked */
     runq = thread_sched_ops[thread->sched_class].select_runq();
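
Every call site touched by this patch follows the same bracket: disable
preemption, then save and disable interrupts into a caller-provided
variable, and later undo both in reverse order. A sketch of the full
pattern (cpu_intr_restore() appears in the cpu.h context above;
thread_preempt_enable() is an assumed counterpart not shown in this diff):

    unsigned long flags;

    thread_preempt_disable();
    cpu_intr_save(&flags);

    /* ... operate on the run queue with interrupts off ... */

    cpu_intr_restore(flags);
    thread_preempt_enable();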