author     Richard Braun <rbraun@sceen.net>    2013-05-15 22:34:54 +0200
committer  Richard Braun <rbraun@sceen.net>    2013-05-15 22:34:54 +0200
commit     cf572e25ec9bf2d56c1fef5eeced991dff72db5d
tree       b09509d8fc69cd90c2a37052e3f9c40780d7c211    /kern/thread.c
parent     5abf84415cb501603ac059f91f37b3d030b24454
x86/cpu: pass flags by address to cpu_intr_save
Not a necessary change, but done for consistency.
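
For context, a sketch of the interface change this commit applies. The prototypes below are assumptions inferred from the call sites in the diff, not copied from the x86 cpu header; cpu_intr_restore() and thread_preempt_enable() are the assumed counterparts of the calls visible in the hunks.

    /* Before this commit, the saved interrupt state was returned by value
     * (assumed prototype, inferred from "flags = cpu_intr_save();"):
     *
     *     unsigned long cpu_intr_save(void);
     *
     * After this commit, the caller passes the destination by address and
     * the function writes the saved state through the pointer (assumed
     * prototype, inferred from "cpu_intr_save(&flags);"):
     */
    void cpu_intr_save(unsigned long *flags);

    /* Typical call sequence in kern/thread.c after the change. */
    unsigned long flags;

    thread_preempt_disable();
    cpu_intr_save(&flags);
    /* ... run-queue work with interrupts disabled ... */
    cpu_intr_restore(flags);
    thread_preempt_enable();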
Diffstat (limited to 'kern/thread.c')
-rw-r--r--  kern/thread.c  |  8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kern/thread.c b/kern/thread.c
index 9db750b2..ecabbe54 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -1047,7 +1047,7 @@ thread_sched_ts_balance_scan(struct thread_runq *runq,
remote_runq = NULL;
thread_preempt_disable();
- flags = cpu_intr_save();
+ cpu_intr_save(&flags);
bitmap_for_each(thread_active_runqs, nr_runqs, i) {
tmp = &thread_runqs[i];
@@ -1196,7 +1196,7 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
if (remote_runq != NULL) {
thread_preempt_disable();
- *flags = cpu_intr_save();
+ cpu_intr_save(flags);
thread_runq_double_lock(runq, remote_runq);
nr_migrations = thread_sched_ts_balance_migrate(runq, remote_runq,
highest_round);
@@ -1222,7 +1222,7 @@ thread_sched_ts_balance(struct thread_runq *runq, unsigned long *flags)
continue;
thread_preempt_disable();
- *flags = cpu_intr_save();
+ cpu_intr_save(flags);
thread_runq_double_lock(runq, remote_runq);
nr_migrations = thread_sched_ts_balance_migrate(runq, remote_runq,
highest_round);
@@ -1835,7 +1835,7 @@ thread_wakeup(struct thread *thread)
}
thread_preempt_disable();
- flags = cpu_intr_save();
+ cpu_intr_save(&flags);
/* The returned run queue is locked */
runq = thread_sched_ops[thread->sched_class].select_runq();