summary | refs | log | tree | commit | diff
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
author	Peter Zijlstra <peterz@infradead.org>	2021-03-02 12:16:48 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-05-12 11:43:26 +0200
commit	d66f1b06b5b438cd20ba3664b8eef1f9c79e84bf (patch)
tree	f5f0faf4e54e9d9f3ded4f26b007a7fa544173ab /kernel/sched/core.c
parent	5cb9eaa3d274f75539077a28cf01e3563195fa53 (diff)
sched: Prepare for Core-wide rq->lock
When switching on core-sched, CPUs need to agree which lock to use for
their RQ.

The new rule will be that rq->core_enabled will be toggled while
holding all rq->__locks that belong to a core. This means we need to
double check the rq->core_enabled value after each lock acquire and
retry if it changed. This also has implications for those sites that
take multiple RQ locks, they need to be careful that the second lock
doesn't end up being the first lock.

Verify the lock pointer after acquiring the first lock, because if
they're on the same core, holding any of the rq->__lock instances will
pin the core state.

While there, change the rq->__lock order to CPU number, instead of rq
address, this greatly simplifies the next patch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/YJUNY0dmrJMD/BIm@hirez.programming.kicks-ass.net
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	48
1 file changed, 46 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5e6f5f5750a3..8bd2f12810e3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -186,12 +186,37 @@ int sysctl_sched_rt_runtime = 950000;
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
- raw_spin_lock_nested(rq_lockp(rq), subclass);
+ raw_spinlock_t *lock;
+
+ if (sched_core_disabled()) {
+ raw_spin_lock_nested(&rq->__lock, subclass);
+ return;
+ }
+
+ for (;;) {
+ lock = rq_lockp(rq);
+ raw_spin_lock_nested(lock, subclass);
+ if (likely(lock == rq_lockp(rq)))
+ return;
+ raw_spin_unlock(lock);
+ }
}
bool raw_spin_rq_trylock(struct rq *rq)
{
- return raw_spin_trylock(rq_lockp(rq));
+ raw_spinlock_t *lock;
+ bool ret;
+
+ if (sched_core_disabled())
+ return raw_spin_trylock(&rq->__lock);
+
+ for (;;) {
+ lock = rq_lockp(rq);
+ ret = raw_spin_trylock(lock);
+ if (!ret || (likely(lock == rq_lockp(rq))))
+ return ret;
+ raw_spin_unlock(lock);
+ }
}
void raw_spin_rq_unlock(struct rq *rq)
@@ -199,6 +224,25 @@ void raw_spin_rq_unlock(struct rq *rq)
raw_spin_unlock(rq_lockp(rq));
}
+#ifdef CONFIG_SMP
+/*
+ * double_rq_lock - safely lock two runqueues
+ */
+void double_rq_lock(struct rq *rq1, struct rq *rq2)
+{
+ lockdep_assert_irqs_disabled();
+
+ if (rq_order_less(rq2, rq1))
+ swap(rq1, rq2);
+
+ raw_spin_rq_lock(rq1);
+ if (rq_lockp(rq1) == rq_lockp(rq2))
+ return;
+
+ raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+}
+#endif
+
/*
* __task_rq_lock - lock the rq @p resides on.
*/