author		Michal Koutný <mkoutny@suse.com>	2025-03-10 18:04:40 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2025-04-08 20:55:54 +0200
commit		87f1fb77d87a6dac9968a321bb10799ae6d2039c (patch)
tree		d8d3d11a643117610c6d9b8585de517182f9b9f1
parent		d6809c2f606c14f9e95be87d75a576901d2fa050 (diff)
sched: Add RT_GROUP WARN checks for non-root task_groups
With CONFIG_RT_GROUP_SCHED built in but RT_GROUPs disabled at runtime, we
expect only the root task_group to exist and all sched_rt_entities to be
queued on the root's rt_rq.
If we encounter a non-root RT_GROUP, something went wrong.
Signed-off-by: Michal Koutný <mkoutny@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250310170442.504716-9-mkoutny@suse.com
-rw-r--r--	kernel/sched/rt.c	| 14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b6119341f0e2..778911bebacb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -176,11 +176,14 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
+	/* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
+	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
 	return rt_rq->rq;
 }
 
 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 {
+	WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group);
 	return rt_se->rt_rq;
 }
 
@@ -188,6 +191,7 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_se->rt_rq;
 
+	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
 	return rt_rq->rq;
 }
 
@@ -504,8 +508,10 @@ typedef struct task_group *rt_rq_iter_t;
 
 static inline struct task_group *next_task_group(struct task_group *tg)
 {
-	if (!rt_group_sched_enabled())
+	if (!rt_group_sched_enabled()) {
+		WARN_ON(tg != &root_task_group);
 		return NULL;
+	}
 
 	do {
 		tg = list_entry_rcu(tg->list.next,
@@ -2607,8 +2613,9 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu)
 {
 	struct rt_rq *rt_rq;
 
-#ifdef CONFIG_RT_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED // XXX maybe add task_rt_rq(), see also sched_rt_period_rt_rq
 	rt_rq = task_group(p)->rt_rq[cpu];
+	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
 #else
 	rt_rq = &cpu_rq(cpu)->rt;
 #endif
@@ -2718,6 +2725,9 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
 	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
 		return -EBUSY;
 
+	if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group))
+		return -EBUSY;
+
 	total = to_ratio(period, runtime);
 
 	/*
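Every added check asserts the same invariant stated in the commit message: when
the kernel is built with CONFIG_RT_GROUP_SCHED but RT group scheduling is
disabled at runtime, any rt_rq (and thus any queued sched_rt_entity) that is
reached must belong to root_task_group. A minimal sketch of that shared pattern,
factored into a hypothetical helper for illustration only (the patch open-codes
the WARN_ON() in each accessor; rt_group_sched_enabled() and root_task_group are
the symbols used by this series):

/*
 * Hypothetical helper illustrating the invariant asserted by this patch;
 * the patch itself open-codes the check in each accessor rather than
 * introducing a function like this.
 */
static inline void rt_rq_assert_root_only(struct rt_rq *rt_rq)
{
	/*
	 * With CONFIG_RT_GROUP_SCHED built in but RT_GROUPs disabled at
	 * runtime, only root_task_group's rt_rq should ever be referenced.
	 */
	WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
}

Using WARN_ON() rather than a hard assertion keeps the system running while
still flagging the broken expectation in the kernel log.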