author    Andrea Righi <arighi@nvidia.com>      2025-04-05 15:39:22 +0200
committer Tejun Heo <tj@kernel.org>             2025-04-07 07:13:52 -1000
commit    23c63a965275ce5d6268075bbfe7ce8b6ffe9a35 (patch)
tree      f24e735839d8ebc845cae604d091609f302333b9 /kernel/sched/ext_idle.c
parent    29f512f555ec16446525c11aa7422ae09236ab32 (diff)
sched_ext: idle: Explicitly pass allowed cpumask to scx_select_cpu_dfl()
Modify scx_select_cpu_dfl() to take the allowed cpumask as an explicit
argument, instead of implicitly using @p->cpus_ptr.
This prepares for future changes where arbitrary cpumasks may be passed
to the built-in idle CPU selection policy.
This is a pure refactoring with no functional changes.
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
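
A minimal sketch of the resulting calling convention (the second call site below is hypothetical, illustrating the arbitrary-cpumask use case this patch prepares for; only the NULL-passing form exists after this commit):

	/* Existing call sites pass NULL; the helper then falls back to
	 * p->cpus_ptr internally, preserving current behavior. */
	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);

	/* Hypothetical future call site: constrain the built-in idle CPU
	 * selection policy to an arbitrary subset mask. */
	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, subset_mask, 0);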
Diffstat (limited to 'kernel/sched/ext_idle.c')
 kernel/sched/ext_idle.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index ed37fb8e4518..5d6253c6ed90 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -438,9 +438,11 @@ static inline bool task_affinity_all(const struct task_struct *p)
  * NOTE: tasks that can only run on 1 CPU are excluded by this logic, because
  * we never call ops.select_cpu() for them, see select_task_rq().
  */
-s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64 flags)
+s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+		       const struct cpumask *cpus_allowed, u64 flags)
 {
 	const struct cpumask *llc_cpus = NULL, *numa_cpus = NULL;
+	const struct cpumask *allowed = cpus_allowed ?: p->cpus_ptr;
 	int node = scx_cpu_node_if_enabled(prev_cpu);
 	s32 cpu;
 
@@ -460,9 +462,9 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_numa_idle_cpumask);
 		const struct cpumask *cpus = numa_span(prev_cpu);
 
-		if (task_affinity_all(p))
+		if (allowed == p->cpus_ptr && task_affinity_all(p))
 			numa_cpus = cpus;
-		else if (cpus && cpumask_and(local_cpus, p->cpus_ptr, cpus))
+		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
 			numa_cpus = local_cpus;
 	}
 
@@ -470,9 +472,9 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		struct cpumask *local_cpus = this_cpu_cpumask_var_ptr(local_llc_idle_cpumask);
 		const struct cpumask *cpus = llc_span(prev_cpu);
 
-		if (task_affinity_all(p))
+		if (allowed == p->cpus_ptr && task_affinity_all(p))
 			llc_cpus = cpus;
-		else if (cpus && cpumask_and(local_cpus, p->cpus_ptr, cpus))
+		else if (cpus && cpumask_and(local_cpus, allowed, cpus))
 			llc_cpus = local_cpus;
 	}
 
@@ -511,7 +513,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		    cpu_rq(cpu)->scx.local_dsq.nr == 0 &&
 		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
 		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
-			if (cpumask_test_cpu(cpu, p->cpus_ptr))
+			if (cpumask_test_cpu(cpu, allowed))
 				goto out_unlock;
 		}
 	}
@@ -556,7 +558,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 		 * begin in prev_cpu's node and proceed to other nodes in
 		 * order of increasing distance.
 		 */
-		cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags | SCX_PICK_IDLE_CORE);
+		cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
 		if (cpu >= 0)
 			goto out_unlock;
 
@@ -604,7 +606,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
 	 * in prev_cpu's node and proceed to other nodes in order of
 	 * increasing distance.
 	 */
-	cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
+	cpu = scx_pick_idle_cpu(allowed, node, flags);
 
 out_unlock:
 	rcu_read_unlock();
@@ -858,7 +860,7 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 		goto prev_cpu;
 
 #ifdef CONFIG_SMP
-	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, 0);
+	cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, NULL, 0);
 	if (cpu >= 0) {
 		*is_idle = true;
 		return cpu;
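
The fallback on entry (allowed = cpus_allowed ?: p->cpus_ptr) uses the GNU C conditional operator with an omitted middle operand, a common kernel idiom. A standalone userspace sketch of the same idiom (illustrative only, not kernel code; builds with gcc or clang):

	#include <stdio.h>

	/* "a ?: b" evaluates to a when a is non-NULL/non-zero, else to b,
	 * evaluating a only once; same shape as the allowed fallback above. */
	static const char *pick_mask(const char *requested, const char *dflt)
	{
		return requested ?: dflt;
	}

	int main(void)
	{
		printf("%s\n", pick_mask(NULL, "p->cpus_ptr"));     /* prints p->cpus_ptr */
		printf("%s\n", pick_mask("subset", "p->cpus_ptr")); /* prints subset */
		return 0;
	}

Note also the guard added to the NUMA and LLC branches: the task_affinity_all() shortcut is now taken only when allowed == p->cpus_ptr, because that shortcut reasons about the task's full affinity and would over-approximate a narrower caller-supplied mask.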