author    | Benjamin Tissoires <benjamin.tissoires@redhat.com> | 2022-10-05 10:21:55 +0100
committer | Benjamin Tissoires <benjamin.tissoires@redhat.com> | 2022-10-05 10:21:55 +0100
commit    | edd1533d3ccd82dd5d600986d27d524e6be4c5fd (patch)
tree      | 1ac5ae82ea63114d5c13212e2819531e4507f800 /kernel/sched/core.c
parent    | 7d8fe4cfc54b5fb2093e12cffa8ca74d3c88e0fa (diff)
parent    | 98d67f250472cdd0f8d083830be3ec9dbb0c65a8 (diff)
Merge branch 'for-6.1/logitech' into for-linus
- Add handling of all Bluetooth HID++ devices and fixes in hid++
(Bastien Nocera)
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- | kernel/sched/core.c | 16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 189999007f327..ee28253c9ac0c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3802,7 +3802,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
 
-static inline bool ttwu_queue_cond(int cpu)
+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
 {
 	/*
 	 * Do not complicate things with the async wake_list while the CPU is
@@ -3811,6 +3811,10 @@ static inline bool ttwu_queue_cond(int cpu)
 	if (!cpu_active(cpu))
 		return false;
 
+	/* Ensure the task will still be allowed to run on the CPU. */
+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+		return false;
+
 	/*
 	 * If the CPU does not share cache, then queue the task on the
 	 * remote rqs wakelist to avoid accessing remote data.
@@ -3840,7 +3844,7 @@ static inline bool ttwu_queue_cond(int cpu)
 
 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
-	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu)) {
+	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
 		__ttwu_queue_wakelist(p, cpu, wake_flags);
 		return true;
@@ -9012,7 +9016,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 }
 
 int task_can_attach(struct task_struct *p,
-		    const struct cpumask *cs_cpus_allowed)
+		    const struct cpumask *cs_effective_cpus)
 {
 	int ret = 0;
 
@@ -9031,9 +9035,11 @@ int task_can_attach(struct task_struct *p,
 	}
 
 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-					      cs_cpus_allowed)) {
-		int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
+					      cs_effective_cpus)) {
+		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
 
+		if (unlikely(cpu >= nr_cpu_ids))
+			return -EINVAL;
 		ret = dl_cpu_busy(cpu, p);
 	}
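The task_can_attach() hunk hardens the deadline-task path: cpumask_any_and() returns an index at or beyond nr_cpu_ids when cpu_active_mask and cs_effective_cpus have no CPU in common, so the new check bails out with -EINVAL instead of handing a bogus CPU number to dl_cpu_busy(). Below is a minimal userspace sketch of that sentinel pattern, not kernel code; the helpers mask_any_and() and pick_cpu_or_fail() are hypothetical names chosen for illustration and only mimic the kernel convention of returning the mask width when the intersection is empty.

```c
/* Userspace sketch of the "out-of-range index as sentinel" guard. */
#include <stdio.h>

#define NR_CPU_IDS 8          /* stand-in for the kernel's nr_cpu_ids */
#define EINVAL     22

/* Return the first bit set in both masks, or nbits if they don't intersect. */
static unsigned int mask_any_and(unsigned long a, unsigned long b, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if ((a & b) & (1UL << i))
			return i;
	return nbits;          /* sentinel: no common bit */
}

/* Mirrors the guarded pattern added to task_can_attach(). */
static int pick_cpu_or_fail(unsigned long active, unsigned long allowed)
{
	unsigned int cpu = mask_any_and(active, allowed, NR_CPU_IDS);

	if (cpu >= NR_CPU_IDS) /* no usable CPU: fail instead of using a bogus index */
		return -EINVAL;
	return (int)cpu;
}

int main(void)
{
	printf("overlap:  %d\n", pick_cpu_or_fail(0x0F, 0x0C)); /* prints 2 */
	printf("disjoint: %d\n", pick_cpu_or_fail(0x0F, 0xF0)); /* prints -22 (-EINVAL) */
	return 0;
}
```

The same idea motivates the ttwu_queue_cond() change above it: a candidate CPU is only useful if the task is still allowed to run there, so the wakelist path now checks p->cpus_ptr before queueing the wakeup remotely.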