author    Mauro Carvalho Chehab <mchehab+huawei@kernel.org>    2024-08-05 08:25:09 +0200
committer Mauro Carvalho Chehab <mchehab+huawei@kernel.org>    2024-08-05 08:25:09 +0200
commit    2c25dcc2361949bc7da730d22de36c019c6bf1e3 (patch)
tree      3422f89adb17748bde8419918cb7959d79c773f3 /kernel/sched/core.c
parent    ba5c778cab1dd3e4918f940989e771e2818afee8 (diff)
parent    de9c2c66ad8e787abec7c9d7eff4f8c3cdd28aed (diff)
Merge tag 'v6.11-rc2' into media_stage
Linux 6.11-rc2

* tag 'v6.11-rc2': (283 commits)
  Linux 6.11-rc2
  profiling: remove profile=sleep support
  arm: dts: arm: versatile-ab: Fix duplicate clock node name
  runtime constants: deal with old decrepit linkers
  clocksource: Fix brown-bag boolean thinko in cs_watchdog_read()
  cifs: update internal version number
  smb: client: fix FSCTL_GET_REPARSE_POINT against NetApp
  smb3: add dynamic tracepoints for shutdown ioctl
  cifs: Remove cifs_aio_ctx
  smb: client: handle lack of FSCTL_GET_REPARSE_POINT support
  arm64: jump_label: Ensure patched jump_labels are visible to all CPUs
  syscalls: fix syscall macros for newfstat/newfstatat
  uretprobe: change syscall number, again
  thermal: core: Update thermal zone registration documentation
  Revert "nouveau: rip out busy fence waits"
  protect the fetch of ->fd[fd] in do_dup2() from mispredictions
  x86/uaccess: Zero the 8-byte get_range case on failure on 32-bit
  riscv: Fix linear mapping checks for non-contiguous memory regions
  KVM: x86/mmu: fix determination of max NPT mapping level for private pages
  PCI: pciehp: Retain Power Indicator bits for userspace indicators
  ...
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--    kernel/sched/core.c    68
1 file changed, 47 insertions(+), 21 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9f655025607..f3951e4a55e5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7845,6 +7845,30 @@ void set_rq_offline(struct rq *rq)
 	}
 }
 
+static inline void sched_set_rq_online(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_online(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
+static inline void sched_set_rq_offline(struct rq *rq, int cpu)
+{
+	struct rq_flags rf;
+
+	rq_lock_irqsave(rq, &rf);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	rq_unlock_irqrestore(rq, &rf);
+}
+
 /*
  * used to mark begin/end of suspend/resume:
  */
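
The two helpers above factor out a lock/check/set sequence that was previously open-coded in both sched_cpu_activate() and sched_cpu_deactivate() (see the removed lines in the hunks below). A minimal annotated sketch of the online variant; the comments are our reading of the code, not anything carried in the patch itself:

	static inline void sched_set_rq_online(struct rq *rq, int cpu)
	{
		struct rq_flags rf;

		/* Take the runqueue lock and save IRQ state in rf. */
		rq_lock_irqsave(rq, &rf);
		/* rq->rd is the root domain; it may not be attached yet,
		 * hence the check. */
		if (rq->rd) {
			/* The CPU must already belong to its root domain's span. */
			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
			/* Flip rq->online and run the sched classes'
			 * rq_online callbacks. */
			set_rq_online(rq);
		}
		rq_unlock_irqrestore(rq, &rf);
	}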
@@ -7895,10 +7919,25 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 	return 0;
 }
 
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
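
sched_smt_present is a static key counting cores that have two SMT siblings online: the weight == 2 test fires when a core's second sibling comes up (and, symmetrically, just before it goes back down), so the key is adjusted once per core rather than once per thread. Consumers read it via sched_smt_active() from <linux/sched/smt.h>. A hedged sketch of a typical consumer; pick_idle_sibling_sketch() is invented for illustration, while sched_smt_active(), cpu_smt_mask(), for_each_cpu() and idle_cpu() are existing kernel APIs:

	#include <linux/sched/smt.h>
	#include <linux/topology.h>

	static int pick_idle_sibling_sketch(int cpu)
	{
		int sibling;

		/* Static branch: effectively free when no online core has SMT. */
		if (!sched_smt_active())
			return -1;

		/* Otherwise scan this CPU's SMT siblings for an idle one. */
		for_each_cpu(sibling, cpu_smt_mask(cpu)) {
			if (sibling != cpu && idle_cpu(sibling))
				return sibling;
		}
		return -1;
	}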
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 
 	/*
 	 * Clear the balance_push callback and prepare to schedule
@@ -7906,13 +7945,10 @@ int sched_cpu_activate(unsigned int cpu)
 	 */
 	balance_push_set(cpu, false);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+	sched_smt_present_inc(cpu);
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -7930,12 +7966,7 @@ int sched_cpu_activate(unsigned int cpu)
 	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
 	 *    domains.
 	 */
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_online(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_online(rq, cpu);
 
 	return 0;
 }
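
Taken together, the activate-side hunks reduce sched_cpu_activate() to straight-line calls into the new helpers. Roughly, as reconstructed from the context lines in this excerpt (the body of the sched_smp_initialized branch is not shown here and is elided into a comment):

	int sched_cpu_activate(unsigned int cpu)
	{
		struct rq *rq = cpu_rq(cpu);	/* the local struct rq_flags is gone */

		balance_push_set(cpu, false);
		sched_smt_present_inc(cpu);	/* replaces the open-coded #ifdef block */
		set_cpu_active(cpu, true);

		if (sched_smp_initialized) {
			/* NUMA/domain updates, not shown in this excerpt */
		}

		sched_set_rq_online(rq, cpu);	/* replaces the open-coded lock/check/set */
		return 0;
	}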
@@ -7943,7 +7974,6 @@ int sched_cpu_activate(unsigned int cpu)
 int sched_cpu_deactivate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	struct rq_flags rf;
 	int ret;
 
 	/*
@@ -7974,20 +8004,14 @@ int sched_cpu_deactivate(unsigned int cpu)
 	 */
 	synchronize_rcu();
 
-	rq_lock_irqsave(rq, &rf);
-	if (rq->rd) {
-		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-		set_rq_offline(rq);
-	}
-	rq_unlock_irqrestore(rq, &rf);
+	sched_set_rq_offline(rq, cpu);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_dec_cpuslocked(&sched_smt_present);
+	sched_smt_present_dec(cpu);
 
+#ifdef CONFIG_SCHED_SMT
 	sched_core_cpu_deactivate(cpu);
 #endif
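
Note the #ifdef movement in this hunk: the decrement no longer needs a CONFIG_SCHED_SMT guard at the call site because the helper carries the guard internally, while sched_core_cpu_deactivate() still does. In a !CONFIG_SCHED_SMT build the helper's body preprocesses away entirely, i.e. it reduces to:

	/* What sched_smt_present_dec() compiles down to without
	 * CONFIG_SCHED_SMT: an empty static inline, so callers need
	 * no guard of their own. */
	static inline void sched_smt_present_dec(int cpu)
	{
	}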
@@ -7997,6 +8021,8 @@ int sched_cpu_deactivate(unsigned int cpu)
 	sched_update_numa(cpu, false);
 	ret = cpuset_cpu_inactive(cpu);
 	if (ret) {
+		sched_smt_present_inc(cpu);
+		sched_set_rq_online(rq, cpu);
 		balance_push_set(cpu, false);
 		set_cpu_active(cpu, true);
 		sched_update_numa(cpu, true);
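
This last hunk is the bug fix the refactoring was building toward: cpuset_cpu_inactive() can fail, and before this change the error path restored cpu_active and balance_push state (visible in the context lines) but never undid the earlier SMT decrement or the rq-offline transition, leaving sched_smt_present unbalanced. With the helpers, the rollback now mirrors the teardown in reverse order. A condensed sketch of the idiom; deactivate_sketch() is illustrative, the helper calls are the patch's own:

	static int deactivate_sketch(struct rq *rq, unsigned int cpu)
	{
		int ret;

		sched_set_rq_offline(rq, cpu);	/* teardown step 1 */
		sched_smt_present_dec(cpu);	/* teardown step 2 */

		ret = cpuset_cpu_inactive(cpu);	/* step 3: may fail */
		if (ret) {
			/* Undo in reverse order so every dec has a matching inc. */
			sched_smt_present_inc(cpu);
			sched_set_rq_online(rq, cpu);
			return ret;
		}
		return 0;
	}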