author | Peter Griffin <peter.griffin@linaro.org> | 2025-07-17 17:22:36 +0100
---|---|---
committer | Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org> | 2025-08-11 09:04:54 +0200
commit | 78b72897a5c8bb9d3b51fc6494a1eb09265487a4 (patch) |
tree | f1b85bcbeef417e9a7d61331b8bb36040fbd8a5f |
parent | 8f5ae30d69d7543eee0d70083daf4de8fe15d585 (diff) |
soc: samsung: exynos-pmu: Enable CPU Idle for gs101
Register CPU PM notifiers for gs101 that call the gs101_cpu_pmu_online/offline
callbacks, which in turn program the ACPM C2 hint. Due to limitations in the
el3mon/ACPM firmware, this hint is required in addition to the PSCI calls to
actually enter the C2 idle state.
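(For orientation, the registration and dispatch follow the generic cpu_pm
notifier pattern, roughly as in the simplified sketch below; the example_*
names are placeholders, not the driver's symbols.)

```c
#include <linux/cpu_pm.h>
#include <linux/notifier.h>

/* Placeholder hooks standing in for the gs101 online/offline helpers. */
static int example_c2_hint_set(void)
{
	return NOTIFY_OK;
}

static int example_c2_hint_clear(void)
{
	return NOTIFY_OK;
}

static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *v)
{
	switch (action) {
	case CPU_PM_ENTER:	/* CPU is about to enter a low-power (idle) state */
		return example_c2_hint_set();
	case CPU_PM_EXIT:	/* CPU has come back out of the low-power state */
		return example_c2_hint_clear();
	}

	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

/* Registered once (e.g. at probe time); cpuidle then fires it on every entry/exit. */
static int example_register_cpu_pm(void)
{
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}
```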
A couple of corner cases are handled: the request is ignored while the system
is rebooting or suspending, and likewise if the CPU is undergoing CPU hotplug.
Some common code is refactored so that it can be called from both the CPU
hotplug callbacks and the CPU PM notifier, taking into account that CPU PM
notifiers are called with IRQs disabled whereas CPU hotplug callbacks are not.
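(The resulting locking pattern is roughly the following minimal sketch,
assuming a shared helper and a single raw spinlock; the example_* names are
placeholders, not the driver's symbols.)

```c
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_cpupm_lock);

/* Shared helper; callers must hold example_cpupm_lock with IRQs disabled. */
static void example_program_hint(unsigned int cpu)
{
	/* ACPM hint / pmu-intr-gen programming would go here. */
}

/* CPU PM notifier path: IRQs are already off, a plain raw_spin_lock() suffices. */
static void example_from_cpu_pm_notifier(unsigned int cpu)
{
	raw_spin_lock(&example_cpupm_lock);
	example_program_hint(cpu);
	raw_spin_unlock(&example_cpupm_lock);
}

/* CPU hotplug callback path: IRQs may still be enabled, so save and restore them. */
static void example_from_cpuhp_callback(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_cpupm_lock, flags);
	example_program_hint(cpu);
	raw_spin_unlock_irqrestore(&example_cpupm_lock, flags);
}
```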
Additionally, because CPU PM notifiers use a raw_spinlock, the locking is
updated to use the raw_spinlock variants. This includes setting
.use_raw_spinlock = true on the pmu_regs regmap, and creating and registering
a custom pmu-intr-gen regmap instead of using the regmap provided by syscon.
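(A regmap that opts in to raw-spinlock internal locking is configured roughly
as in this minimal sketch; the register layout and names are placeholders, not
the real pmu-intr-gen map.)

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

/* MMIO regmap config that opts in to raw-spinlock internal locking. */
static const struct regmap_config example_raw_lock_cfg = {
	.name			= "example-intr-gen",
	.reg_bits		= 32,
	.reg_stride		= 4,
	.val_bits		= 32,
	.use_raw_spinlock	= true,
};

static int example_init_regmap(struct device *dev, void __iomem *base)
{
	struct regmap *map;

	/* Create the regmap over an already ioremapped region. */
	map = devm_regmap_init_mmio(dev, base, &example_raw_lock_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Safe to use from code that itself runs under raw spinlocks. */
	return 0;
}
```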
Note: this patch has a runtime dependency on adding the 'local-timer-stop' DT
property to the CPUs' idle-state nodes. This property tells the timer framework
to switch to a broadcast timer, as the local timer will be shut down in that
idle state. Without it, the system hangs in early boot with this patch applied.
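(For reference, the generic idle-states binding defines 'local-timer-stop' in
the idle-state nodes that the CPU nodes reference via cpu-idle-states; a
hypothetical sketch follows, with node names, PSCI parameter and latency
values as placeholders rather than the real gs101 values.)

```dts
idle-states {
	entry-method = "psci";

	CPU_SLEEP: cpu-sleep {
		compatible = "arm,idle-state";
		idle-state-name = "c2";
		/* Local timer stops in this state, so the broadcast timer must take over. */
		local-timer-stop;
		arm,psci-suspend-param = <0x0010000>;
		entry-latency-us = <70>;
		exit-latency-us = <160>;
		min-residency-us = <2000>;
	};
};

&cpu0 {
	cpu-idle-states = <&CPU_SLEEP>;
};
```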
Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
Link: https://lore.kernel.org/r/20250717-gs101-cpuidle-v7-1-33d51770114b@linaro.org
Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
-rw-r--r-- | drivers/soc/samsung/exynos-pmu.c | 276 |
1 file changed, 254 insertions, 22 deletions
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index a77288f49d24..22c50ca2aa79 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -7,7 +7,9 @@
 #include <linux/array_size.h>
 #include <linux/arm-smccc.h>
+#include <linux/bitmap.h>
 #include <linux/cpuhotplug.h>
+#include <linux/cpu_pm.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/mfd/core.h>
@@ -15,6 +17,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/reboot.h>
 #include <linux/regmap.h>
 
 #include <linux/soc/samsung/exynos-regs-pmu.h>
@@ -35,6 +38,15 @@ struct exynos_pmu_context {
         const struct exynos_pmu_data *pmu_data;
         struct regmap *pmureg;
         struct regmap *pmuintrgen;
+        /*
+         * Serialization lock for CPU hot plug and cpuidle ACPM hint
+         * programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot
+         * flags.
+         */
+        raw_spinlock_t cpupm_lock;
+        unsigned long *in_cpuhp;
+        bool sys_insuspend;
+        bool sys_inreboot;
 };
 
 void __iomem *pmu_base_addr;
@@ -221,6 +233,15 @@ static const struct regmap_config regmap_smccfg = {
         .reg_read = tensor_sec_reg_read,
         .reg_write = tensor_sec_reg_write,
         .reg_update_bits = tensor_sec_update_bits,
+        .use_raw_spinlock = true,
+};
+
+static const struct regmap_config regmap_pmu_intr = {
+        .name = "pmu_intr_gen",
+        .reg_bits = 32,
+        .reg_stride = 4,
+        .val_bits = 32,
+        .use_raw_spinlock = true,
 };
 
 static const struct exynos_pmu_data gs101_pmu_data = {
@@ -330,13 +351,19 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
 EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
 
 /*
- * CPU_INFORM register hint values which are used by
- * EL3 firmware (el3mon).
+ * CPU_INFORM register "hint" values are required to be programmed in addition to
+ * the standard PSCI calls to have functional CPU hotplug and CPU idle states.
+ * This is required to workaround limitations in the el3mon/ACPM firmware.
  */
 #define CPU_INFORM_CLEAR        0
 #define CPU_INFORM_C2           1
 
-static int gs101_cpuhp_pmu_online(unsigned int cpu)
+/*
+ * __gs101_cpu_pmu_ prefix functions are common code shared by CPU PM notifiers
+ * (CPUIdle) and CPU hotplug callbacks. Functions should be called with IRQs
+ * disabled and cpupm_lock held.
+ */
+static int __gs101_cpu_pmu_online(unsigned int cpu)
 {
         unsigned int cpuhint = smp_processor_id();
         u32 reg, mask;
@@ -358,10 +385,48 @@ static int gs101_cpuhp_pmu_online(unsigned int cpu)
         return 0;
 }
 
-static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_online(void)
+{
+        int cpu;
+
+        raw_spin_lock(&pmu_context->cpupm_lock);
+
+        if (pmu_context->sys_inreboot) {
+                raw_spin_unlock(&pmu_context->cpupm_lock);
+                return NOTIFY_OK;
+        }
+
+        cpu = smp_processor_id();
+        __gs101_cpu_pmu_online(cpu);
+        raw_spin_unlock(&pmu_context->cpupm_lock);
+
+        return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_online(unsigned int cpu)
+{
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+
+        __gs101_cpu_pmu_online(cpu);
+        /*
+         * Mark this CPU as having finished the hotplug.
+         * This means this CPU can now enter C2 idle state.
+         */
+        clear_bit(cpu, pmu_context->in_cpuhp);
+        raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+        return 0;
+}
+
+/* Common function shared by both CPU hot plug and CPUIdle */
+static int __gs101_cpu_pmu_offline(unsigned int cpu)
 {
-        u32 reg, mask;
         unsigned int cpuhint = smp_processor_id();
+        u32 reg, mask;
 
         /* set cpu inform hint */
         regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
@@ -379,6 +444,165 @@ static int gs101_cpuhp_pmu_offline(unsigned int cpu)
         regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
         regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
                      reg & mask);
+
+        return 0;
+}
+
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_offline(void)
+{
+        int cpu;
+
+        raw_spin_lock(&pmu_context->cpupm_lock);
+        cpu = smp_processor_id();
+
+        if (test_bit(cpu, pmu_context->in_cpuhp)) {
+                raw_spin_unlock(&pmu_context->cpupm_lock);
+                return NOTIFY_BAD;
+        }
+
+        /* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */
+        if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
+                raw_spin_unlock(&pmu_context->cpupm_lock);
+                return NOTIFY_OK;
+        }
+
+        __gs101_cpu_pmu_offline(cpu);
+        raw_spin_unlock(&pmu_context->cpupm_lock);
+
+        return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+{
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+        /*
+         * Mark this CPU as entering hotplug. So as not to confuse
+         * ACPM the CPU entering hotplug should not enter C2 idle state.
+         */
+        set_bit(cpu, pmu_context->in_cpuhp);
+        __gs101_cpu_pmu_offline(cpu);
+
+        raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+        return 0;
+}
+
+static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
+                                        unsigned long action, void *v)
+{
+        switch (action) {
+        case CPU_PM_ENTER:
+                return gs101_cpu_pmu_offline();
+
+        case CPU_PM_EXIT:
+                return gs101_cpu_pmu_online();
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block gs101_cpu_pm_notifier = {
+        .notifier_call = gs101_cpu_pm_notify_callback,
+        /*
+         * We want to be called first, as the ACPM hint and handshake is what
+         * puts the CPU into C2.
+         */
+        .priority = INT_MAX
+};
+
+static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
+                                        unsigned long event, void *v)
+{
+        unsigned long flags;
+
+        switch (event) {
+        case SYS_POWER_OFF:
+        case SYS_RESTART:
+                raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+                pmu_context->sys_inreboot = true;
+                raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpupm_reboot_nb = {
+        .priority = INT_MAX,
+        .notifier_call = exynos_cpupm_reboot_notifier,
+};
+
+static int setup_cpuhp_and_cpuidle(struct device *dev)
+{
+        struct device_node *intr_gen_node;
+        struct resource intrgen_res;
+        void __iomem *virt_addr;
+        int ret, cpu;
+
+        intr_gen_node = of_parse_phandle(dev->of_node,
+                                         "google,pmu-intr-gen-syscon", 0);
+        if (!intr_gen_node) {
+                /*
+                 * To maintain support for older DTs that didn't specify syscon
+                 * phandle just issue a warning rather than fail to probe.
+                 */
+                dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
+                return 0;
+        }
+
+        /*
+         * To avoid lockdep issues (CPU PM notifiers use raw spinlocks) create
+         * a mmio regmap for pmu-intr-gen that uses raw spinlocks instead of
+         * syscon provided regmap.
+         */
+        ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
+        of_node_put(intr_gen_node);
+
+        virt_addr = devm_ioremap(dev, intrgen_res.start,
+                                 resource_size(&intrgen_res));
+        if (!virt_addr)
+                return -ENOMEM;
+
+        pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
+                                                        &regmap_pmu_intr);
+        if (IS_ERR(pmu_context->pmuintrgen)) {
+                dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
+                return PTR_ERR(pmu_context->pmuintrgen);
+        }
+
+        /* register custom mmio regmap with syscon */
+        ret = of_syscon_register_regmap(intr_gen_node,
+                                        pmu_context->pmuintrgen);
+        if (ret)
+                return ret;
+
+        pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
+                                                   GFP_KERNEL);
+        if (!pmu_context->in_cpuhp)
+                return -ENOMEM;
+
+        raw_spin_lock_init(&pmu_context->cpupm_lock);
+        pmu_context->sys_inreboot = false;
+        pmu_context->sys_insuspend = false;
+
+        /* set PMU to power on */
+        for_each_online_cpu(cpu)
+                gs101_cpuhp_pmu_online(cpu);
+
+        /* register CPU hotplug callbacks */
+        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
+                          gs101_cpuhp_pmu_online, NULL);
+
+        cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
+                          NULL, gs101_cpuhp_pmu_offline);
+
+        /* register CPU PM notifiers for cpuidle */
+        cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
+        register_reboot_notifier(&exynos_cpupm_reboot_nb);
 
         return 0;
 }
@@ -435,23 +659,9 @@ static int exynos_pmu_probe(struct platform_device *pdev)
         pmu_context->dev = dev;
 
         if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
-                pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node,
-                                                            "google,pmu-intr-gen-syscon");
-                if (IS_ERR(pmu_context->pmuintrgen)) {
-                        /*
-                         * To maintain support for older DTs that didn't specify syscon phandle
-                         * just issue a warning rather than fail to probe.
-                         */
-                        dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n");
-                } else {
-                        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
-                                          "soc/exynos-pmu:prepare",
-                                          gs101_cpuhp_pmu_online, NULL);
-
-                        cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
-                                          "soc/exynos-pmu:online",
-                                          NULL, gs101_cpuhp_pmu_offline);
-                }
+                ret = setup_cpuhp_and_cpuidle(dev);
+                if (ret)
+                        return ret;
         }
 
         if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
@@ -471,10 +681,32 @@ static int exynos_pmu_probe(struct platform_device *pdev)
         return 0;
 }
 
+static int exynos_cpupm_suspend_noirq(struct device *dev)
+{
+        raw_spin_lock(&pmu_context->cpupm_lock);
+        pmu_context->sys_insuspend = true;
+        raw_spin_unlock(&pmu_context->cpupm_lock);
+        return 0;
+}
+
+static int exynos_cpupm_resume_noirq(struct device *dev)
+{
+        raw_spin_lock(&pmu_context->cpupm_lock);
+        pmu_context->sys_insuspend = false;
+        raw_spin_unlock(&pmu_context->cpupm_lock);
+        return 0;
+}
+
+static const struct dev_pm_ops cpupm_pm_ops = {
+        NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
+                                  exynos_cpupm_resume_noirq)
+};
+
 static struct platform_driver exynos_pmu_driver = {
         .driver = {
                 .name = "exynos-pmu",
                 .of_match_table = exynos_pmu_of_device_ids,
+                .pm = pm_sleep_ptr(&cpupm_pm_ops),
         },
         .probe = exynos_pmu_probe,
 };