-rw-r--r--  Documentation/dev-tools/kunit/start.rst | 13
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/arm/psci.yaml | 104
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8916.dtsi | 57
-rw-r--r--  arch/riscv/kernel/entry.S | 1
-rw-r--r--  arch/riscv/kernel/riscv_ksyms.c | 3
-rw-r--r--  arch/riscv/lib/uaccess.S | 4
-rw-r--r--  arch/riscv/mm/cacheflush.c | 1
-rw-r--r--  block/compat_ioctl.c | 16
-rw-r--r--  drivers/ata/ahci_brcm.c | 133
-rw-r--r--  drivers/ata/libahci_platform.c | 6
-rw-r--r--  drivers/ata/libata-core.c | 24
-rw-r--r--  drivers/ata/sata_fsl.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/ata/sata_nv.c | 2
-rw-r--r--  drivers/base/power/domain.c | 38
-rw-r--r--  drivers/block/pktcdvd.c | 2
-rw-r--r--  drivers/clk/clk-scmi.c | 2
-rw-r--r--  drivers/cpufreq/scmi-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/Makefile | 4
-rw-r--r--  drivers/cpuidle/cpuidle-psci-domain.c | 308
-rw-r--r--  drivers/cpuidle/cpuidle-psci.c | 161
-rw-r--r--  drivers/cpuidle/cpuidle-psci.h | 17
-rw-r--r--  drivers/cpuidle/dt_idle_states.c | 5
-rw-r--r--  drivers/devfreq/Kconfig | 5
-rw-r--r--  drivers/firmware/arm_scmi/bus.c | 29
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 2
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 110
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/power.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/reset.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/scmi_pm_domain.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 2
-rw-r--r--  drivers/firmware/psci/psci.c | 18
-rw-r--r--  drivers/gpio/Kconfig | 4
-rw-r--r--  drivers/gpio/gpio-aspeed-sgpio.c | 2
-rw-r--r--  drivers/gpio/gpio-mockup.c | 7
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 1
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 26
-rw-r--r--  drivers/gpio/gpio-xgs-iproc.c | 2
-rw-r--r--  drivers/gpio/gpio-xtensa.c | 7
-rw-r--r--  drivers/gpio/gpiolib.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.h | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 10
-rw-r--r--  drivers/hwmon/scmi-hwmon.c | 2
-rw-r--r--  drivers/of/base.c | 36
-rw-r--r--  drivers/reset/reset-scmi.c | 2
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 1
-rw-r--r--  drivers/soc/bcm/brcmstb/biuctrl.c | 30
-rw-r--r--  drivers/soc/renesas/Kconfig | 14
-rw-r--r--  drivers/soc/renesas/rcar-rst.c | 2
-rw-r--r--  drivers/target/target_core_iblock.c | 4
-rw-r--r--  drivers/tee/optee/core.c | 153
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/readdir.c | 63
-rw-r--r--  fs/cifs/smb2file.c | 2
-rw-r--r--  fs/io-wq.c | 10
-rw-r--r--  fs/io_uring.c | 690
-rw-r--r--  fs/locks.c | 2
-rw-r--r--  include/linux/ahci_platform.h | 2
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/of.h | 8
-rw-r--r--  include/linux/pm_domain.h | 8
-rw-r--r--  include/linux/psci.h | 2
-rw-r--r--  include/linux/scmi_protocol.h | 5
-rw-r--r--  include/trace/events/scmi.h | 90
-rwxr-xr-x  tools/testing/kunit/kunit.py | 18
-rw-r--r--  tools/testing/kunit/kunit_kernel.py | 10
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py | 10
-rw-r--r--  tools/testing/selftests/filesystems/epoll/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/firmware/fw_lib.sh | 6
-rw-r--r--  tools/testing/selftests/livepatch/functions.sh | 15
-rwxr-xr-x  tools/testing/selftests/livepatch/test-state.sh | 3
-rw-r--r--  tools/testing/selftests/rseq/param_test.c | 18
-rw-r--r--  tools/testing/selftests/rseq/rseq.h | 12
-rw-r--r--  tools/testing/selftests/rseq/settings | 1
96 files changed, 1911 insertions(+), 720 deletions(-)
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index 9d6db892c41c..4e1d24db6b13 100644
--- a/Documentation/dev-tools/kunit/start.rst
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -24,19 +24,16 @@ The wrapper can be run with:
For more information on this wrapper (also called kunit_tool) check out the
:doc:`kunit-tool` page.
-Creating a kunitconfig
-======================
+Creating a .kunitconfig
+=======================
The Python script is a thin wrapper around Kbuild. As such, it needs to be
-configured with a ``kunitconfig`` file. This file essentially contains the
+configured with a ``.kunitconfig`` file. This file essentially contains the
regular Kernel config, with the specific test targets as well.
.. code-block:: bash
- git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO
cd $PATH_TO_LINUX_REPO
- ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig
-
-You may want to add kunitconfig to your local gitignore.
+ cp arch/um/configs/kunit_defconfig .kunitconfig
Verifying KUnit Works
---------------------
@@ -151,7 +148,7 @@ and the following to ``drivers/misc/Makefile``:
obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o
-Now add it to your ``kunitconfig``:
+Now add it to your ``.kunitconfig``:
.. code-block:: none
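
For orientation, the example-test.o object referenced in this hunk corresponds to a C test file roughly like the following — a minimal sketch in the style of the KUnit documentation, not part of this patch (the suite and case names are illustrative):

#include <kunit/test.h>

static void misc_example_test_success(struct kunit *test)
{
	/* KUNIT_EXPECT_EQ() records a failure but lets the case continue. */
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case misc_example_test_cases[] = {
	KUNIT_CASE(misc_example_test_success),
	{}
};

static struct kunit_suite misc_example_test_suite = {
	.name = "misc-example",
	.test_cases = misc_example_test_cases,
};
kunit_test_suite(misc_example_test_suite);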
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index c23c24ff7575..7a9c3ce2dbef 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -242,6 +242,21 @@ properties:
where voltage is in V, frequency is in MHz.
+ power-domains:
+ $ref: '/schemas/types.yaml#/definitions/phandle-array'
+ description:
+ List of phandles and PM domain specifiers, as defined by bindings of the
+ PM domain provider (see also ../power_domain.txt).
+
+ power-domain-names:
+ $ref: '/schemas/types.yaml#/definitions/string-array'
+ description:
+ A list of power domain name strings sorted in the same order as the
+ power-domains property.
+
+ For PSCI based platforms, the name corresponding to the index of the PSCI
+      PM domain provider must be "psci".
+
qcom,saw:
$ref: '/schemas/types.yaml#/definitions/phandle'
description: |
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 7abdf58b335e..8ef85420b2ab 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -102,6 +102,34 @@ properties:
[1] Kernel documentation - ARM idle states bindings
Documentation/devicetree/bindings/arm/idle-states.txt
+ "#power-domain-cells":
+ description:
+ The number of cells in a PM domain specifier as per binding in [3].
+      Must be 0, representing a single PM domain.
+
+      ARM systems can have multiple cores, sometimes in a hierarchical
+ arrangement. This often, but not always, maps directly to the processor
+ power topology of the system. Individual nodes in a topology have their
+ own specific power states and can be better represented hierarchically.
+
+ For these cases, the definitions of the idle states for the CPUs and the
+ CPU topology, must conform to the binding in [3]. The idle states
+ themselves must conform to the binding in [4] and must specify the
+ arm,psci-suspend-param property.
+
+      It should also be noted that, in PSCI firmware v1.0, the OS-Initiated
+ (OSI) CPU suspend mode is introduced. Using a hierarchical representation
+ helps to implement support for OSI mode and OS implementations may choose
+ to mandate it.
+
+ [3] Documentation/devicetree/bindings/power/power_domain.txt
+ [4] Documentation/devicetree/bindings/power/domain-idle-state.txt
+
+ power-domains:
+ $ref: '/schemas/types.yaml#/definitions/phandle-array'
+ description:
+ List of phandles and PM domain specifiers, as defined by bindings of the
+ PM domain provider.
required:
- compatible
@@ -160,4 +188,80 @@ examples:
cpu_on = <0x95c10002>;
cpu_off = <0x95c10001>;
};
+
+ - |+
+
+ // Case 4: CPUs and CPU idle states described using the hierarchical model.
+
+ cpus {
+ #size-cells = <0>;
+ #address-cells = <1>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0>;
+ enable-method = "psci";
+ power-domains = <&CPU_PD0>;
+ power-domain-names = "psci";
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x100>;
+ enable-method = "psci";
+ power-domains = <&CPU_PD1>;
+ power-domain-names = "psci";
+ };
+
+ idle-states {
+
+ CPU_PWRDN: cpu-power-down {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x0000001>;
+ entry-latency-us = <10>;
+ exit-latency-us = <10>;
+ min-residency-us = <100>;
+ };
+
+ CLUSTER_RET: cluster-retention {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x1000011>;
+ entry-latency-us = <500>;
+ exit-latency-us = <500>;
+ min-residency-us = <2000>;
+ };
+
+ CLUSTER_PWRDN: cluster-power-down {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x1000031>;
+ entry-latency-us = <2000>;
+ exit-latency-us = <2000>;
+ min-residency-us = <6000>;
+ };
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+
+ CPU_PD0: cpu-pd0 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CPU_PWRDN>;
+ power-domains = <&CLUSTER_PD>;
+ };
+
+ CPU_PD1: cpu-pd1 {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CPU_PWRDN>;
+ power-domains = <&CLUSTER_PD>;
+ };
+
+ CLUSTER_PD: cluster-pd {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
+ };
+ };
...
diff --git a/MAINTAINERS b/MAINTAINERS
index ffa3371bc750..e6321c7ea4aa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7034,6 +7034,7 @@ L: linux-acpi@vger.kernel.org
S: Maintained
F: Documentation/firmware-guide/acpi/gpio-properties.rst
F: drivers/gpio/gpiolib-acpi.c
+F: drivers/gpio/gpiolib-acpi.h
GPIO IR Transmitter
M: Sean Young <sean@mess.org>
@@ -15976,6 +15977,7 @@ F: drivers/firmware/arm_scpi.c
F: drivers/firmware/arm_scmi/
F: drivers/reset/reset-scmi.c
F: include/linux/sc[mp]i_protocol.h
+F: include/trace/events/scmi.h
SYSTEM RESET/SHUTDOWN DRIVERS
M: Sebastian Reichel <sre@kernel.org>
diff --git a/Makefile b/Makefile
index caf14acf1953..b99d95df8075 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 8686e101905c..282c36c8fa3b 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -102,10 +102,11 @@
reg = <0x0>;
next-level-cache = <&L2_0>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
+ power-domains = <&CPU_PD0>;
+ power-domain-names = "psci";
};
CPU1: cpu@1 {
@@ -114,10 +115,11 @@
reg = <0x1>;
next-level-cache = <&L2_0>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
+ power-domains = <&CPU_PD1>;
+ power-domain-names = "psci";
};
CPU2: cpu@2 {
@@ -126,10 +128,11 @@
reg = <0x2>;
next-level-cache = <&L2_0>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
+ power-domains = <&CPU_PD2>;
+ power-domain-names = "psci";
};
CPU3: cpu@3 {
@@ -138,10 +141,11 @@
reg = <0x3>;
next-level-cache = <&L2_0>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
clocks = <&apcs>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>;
+ power-domains = <&CPU_PD3>;
+ power-domain-names = "psci";
};
L2_0: l2-cache {
@@ -161,12 +165,57 @@
min-residency-us = <2000>;
local-timer-stop;
};
+
+ CLUSTER_RET: cluster-retention {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000012>;
+ entry-latency-us = <500>;
+ exit-latency-us = <500>;
+ min-residency-us = <2000>;
+ };
+
+ CLUSTER_PWRDN: cluster-gdhs {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000032>;
+ entry-latency-us = <2000>;
+ exit-latency-us = <2000>;
+ min-residency-us = <6000>;
+ };
};
};
psci {
compatible = "arm,psci-1.0";
method = "smc";
+
+ CPU_PD0: cpu-pd0 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&CPU_SLEEP_0>;
+ };
+
+ CPU_PD1: cpu-pd1 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&CPU_SLEEP_0>;
+ };
+
+ CPU_PD2: cpu-pd2 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&CPU_SLEEP_0>;
+ };
+
+ CPU_PD3: cpu-pd3 {
+ #power-domain-cells = <0>;
+ power-domains = <&CLUSTER_PD>;
+ domain-idle-states = <&CPU_SLEEP_0>;
+ };
+
+ CLUSTER_PD: cluster-pd {
+ #power-domain-cells = <0>;
+ domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
+ };
};
pmu {
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index a1349ca64669..e163b7b64c86 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -246,6 +246,7 @@ check_syscall_nr:
*/
li t1, -1
beq a7, t1, ret_from_syscall_rejected
+ blt a7, t1, 1f
/* Call syscall */
la s0, sys_call_table
slli t0, a7, RISCV_LGPTR
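
The added blt closes a hole where syscall numbers below -1 were neither rejected nor bounds-checked before being shifted into an index for sys_call_table. Rendered as C, the check amounts to something like this (an illustrative sketch only — the real logic is the assembly above, and the table size here is a placeholder):

#include <errno.h>

#define NR_SYSCALLS 436			/* illustrative table size */

/* Hypothetical C rendering of the check_syscall_nr logic in entry.S. */
long dispatch_syscall(long nr)
{
	if (nr == -1)			/* beq a7, t1, ...: explicitly rejected */
		return -ENOSYS;
	if (nr < -1)			/* the added blt: never index the table */
		return -ENOSYS;
	if (nr >= NR_SYSCALLS)		/* upper-bound check */
		return -ENOSYS;
	return 0;			/* would dispatch sys_call_table[nr] */
}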
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
index 4800cf703186..2a02b7eebee0 100644
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -9,8 +9,5 @@
/*
* Assembly functions that may be used (directly or indirectly) by modules
*/
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__asm_copy_to_user);
-EXPORT_SYMBOL(__asm_copy_from_user);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index fecd65657a6f..f29d2ba2c0a6 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/csr.h>
@@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user)
j 3b
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
+EXPORT_SYMBOL(__asm_copy_to_user)
+EXPORT_SYMBOL(__asm_copy_from_user)
ENTRY(__clear_user)
@@ -108,6 +111,7 @@ ENTRY(__clear_user)
bltu a0, a3, 5b
j 3b
ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
.section .fixup,"ax"
.balign 4
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 8f1900686640..8930ab7278e6 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -22,6 +22,7 @@ void flush_icache_all(void)
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
+EXPORT_SYMBOL(flush_icache_all);
/*
* Performs an icache flush for the given MM context. RISC-V has no direct
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 6ca015f92766..3ed7a0f144a9 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -6,6 +6,7 @@
#include <linux/compat.h>
#include <linux/elevator.h>
#include <linux/hdreg.h>
+#include <linux/pr.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/types.h>
@@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
* but we call blkdev_ioctl, which gets the lock for us
*/
case BLKRRPART:
+ case BLKREPORTZONE:
+ case BLKRESETZONE:
+ case BLKOPENZONE:
+ case BLKCLOSEZONE:
+ case BLKFINISHZONE:
+ case BLKGETZONESZ:
+ case BLKGETNRZONES:
return blkdev_ioctl(bdev, mode, cmd,
(unsigned long)compat_ptr(arg));
case BLKBSZSET_32:
@@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKTRACETEARDOWN: /* compatible */
ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
return ret;
+ case IOC_PR_REGISTER:
+ case IOC_PR_RESERVE:
+ case IOC_PR_RELEASE:
+ case IOC_PR_PREEMPT:
+ case IOC_PR_PREEMPT_ABORT:
+ case IOC_PR_CLEAR:
+ return blkdev_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(arg));
default:
if (disk->fops->compat_ioctl)
ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
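
The IOC_PR_* commands can be forwarded to blkdev_ioctl() unchanged because the persistent-reservation structures in linux/pr.h use only fixed-width fields, so their 32-bit and 64-bit layouts coincide. A minimal userspace sketch of one of them (the device path is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	/* struct pr_registration is __u64 old_key, __u64 new_key, __u32 flags,
	 * __u32 __pad -- the same layout for compat and native callers.
	 */
	struct pr_registration reg = { .old_key = 0, .new_key = 0x1234, .flags = 0 };
	int fd = open("/dev/sdX", O_RDWR);	/* placeholder device node */

	if (fd < 0 || ioctl(fd, IOC_PR_REGISTER, &reg) < 0)
		perror("IOC_PR_REGISTER");
	return 0;
}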
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f41744b9b38a..66a570d0da83 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -76,8 +76,7 @@ enum brcm_ahci_version {
};
enum brcm_ahci_quirks {
- BRCM_AHCI_QUIRK_NO_NCQ = BIT(0),
- BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
+ BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
};
struct brcm_ahci_priv {
@@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
brcm_sata_phy_disable(priv, i);
}
-static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
struct brcm_ahci_priv *priv)
{
- void __iomem *ahci;
- struct resource *res;
u32 impl;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
- ahci = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ahci))
- return 0;
-
- impl = readl(ahci + HOST_PORTS_IMPL);
+ impl = readl(hpriv->mmio + HOST_PORTS_IMPL);
if (fls(impl) > SATA_TOP_MAX_PHYS)
dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
@@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
else if (!impl)
dev_info(priv->dev, "no ports found\n");
- devm_iounmap(&pdev->dev, ahci);
- devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
-
return impl;
}
@@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
/* Perform the SATA PHY reset sequence */
brcm_sata_phy_disable(priv, ap->port_no);
+ /* Reset the SATA clock */
+ ahci_platform_disable_clks(hpriv);
+ msleep(10);
+
+ ahci_platform_enable_clks(hpriv);
+ msleep(10);
+
/* Bring the PHY back on */
brcm_sata_phy_enable(priv, ap->port_no);
@@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
- int ret;
- ret = ahci_platform_suspend(dev);
brcm_sata_phys_disable(priv);
- return ret;
+
+ return ahci_platform_suspend(dev);
}
static int brcm_ahci_resume(struct device *dev)
@@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
+
+ /* Make sure clocks are turned on before re-configuration */
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ return ret;
brcm_sata_init(priv);
brcm_sata_phys_enable(priv);
brcm_sata_alpm_init(hpriv);
- return ahci_platform_resume(dev);
+
+ /* Since we had to enable clocks earlier on, we cannot use
+	 * ahci_platform_resume() as-is, because a second call to
+	 * ahci_platform_enable_resources() would bump up the resources
+	 * (regulators, clocks, PHYs) count artificially, so we copy the part
+ * after ahci_platform_enable_resources().
+ */
+ ret = ahci_platform_enable_phys(hpriv);
+ if (ret)
+ goto out_disable_phys;
+
+ ret = ahci_platform_resume_host(dev);
+ if (ret)
+ goto out_disable_platform_phys;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+ ahci_platform_disable_clks(hpriv);
+ return ret;
}
#endif
@@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(priv->rcdev))
reset_control_deassert(priv->rcdev);
- if ((priv->version == BRCM_SATA_BCM7425) ||
- (priv->version == BRCM_SATA_NSP)) {
- priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
+ hpriv = ahci_platform_get_resources(pdev, 0);
+ if (IS_ERR(hpriv)) {
+ ret = PTR_ERR(hpriv);
+ goto out_reset;
+ }
+
+ hpriv->plat_data = priv;
+ hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;
+
+ switch (priv->version) {
+ case BRCM_SATA_BCM7425:
+ hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
+ /* fall through */
+ case BRCM_SATA_NSP:
+ hpriv->flags |= AHCI_HFLAG_NO_NCQ;
priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+ break;
+ default:
+ break;
}
+ ret = ahci_platform_enable_clks(hpriv);
+ if (ret)
+ goto out_reset;
+
+ /* Must be first so as to configure endianness including that
+ * of the standard AHCI register space.
+ */
brcm_sata_init(priv);
- priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
- if (!priv->port_mask)
- return -ENODEV;
+ /* Initializes priv->port_mask which is used below */
+ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+ if (!priv->port_mask) {
+ ret = -ENODEV;
+ goto out_disable_clks;
+ }
+ /* Must be done before ahci_platform_enable_phys() */
brcm_sata_phys_enable(priv);
- hpriv = ahci_platform_get_resources(pdev, 0);
- if (IS_ERR(hpriv))
- return PTR_ERR(hpriv);
- hpriv->plat_data = priv;
- hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
-
brcm_sata_alpm_init(hpriv);
- ret = ahci_platform_enable_resources(hpriv);
+ ret = ahci_platform_enable_phys(hpriv);
if (ret)
- return ret;
-
- if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
- hpriv->flags |= AHCI_HFLAG_NO_NCQ;
- hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
+ goto out_disable_phys;
ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
&ahci_platform_sht);
if (ret)
- return ret;
+ goto out_disable_platform_phys;
dev_info(dev, "Broadcom AHCI SATA3 registered\n");
return 0;
+
+out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+out_disable_phys:
+ brcm_sata_phys_disable(priv);
+out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+out_reset:
+ if (!IS_ERR_OR_NULL(priv->rcdev))
+ reset_control_assert(priv->rcdev);
+ return ret;
}
static int brcm_ahci_remove(struct platform_device *pdev)
@@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
struct brcm_ahci_priv *priv = hpriv->plat_data;
int ret;
+ brcm_sata_phys_disable(priv);
+
ret = ata_platform_remove_one(pdev);
if (ret)
return ret;
- brcm_sata_phys_disable(priv);
-
return 0;
}
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 8befce036af8..129556fcf6be 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
* RETURNS:
* 0 on success otherwise a negative error code
*/
-static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
{
int rc, i;
@@ -74,6 +74,7 @@ disable_phys:
}
return rc;
}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
/**
* ahci_platform_disable_phys - Disable PHYs
@@ -81,7 +82,7 @@ disable_phys:
*
* This function disables all PHYs found in hpriv->phys.
*/
-static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
{
int i;
@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
phy_exit(hpriv->phys[i]);
}
}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
/**
* ahci_platform_enable_clks - Enable platform clocks
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e9017c570bc5..6f4ab5c5b52d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5329,6 +5329,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
}
/**
+ * ata_qc_get_active - get bitmask of active qcs
+ * @ap: port in question
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * Bitmask of active qcs
+ */
+u64 ata_qc_get_active(struct ata_port *ap)
+{
+ u64 qc_active = ap->qc_active;
+
+ /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
+ if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+ qc_active |= (1 << 0);
+ qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
+ }
+
+ return qc_active;
+}
+EXPORT_SYMBOL_GPL(ata_qc_get_active);
+
+/**
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
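
To see what the new helper does for the drivers converted below, here is the same tag folding as a stand-alone sketch (the constant mirrors include/linux/libata.h, where ATA_TAG_INTERNAL equals ATA_MAX_QUEUE, i.e. 32):

#include <stdint.h>
#include <stdio.h>

#define ATA_TAG_INTERNAL 32	/* mirrored from include/linux/libata.h */

/* Stand-alone rendering of ata_qc_get_active() for illustration. */
static uint64_t qc_get_active(uint64_t qc_active)
{
	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= 1ULL << 0;		/* internal tag goes to hw as tag 0 */
		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
	}
	return qc_active;
}

int main(void)
{
	/* Internal command plus NCQ tag 5 active: 0x100000020 folds to 0x21 */
	printf("%#llx\n", (unsigned long long)qc_get_active((1ULL << 32) | (1ULL << 5)));
	return 0;
}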
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 9239615d8a04..d55ee244d693 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
i, ioread32(hcr_base + CC),
ioread32(hcr_base + CA));
}
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
return;
} else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 277f11909fc1..d7228f8e9297 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
}
if (work_done) {
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
/* Update the software queue position index in hardware */
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f3e62f5528bd..eb9dc14e5147 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
check_commands = 0;
check_commands &= ~(1 << pos);
}
- ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+ ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
}
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 8e5725b11ee8..959d6d5eb000 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2303,6 +2303,44 @@ out:
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
/**
+ * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks-up a parent PM domain and subdomain based upon phandle args
+ * provided and removes the subdomain from the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ struct generic_pm_domain *parent, *subdomain;
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+
+ parent = genpd_get_from_provider(parent_spec);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ goto out;
+ }
+
+ subdomain = genpd_get_from_provider(subdomain_spec);
+ if (IS_ERR(subdomain)) {
+ ret = PTR_ERR(subdomain);
+ goto out;
+ }
+
+ ret = pm_genpd_remove_subdomain(parent, subdomain);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
+
+/**
* of_genpd_remove_last - Remove the last PM domain registered for a provider
* @provider: Pointer to device structure associated with provider
*
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ee67bf929fac..861fc65a1b75 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = {
.release = pkt_close,
.ioctl = pkt_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = pkt_compat_ioctl,
+ .compat_ioctl = pkt_compat_ioctl,
#endif
.check_events = pkt_check_events,
};
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index 886f7c5df51a..c491f5de0f3f 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -176,7 +176,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_CLOCK },
+ { SCMI_PROTOCOL_CLOCK, "clocks" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index e6182c89df79..61623e2ff149 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -261,7 +261,7 @@ static void scmi_cpufreq_remove(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_PERF },
+ { SCMI_PROTOCOL_PERF, "cpufreq" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index ee70d5cc5b99..cc8c769d7fa9 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -21,7 +21,9 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
-obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o
+obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o
+cpuidle_psci-y := cpuidle-psci.o
+cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o
###############################################################################
# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
new file mode 100644
index 000000000000..423f03bbeb74
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PM domains for CPUs via genpd - managed by cpuidle-psci.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ */
+
+#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "cpuidle-psci.h"
+
+struct psci_pd_provider {
+ struct list_head link;
+ struct device_node *node;
+};
+
+static LIST_HEAD(psci_pd_providers);
+static bool osi_mode_enabled __initdata;
+
+static int psci_pd_power_off(struct generic_pm_domain *pd)
+{
+ struct genpd_power_state *state = &pd->states[pd->state_idx];
+ u32 *pd_state;
+
+ if (!state->data)
+ return 0;
+
+ /* OSI mode is enabled, set the corresponding domain state. */
+ pd_state = state->data;
+ psci_set_domain_state(*pd_state);
+
+ return 0;
+}
+
+static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states,
+ int state_count)
+{
+ int i, ret;
+ u32 psci_state, *psci_state_buf;
+
+ for (i = 0; i < state_count; i++) {
+ ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode),
+ &psci_state);
+ if (ret)
+ goto free_state;
+
+ psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!psci_state_buf) {
+ ret = -ENOMEM;
+ goto free_state;
+ }
+ *psci_state_buf = psci_state;
+ states[i].data = psci_state_buf;
+ }
+
+ return 0;
+
+free_state:
+ i--;
+ for (; i >= 0; i--)
+ kfree(states[i].data);
+ return ret;
+}
+
+static int __init psci_pd_parse_states(struct device_node *np,
+ struct genpd_power_state **states, int *state_count)
+{
+ int ret;
+
+ /* Parse the domain idle states. */
+ ret = of_genpd_parse_idle_states(np, states, state_count);
+ if (ret)
+ return ret;
+
+ /* Fill out the PSCI specifics for each found state. */
+ ret = psci_pd_parse_state_nodes(*states, *state_count);
+ if (ret)
+ kfree(*states);
+
+ return ret;
+}
+
+static void psci_pd_free_states(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ int i;
+
+ for (i = 0; i < state_count; i++)
+ kfree(states[i].data);
+ kfree(states);
+}
+
+static int __init psci_pd_init(struct device_node *np)
+{
+ struct generic_pm_domain *pd;
+ struct psci_pd_provider *pd_provider;
+ struct dev_power_governor *pd_gov;
+ struct genpd_power_state *states = NULL;
+ int ret = -ENOMEM, state_count = 0;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ goto out;
+
+ pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
+ goto free_pd;
+
+ pd->name = kasprintf(GFP_KERNEL, "%pOF", np);
+ if (!pd->name)
+ goto free_pd_prov;
+
+ /*
+ * Parse the domain idle states and let genpd manage the state selection
+	 * for those that are compatible with "domain-idle-state".
+ */
+ ret = psci_pd_parse_states(np, &states, &state_count);
+ if (ret)
+ goto free_name;
+
+ pd->free_states = psci_pd_free_states;
+ pd->name = kbasename(pd->name);
+ pd->power_off = psci_pd_power_off;
+ pd->states = states;
+ pd->state_count = state_count;
+ pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
+
+ /* Use governor for CPU PM domains if it has some states to manage. */
+ pd_gov = state_count > 0 ? &pm_domain_cpu_gov : NULL;
+
+ ret = pm_genpd_init(pd, pd_gov, false);
+ if (ret) {
+ psci_pd_free_states(states, state_count);
+ goto free_name;
+ }
+
+ ret = of_genpd_add_provider_simple(np, pd);
+ if (ret)
+ goto remove_pd;
+
+ pd_provider->node = of_node_get(np);
+ list_add(&pd_provider->link, &psci_pd_providers);
+
+ pr_debug("init PM domain %s\n", pd->name);
+ return 0;
+
+remove_pd:
+ pm_genpd_remove(pd);
+free_name:
+ kfree(pd->name);
+free_pd_prov:
+ kfree(pd_provider);
+free_pd:
+ kfree(pd);
+out:
+ pr_err("failed to init PM domain ret=%d %pOF\n", ret, np);
+ return ret;
+}
+
+static void __init psci_pd_remove(void)
+{
+ struct psci_pd_provider *pd_provider, *it;
+ struct generic_pm_domain *genpd;
+
+ list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
+ of_genpd_del_provider(pd_provider->node);
+
+ genpd = of_genpd_remove_last(pd_provider->node);
+ if (!IS_ERR(genpd))
+ kfree(genpd);
+
+ of_node_put(pd_provider->node);
+ list_del(&pd_provider->link);
+ kfree(pd_provider);
+ }
+}
+
+static int __init psci_pd_init_topology(struct device_node *np, bool add)
+{
+ struct device_node *node;
+ struct of_phandle_args child, parent;
+ int ret;
+
+ for_each_child_of_node(np, node) {
+ if (of_parse_phandle_with_args(node, "power-domains",
+ "#power-domain-cells", 0, &parent))
+ continue;
+
+ child.np = node;
+ child.args_count = 0;
+
+ ret = add ? of_genpd_add_subdomain(&parent, &child) :
+ of_genpd_remove_subdomain(&parent, &child);
+ of_node_put(parent.np);
+ if (ret) {
+ of_node_put(node);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int __init psci_pd_add_topology(struct device_node *np)
+{
+ return psci_pd_init_topology(np, true);
+}
+
+static void __init psci_pd_remove_topology(struct device_node *np)
+{
+ psci_pd_init_topology(np, false);
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci-1.0" },
+ {}
+};
+
+static int __init psci_idle_init_domains(void)
+{
+ struct device_node *np = of_find_matching_node(NULL, psci_of_match);
+ struct device_node *node;
+ int ret = 0, pd_count = 0;
+
+ if (!np)
+ return -ENODEV;
+
+ /* Currently limit the hierarchical topology to be used in OSI mode. */
+ if (!psci_has_osi_support())
+ goto out;
+
+ /*
+ * Parse child nodes for the "#power-domain-cells" property and
+ * initialize a genpd/genpd-of-provider pair when it's found.
+ */
+ for_each_child_of_node(np, node) {
+ if (!of_find_property(node, "#power-domain-cells", NULL))
+ continue;
+
+ ret = psci_pd_init(node);
+ if (ret)
+ goto put_node;
+
+ pd_count++;
+ }
+
+ /* Bail out if not using the hierarchical CPU topology. */
+ if (!pd_count)
+ goto out;
+
+ /* Link genpd masters/subdomains to model the CPU topology. */
+ ret = psci_pd_add_topology(np);
+ if (ret)
+ goto remove_pd;
+
+ /* Try to enable OSI mode. */
+ ret = psci_set_osi_mode();
+ if (ret) {
+ pr_warn("failed to enable OSI mode: %d\n", ret);
+ psci_pd_remove_topology(np);
+ goto remove_pd;
+ }
+
+ osi_mode_enabled = true;
+ of_node_put(np);
+ pr_info("Initialized CPU PM domain topology\n");
+ return pd_count;
+
+put_node:
+ of_node_put(node);
+remove_pd:
+ if (pd_count)
+ psci_pd_remove();
+ pr_err("failed to create CPU PM domains ret=%d\n", ret);
+out:
+ of_node_put(np);
+ return ret;
+}
+subsys_initcall(psci_idle_init_domains);
+
+struct device __init *psci_dt_attach_cpu(int cpu)
+{
+ struct device *dev;
+
+ if (!osi_mode_enabled)
+ return NULL;
+
+ dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci");
+ if (IS_ERR_OR_NULL(dev))
+ return dev;
+
+ pm_runtime_irq_safe(dev);
+ if (cpu_online(cpu))
+ pm_runtime_get_sync(dev);
+
+ return dev;
+}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index f3c1a2396f98..edd7a54ef0d3 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) "CPUidle PSCI: " fmt
+#include <linux/cpuhotplug.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
@@ -16,21 +17,107 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/psci.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <asm/cpuidle.h>
+#include "cpuidle-psci.h"
#include "dt_idle_states.h"
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+struct psci_cpuidle_data {
+ u32 *psci_states;
+ struct device *dev;
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
+static DEFINE_PER_CPU(u32, domain_state);
+static bool psci_cpuidle_use_cpuhp __initdata;
+
+void psci_set_domain_state(u32 state)
+{
+ __this_cpu_write(domain_state, state);
+}
+
+static inline u32 psci_get_domain_state(void)
+{
+ return __this_cpu_read(domain_state);
+}
+
+static inline int psci_enter_state(int idx, u32 state)
+{
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
+}
+
+static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
+ u32 *states = data->psci_states;
+ struct device *pd_dev = data->dev;
+ u32 state;
+ int ret;
+
+	/* Do runtime PM to manage a hierarchical CPU topology. */
+ pm_runtime_put_sync_suspend(pd_dev);
+
+ state = psci_get_domain_state();
+ if (!state)
+ state = states[idx];
+
+ ret = psci_enter_state(idx, state);
+
+ pm_runtime_get_sync(pd_dev);
+
+ /* Clear the domain state to start fresh when back from idle. */
+ psci_set_domain_state(0);
+ return ret;
+}
+
+static int psci_idle_cpuhp_up(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+ if (pd_dev)
+ pm_runtime_get_sync(pd_dev);
+
+ return 0;
+}
+
+static int psci_idle_cpuhp_down(unsigned int cpu)
+{
+ struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
+
+ if (pd_dev) {
+ pm_runtime_put_sync(pd_dev);
+ /* Clear domain state to start fresh at next online. */
+ psci_set_domain_state(0);
+ }
+
+ return 0;
+}
+
+static void __init psci_idle_init_cpuhp(void)
+{
+ int err;
+
+ if (!psci_cpuidle_use_cpuhp)
+ return;
+
+ err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
+ "cpuidle/psci:online",
+ psci_idle_cpuhp_up,
+ psci_idle_cpuhp_down);
+ if (err)
+ pr_warn("Failed %d while setup cpuhp state\n", err);
+}
static int psci_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- u32 *state = __this_cpu_read(psci_power_state);
+ u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
- return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
- idx, state[idx - 1]);
+ return psci_enter_state(idx, state[idx]);
}
static struct cpuidle_driver psci_idle_driver __initdata = {
@@ -56,7 +143,7 @@ static const struct of_device_id psci_idle_state_match[] __initconst = {
{ },
};
-static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
+int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
@@ -73,28 +160,25 @@ static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
return 0;
}
-static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv,
+ struct device_node *cpu_node,
+ unsigned int state_count, int cpu)
{
- int i, ret = 0, count = 0;
+ int i, ret = 0;
u32 *psci_states;
struct device_node *state_node;
+ struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);
- /* Count idle states */
- while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
- count))) {
- count++;
- of_node_put(state_node);
- }
-
- if (!count)
- return -ENODEV;
-
- psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ state_count++; /* Add WFI state too */
+ psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL);
if (!psci_states)
return -ENOMEM;
- for (i = 0; i < count; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ for (i = 1; i < state_count; i++) {
+ state_node = of_get_cpu_state_node(cpu_node, i - 1);
+ if (!state_node)
+ break;
+
ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
of_node_put(state_node);
@@ -104,8 +188,33 @@ static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
}
- /* Idle states parsed correctly, initialize per-cpu pointer */
- per_cpu(psci_power_state, cpu) = psci_states;
+ if (i != state_count) {
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ /* Currently limit the hierarchical topology to be used in OSI mode. */
+ if (psci_has_osi_support()) {
+ data->dev = psci_dt_attach_cpu(cpu);
+ if (IS_ERR(data->dev)) {
+ ret = PTR_ERR(data->dev);
+ goto free_mem;
+ }
+
+ /*
+ * Using the deepest state for the CPU to trigger a potential
+		 * selection of a shared state for the domain assumes the
+ * domain states are all deeper states.
+ */
+ if (data->dev) {
+ drv->states[state_count - 1].enter =
+ psci_enter_domain_idle_state;
+ psci_cpuidle_use_cpuhp = true;
+ }
+ }
+
+ /* Idle states parsed correctly, store them in the per-cpu struct. */
+ data->psci_states = psci_states;
return 0;
free_mem:
@@ -113,7 +222,8 @@ free_mem:
return ret;
}
-static __init int psci_cpu_init_idle(unsigned int cpu)
+static __init int psci_cpu_init_idle(struct cpuidle_driver *drv,
+ unsigned int cpu, unsigned int state_count)
{
struct device_node *cpu_node;
int ret;
@@ -129,7 +239,7 @@ static __init int psci_cpu_init_idle(unsigned int cpu)
if (!cpu_node)
return -ENODEV;
- ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+ ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu);
of_node_put(cpu_node);
@@ -185,7 +295,7 @@ static int __init psci_idle_init_cpu(int cpu)
/*
* Initialize PSCI idle states.
*/
- ret = psci_cpu_init_idle(cpu);
+ ret = psci_cpu_init_idle(drv, cpu, ret);
if (ret) {
pr_err("CPU %d failed to PSCI idle\n", cpu);
goto out_kfree_drv;
@@ -221,6 +331,7 @@ static int __init psci_idle_init(void)
goto out_fail;
}
+ psci_idle_init_cpuhp();
return 0;
out_fail:
diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h
new file mode 100644
index 000000000000..7299a04dd467
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __CPUIDLE_PSCI_H
+#define __CPUIDLE_PSCI_H
+
+struct device_node;
+
+void psci_set_domain_state(u32 state);
+int __init psci_dt_parse_state_node(struct device_node *np, u32 *state);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+struct device __init *psci_dt_attach_cpu(int cpu);
+#else
+static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; }
+#endif
+
+#endif /* __CPUIDLE_PSCI_H */
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index d06d21a9525d..252f2a9686a6 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -111,8 +111,7 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
cpu_node = of_cpu_device_node_get(cpu);
- curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
- idx);
+ curr_state_node = of_get_cpu_state_node(cpu_node, idx);
if (state_node != curr_state_node)
valid = false;
@@ -170,7 +169,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));
for (i = 0; ; i++) {
- state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ state_node = of_get_cpu_state_node(cpu_node, i);
if (!state_node)
break;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index defe1d438710..35535833b6f7 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -83,7 +83,6 @@ config ARM_EXYNOS_BUS_DEVFREQ
select DEVFREQ_GOV_PASSIVE
select DEVFREQ_EVENT_EXYNOS_PPMU
select PM_DEVFREQ_EVENT
- select PM_OPP
help
This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
Memory bus has one more group of memory bus (e.g, MIF and INT block).
@@ -98,7 +97,7 @@ config ARM_TEGRA_DEVFREQ
ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
ARCH_TEGRA_210_SOC || \
COMPILE_TEST
- select PM_OPP
+ depends on COMMON_CLK
help
This adds the DEVFREQ driver for the Tegra family of SoCs.
It reads ACTMON counters of memory controllers and adjusts the
@@ -109,7 +108,6 @@ config ARM_TEGRA20_DEVFREQ
depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
depends on COMMON_CLK
select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select PM_OPP
help
This adds the DEVFREQ driver for the Tegra20 family of SoCs.
It reads Memory Controller counters and adjusts the operating
@@ -121,7 +119,6 @@ config ARM_RK3399_DMC_DEVFREQ
select DEVFREQ_EVENT_ROCKCHIP_DFI
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ_EVENT
- select PM_OPP
help
This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
It sets the frequency for the memory controller and reads the usage counts
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 7a30952b463d..db55c43a2cbd 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -28,8 +28,12 @@ scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
return NULL;
for (; id->protocol_id; id++)
- if (id->protocol_id == scmi_dev->protocol_id)
- return id;
+ if (id->protocol_id == scmi_dev->protocol_id) {
+ if (!id->name)
+ return id;
+ else if (!strcmp(id->name, scmi_dev->name))
+ return id;
+ }
return NULL;
}
@@ -56,6 +60,11 @@ static int scmi_protocol_init(int protocol_id, struct scmi_handle *handle)
return fn(handle);
}
+static int scmi_protocol_dummy_init(struct scmi_handle *handle)
+{
+ return 0;
+}
+
static int scmi_dev_probe(struct device *dev)
{
struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
@@ -74,6 +83,10 @@ static int scmi_dev_probe(struct device *dev)
if (ret)
return ret;
+ /* Skip protocol initialisation for additional devices */
+ idr_replace(&scmi_protocols, &scmi_protocol_dummy_init,
+ scmi_dev->protocol_id);
+
return scmi_drv->probe(scmi_dev);
}
@@ -125,7 +138,8 @@ static void scmi_device_release(struct device *dev)
}
struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol)
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+ const char *name)
{
int id, retval;
struct scmi_device *scmi_dev;
@@ -134,8 +148,15 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
if (!scmi_dev)
return NULL;
+ scmi_dev->name = kstrdup_const(name ?: "unknown", GFP_KERNEL);
+ if (!scmi_dev->name) {
+ kfree(scmi_dev);
+ return NULL;
+ }
+
id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL);
if (id < 0) {
+ kfree_const(scmi_dev->name);
kfree(scmi_dev);
return NULL;
}
@@ -154,6 +175,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
return scmi_dev;
put_dev:
+ kfree_const(scmi_dev->name);
put_device(&scmi_dev->dev);
ida_simple_remove(&scmi_bus_id, id);
return NULL;
@@ -161,6 +183,7 @@ put_dev:
void scmi_device_destroy(struct scmi_device *scmi_dev)
{
+ kfree_const(scmi_dev->name);
scmi_handle_put(scmi_dev->handle);
ida_simple_remove(&scmi_bus_id, scmi_dev->id);
device_unregister(&scmi_dev->dev);
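
With names in the id table, scmi_dev_match_id() can bind several devices to one protocol: an entry with a NULL name keeps the old match-any behaviour, while a named entry binds only the device created with that name. A driver claiming just the "clocks" device would declare the following (mirroring the clk-scmi.c hunk below):

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },	/* protocol id + device name */
	{ },					/* sentinel */
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);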
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 32526a793f3a..4c2227662b26 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -65,6 +65,7 @@ struct scmi_clock_set_rate {
};
struct clock_info {
+ u32 version;
int num_clocks;
int max_async_req;
atomic_t cur_async_req;
@@ -340,6 +341,7 @@ static int scmi_clock_protocol_init(struct scmi_handle *handle)
scmi_clock_describe_rates_get(handle, clkid, clk);
}
+ cinfo->version = version;
handle->clk_ops = &clk_ops;
handle->clk_priv = cinfo;
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 5237c2ff79fe..df35358ff324 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -81,6 +81,7 @@ struct scmi_msg {
/**
* struct scmi_xfer - Structure representing a message flow
*
+ * @transfer_id: Unique ID for debug & profiling purpose
* @hdr: Transmit message header
* @tx: Transmit message
* @rx: Receive message, the buffer should be pre-allocated to store
@@ -90,6 +91,7 @@ struct scmi_msg {
* @async: pointer to delayed response message received event completion
*/
struct scmi_xfer {
+ int transfer_id;
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 3eb0382491ce..2c96f6b5a7d8 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -29,6 +29,9 @@
#include "common.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/scmi.h>
+
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
@@ -61,6 +64,8 @@ enum scmi_error_codes {
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
+/* Track the unique id for the transfers for debug & profiling purpose */
+static atomic_t transfer_last_id;
/**
* struct scmi_xfers_info - Structure to manage transfer information
@@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
xfer = &minfo->xfer_block[xfer_id];
xfer->hdr.seq = xfer_id;
reinit_completion(&xfer->done);
+ xfer->transfer_id = atomic_inc_return(&transfer_last_id);
return xfer;
}
@@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
scmi_fetch_response(xfer, mem);
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ msg_type);
+
if (msg_type == MSG_TYPE_DELAYED_RESP)
complete(xfer->async_done);
else
@@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
if (unlikely(!cinfo))
return -EINVAL;
+ trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.poll_completion);
+
ret = mbox_send_message(cinfo->chan, xfer);
if (ret < 0) {
dev_dbg(dev, "mbox send fail %d\n", ret);
@@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
*/
mbox_client_txdone(cinfo->chan, ret);
+ trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ xfer->hdr.status);
+
return ret;
}
@@ -735,6 +753,11 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
idx = tx ? 0 : 1;
idr = tx ? &info->tx_idr : &info->rx_idr;
+	/* check if already allocated, used for multiple devices per protocol */
+ cinfo = idr_find(idr, prot_id);
+ if (cinfo)
+ return 0;
+
if (scmi_mailbox_check(np, idx)) {
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
@@ -803,11 +826,11 @@ scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
- int prot_id)
+ int prot_id, const char *name)
{
struct scmi_device *sdev;
- sdev = scmi_device_create(np, info->dev, prot_id);
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
if (!sdev) {
dev_err(info->dev, "failed to create %d protocol device\n",
prot_id);
@@ -824,6 +847,40 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
scmi_set_handle(sdev);
}
+#define MAX_SCMI_DEV_PER_PROTOCOL 2
+struct scmi_prot_devnames {
+ int protocol_id;
+ char *names[MAX_SCMI_DEV_PER_PROTOCOL];
+};
+
+static struct scmi_prot_devnames devnames[] = {
+ { SCMI_PROTOCOL_POWER, { "genpd" },},
+ { SCMI_PROTOCOL_PERF, { "cpufreq" },},
+ { SCMI_PROTOCOL_CLOCK, { "clocks" },},
+ { SCMI_PROTOCOL_SENSOR, { "hwmon" },},
+ { SCMI_PROTOCOL_RESET, { "reset" },},
+};
+
+static inline void
+scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
+ int prot_id)
+{
+ int loop, cnt;
+
+ for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
+ if (devnames[loop].protocol_id != prot_id)
+ continue;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
+ const char *name = devnames[loop].names[cnt];
+
+ if (name)
+ scmi_create_protocol_device(np, info, prot_id,
+ name);
+ }
+ }
+}
+
static int scmi_probe(struct platform_device *pdev)
{
int ret;
@@ -892,7 +949,7 @@ static int scmi_probe(struct platform_device *pdev)
continue;
}
- scmi_create_protocol_device(child, info, prot_id);
+ scmi_create_protocol_devices(child, info, prot_id);
}
return 0;
@@ -940,6 +997,52 @@ static int scmi_remove(struct platform_device *pdev)
return ret;
}
+static ssize_t protocol_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u.%u\n", info->version.major_ver,
+ info->version.minor_ver);
+}
+static DEVICE_ATTR_RO(protocol_version);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%x\n", info->version.impl_ver);
+}
+static DEVICE_ATTR_RO(firmware_version);
+
+static ssize_t vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.vendor_id);
+}
+static DEVICE_ATTR_RO(vendor_id);
+
+static ssize_t sub_vendor_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_info *info = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", info->version.sub_vendor_id);
+}
+static DEVICE_ATTR_RO(sub_vendor_id);
+
+static struct attribute *versions_attrs[] = {
+ &dev_attr_firmware_version.attr,
+ &dev_attr_protocol_version.attr,
+ &dev_attr_vendor_id.attr,
+ &dev_attr_sub_vendor_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(versions);
+
static const struct scmi_desc scmi_generic_desc = {
.max_rx_timeout_ms = 30, /* We may increase this if required */
.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
@@ -958,6 +1061,7 @@ static struct platform_driver scmi_driver = {
.driver = {
.name = "arm-scmi",
.of_match_table = scmi_of_match,
+ .dev_groups = versions_groups,
},
.probe = scmi_probe,
.remove = scmi_remove,
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 601af4edad5e..ec81e6f7e7a4 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -145,6 +145,7 @@ struct perf_dom_info {
};
struct scmi_perf_info {
+ u32 version;
int num_domains;
bool power_scale_mw;
u64 stats_addr;
@@ -736,6 +737,7 @@ static int scmi_perf_protocol_init(struct scmi_handle *handle)
scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
}
+ pinfo->version = version;
handle->perf_ops = &perf_ops;
handle->perf_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 5abef7079c0a..214886ce84f1 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -50,6 +50,7 @@ struct power_dom_info {
};
struct scmi_power_info {
+ u32 version;
int num_domains;
u64 stats_addr;
u32 stats_size;
@@ -207,6 +208,7 @@ static int scmi_power_protocol_init(struct scmi_handle *handle)
scmi_power_domain_attributes_get(handle, domain, dom);
}
+ pinfo->version = version;
handle->power_ops = &power_ops;
handle->power_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index ab42c21c5517..de73054554f3 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -48,6 +48,7 @@ struct reset_dom_info {
};
struct scmi_reset_info {
+ u32 version;
int num_domains;
struct reset_dom_info *dom_info;
};
@@ -217,6 +218,7 @@ static int scmi_reset_protocol_init(struct scmi_handle *handle)
scmi_reset_domain_attributes_get(handle, domain, dom);
}
+ pinfo->version = version;
handle->reset_ops = &reset_ops;
handle->reset_priv = pinfo;
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 87f737e01473..bafbfe358f97 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -112,7 +112,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_POWER },
+ { SCMI_PROTOCOL_POWER, "genpd" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
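Adding the "genpd" name turns this entry into a (protocol, name) pair, so the SCMI bus can route one protocol to a specific named driver instead of binding every driver that speaks that protocol. A simplified userspace model of the match rule; field and function names are illustrative, not the kernel's bus.c implementation:

#include <stdbool.h>
#include <string.h>

struct id_entry {
	int protocol_id;
	const char *name;	/* NULL means "any device name" */
};

static bool scmi_id_matches(const struct id_entry *id, int proto,
			    const char *dev_name)
{
	if (id->protocol_id != proto)
		return false;
	return !id->name || strcmp(id->name, dev_name) == 0;
}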
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index a400ea805fc2..eba61b9c1f53 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -68,6 +68,7 @@ struct scmi_msg_sensor_reading_get {
};
struct sensors_info {
+ u32 version;
int num_sensors;
int max_requests;
u64 reg_addr;
@@ -294,6 +295,7 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
scmi_sensor_description_get(handle, sinfo);
+ sinfo->version = version;
handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index b3b6c15e7b36..2937d44b5df4 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -97,7 +97,7 @@ static inline bool psci_has_ext_power_state(void)
PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}
-static inline bool psci_has_osi_support(void)
+bool psci_has_osi_support(void)
{
return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
}
@@ -162,6 +162,15 @@ static u32 psci_get_version(void)
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
+int psci_set_osi_mode(void)
+{
+ int err;
+
+ err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
+ PSCI_1_0_SUSPEND_MODE_OSI, 0, 0);
+ return psci_to_linux_errno(err);
+}
+
static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
int err;
@@ -544,9 +553,14 @@ static int __init psci_1_0_init(struct device_node *np)
if (err)
return err;
- if (psci_has_osi_support())
+ if (psci_has_osi_support()) {
pr_info("OSI mode supported.\n");
+ /* Default to PC mode. */
+ invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE,
+ PSCI_1_0_SUSPEND_MODE_PC, 0, 0);
+ }
+
return 0;
}
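psci_set_osi_mode() is now exported so that a caller that has confirmed OSI support, such as a hierarchical cpuidle back-end, can switch the firmware out of the Platform-Coordinated default chosen above. A hedged caller sketch built only from the two functions this hunk touches:

/* Opt in to OS-Initiated mode only when the firmware advertises it. */
static int enable_osi_if_possible(void)
{
	int ret;

	if (!psci_has_osi_support())
		return -EOPNOTSUPP;

	ret = psci_set_osi_mode();
	if (ret)
		pr_warn("psci: failed to enable OSI mode: %d\n", ret);

	return ret;
}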
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8adffd42f8cb..6ab25fe1c423 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -553,8 +553,8 @@ config GPIO_TEGRA
config GPIO_TEGRA186
tristate "NVIDIA Tegra186 GPIO support"
- default ARCH_TEGRA_186_SOC
- depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+ default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
+ depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
depends on OF_GPIO
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 7e99860ca447..8319812593e3 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
default:
/* actually, if code runs to here, it's an error case */
- BUG_ON(1);
+ BUG();
}
}
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 56d647a30e3e..c4fdc192ea4e 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
int direction;
mutex_lock(&chip->lock);
- direction = !chip->lines[offset].dir;
+ direction = chip->lines[offset].dir;
mutex_unlock(&chip->lock);
return direction;
@@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
struct gpio_chip *gc;
struct device *dev;
const char *name;
- int rv, base;
+ int rv, base, i;
u16 ngpio;
dev = &pdev->dev;
@@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
if (!chip->lines)
return -ENOMEM;
+ for (i = 0; i < gc->ngpio; i++)
+ chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;
+
if (device_property_read_bool(dev, "named-gpio-lines")) {
rv = gpio_mockup_name_lines(dev, chip);
if (rv)
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index f1e164cecff8..5ae30de3490a 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return -ENOMEM;
gc = &mpc8xxx_gc->gc;
+ gc->parent = &pdev->dev;
if (of_property_read_bool(np, "little-endian")) {
ret = bgpio_init(gc, &pdev->dev, 4,
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 6652bee01966..9853547e7276 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -568,16 +568,18 @@ static void pca953x_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- chip->irq_mask[d->hwirq / BANK_SZ] &= ~BIT(d->hwirq % BANK_SZ);
+ clear_bit(hwirq, chip->irq_mask);
}
static void pca953x_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- chip->irq_mask[d->hwirq / BANK_SZ] |= BIT(d->hwirq % BANK_SZ);
+ set_bit(hwirq, chip->irq_mask);
}
static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -635,8 +637,7 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- int bank_nb = d->hwirq / BANK_SZ;
- u8 mask = BIT(d->hwirq % BANK_SZ);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -644,15 +645,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- if (type & IRQ_TYPE_EDGE_FALLING)
- chip->irq_trig_fall[bank_nb] |= mask;
- else
- chip->irq_trig_fall[bank_nb] &= ~mask;
-
- if (type & IRQ_TYPE_EDGE_RISING)
- chip->irq_trig_raise[bank_nb] |= mask;
- else
- chip->irq_trig_raise[bank_nb] &= ~mask;
+ assign_bit(hwirq, chip->irq_trig_fall, type & IRQ_TYPE_EDGE_FALLING);
+ assign_bit(hwirq, chip->irq_trig_raise, type & IRQ_TYPE_EDGE_RISING);
return 0;
}
@@ -661,10 +655,10 @@ static void pca953x_irq_shutdown(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 mask = BIT(d->hwirq % BANK_SZ);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
- chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask;
+ clear_bit(hwirq, chip->irq_trig_raise);
+ clear_bit(hwirq, chip->irq_trig_fall);
}
static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pending)
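The conversion works because set_bit()/clear_bit()/assign_bit() over a DECLARE_BITMAP() land on the same logical bit as the old open-coded array[hwirq / BANK_SZ] |= BIT(hwirq % BANK_SZ) arithmetic. A small userspace model of the three helpers used here, minus the kernel's atomicity guarantees:

#include <stdbool.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void clear_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

/* assign_bit(nr, map, value): set the bit when value is true, else clear. */
static void assign_bit(unsigned int nr, unsigned long *map, bool value)
{
	if (value)
		set_bit(nr, map);
	else
		clear_bit(nr, map);
}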
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 773e5c24309e..b21c2e436b61 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -280,7 +280,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int __exit iproc_gpio_remove(struct platform_device *pdev)
+static int iproc_gpio_remove(struct platform_device *pdev)
{
struct iproc_gpio_chip *chip;
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 08d7c3b32038..c8af34a6368f 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -44,15 +44,14 @@ static inline unsigned long enable_cp(unsigned long *cpenable)
unsigned long flags;
local_irq_save(flags);
- RSR_CPENABLE(*cpenable);
- WSR_CPENABLE(*cpenable | BIT(XCHAL_CP_ID_XTIOP));
-
+ *cpenable = xtensa_get_sr(cpenable);
+ xtensa_set_sr(*cpenable | BIT(XCHAL_CP_ID_XTIOP), cpenable);
return flags;
}
static inline void disable_cp(unsigned long flags, unsigned long cpenable)
{
- WSR_CPENABLE(cpenable);
+ xtensa_set_sr(cpenable, cpenable);
local_irq_restore(flags);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9913886ede90..78a16e42f222 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -220,6 +220,14 @@ int gpiod_get_direction(struct gpio_desc *desc)
chip = gpiod_to_chip(desc);
offset = gpio_chip_hwgpio(desc);
+ /*
+ * Open drain emulation using input mode may incorrectly report
+ * input here, fix that up.
+ */
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) &&
+ test_bit(FLAG_IS_OUT, &desc->flags))
+ return 0;
+
if (!chip->get_direction)
return -ENOTSUPP;
@@ -4472,8 +4480,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (chip->ngpio <= p->chip_hwnum) {
dev_err(dev,
- "requested GPIO %d is out of range [0..%d] for chip %s\n",
- idx, chip->ngpio, chip->label);
+ "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
+ idx, p->chip_hwnum, chip->ngpio - 1,
+ chip->label);
return ERR_PTR(-EINVAL);
}
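The open-drain check added to gpiod_get_direction() covers controllers that emulate open-drain outputs: driving the line low uses a real output, while "high" releases the pin into input mode and lets the external pull-up supply the level, so the hardware direction register reads as input even though the line is logically an output. A tiny model of that emulation, with illustrative names:

enum hw_dir { HW_OUT, HW_IN };

/* Open-drain emulation on a push-pull controller: 0 drives the pin low,
 * 1 releases it (hardware input, external pull-up provides the high). */
static enum hw_dir open_drain_hw_dir(int logical_value)
{
	return logical_value ? HW_IN : HW_OUT;
}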
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 6f5e3bd13ad1..effc4250b230 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -15112,7 +15112,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
fb_obj_bump_render_priority(obj);
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
if (!new_plane_state->base.fence) { /* implicit fencing */
struct dma_fence *fence;
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 84b164f31895..6cb02c912acc 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -229,11 +229,11 @@ static void frontbuffer_release(struct kref *ref)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
spin_unlock(&obj->vma.lock);
- obj->frontbuffer = NULL;
+ RCU_INIT_POINTER(obj->frontbuffer, NULL);
spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
i915_gem_object_put(obj);
- kfree(front);
+ kfree_rcu(front, rcu);
}
struct intel_frontbuffer *
@@ -242,11 +242,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_frontbuffer *front;
- spin_lock(&i915->fb_tracking.lock);
- front = obj->frontbuffer;
- if (front)
- kref_get(&front->ref);
- spin_unlock(&i915->fb_tracking.lock);
+ front = __intel_frontbuffer_get(obj);
if (front)
return front;
@@ -262,13 +258,13 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
i915_active_may_sleep(frontbuffer_retire));
spin_lock(&i915->fb_tracking.lock);
- if (obj->frontbuffer) {
+ if (rcu_access_pointer(obj->frontbuffer)) {
kfree(front);
- front = obj->frontbuffer;
+ front = rcu_dereference_protected(obj->frontbuffer, true);
kref_get(&front->ref);
} else {
i915_gem_object_get(obj);
- obj->frontbuffer = front;
+ rcu_assign_pointer(obj->frontbuffer, front);
}
spin_unlock(&i915->fb_tracking.lock);
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index adc64d61a4a5..6d41f5394425 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -27,10 +27,10 @@
#include <linux/atomic.h>
#include <linux/kref.h>
+#include "gem/i915_gem_object_types.h"
#include "i915_active.h"
struct drm_i915_private;
-struct drm_i915_gem_object;
enum fb_op_origin {
ORIGIN_GTT,
@@ -45,6 +45,7 @@ struct intel_frontbuffer {
atomic_t bits;
struct i915_active write;
struct drm_i915_gem_object *obj;
+ struct rcu_head rcu;
};
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
@@ -54,6 +55,35 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+ struct intel_frontbuffer *front;
+
+ if (likely(!rcu_access_pointer(obj->frontbuffer)))
+ return NULL;
+
+ rcu_read_lock();
+ do {
+ front = rcu_dereference(obj->frontbuffer);
+ if (!front)
+ break;
+
+ if (unlikely(!kref_get_unless_zero(&front->ref)))
+ continue;
+
+ if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+ break;
+
+ intel_frontbuffer_put(front);
+ } while (1);
+ rcu_read_unlock();
+
+ return front;
+}
+
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);
@@ -119,6 +149,4 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
struct intel_frontbuffer *new,
unsigned int frontbuffer_bits);
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
#endif /* __INTEL_FRONTBUFFER_H__ */
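__intel_frontbuffer_get() follows the standard RCU lookup-then-reference pattern: load the pointer under rcu_read_lock(), take a reference only if the refcount is still non-zero, then confirm the slot still points at the same object, retrying on any race. A generic sketch of the same loop for an arbitrary RCU-published, kref-counted object; obj_put() is assumed to pair kref_put() with kfree_rcu(), as frontbuffer_release() does above:

struct obj {
	struct kref ref;
	struct rcu_head rcu;
};

static struct obj *obj_get_rcu(struct obj __rcu **slot)
{
	struct obj *o;

	rcu_read_lock();
	do {
		o = rcu_dereference(*slot);
		if (!o)
			break;			/* nothing published */

		if (!kref_get_unless_zero(&o->ref))
			continue;		/* dying, reload and retry */

		if (o == rcu_access_pointer(*slot))
			break;			/* still current */

		obj_put(o);			/* lost a replacement race */
	} while (1);
	rcu_read_unlock();

	return o;
}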
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 848ce07a8ec2..8a98a1aa7adc 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -279,12 +279,21 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
+ struct intel_frontbuffer *from = NULL, *to = NULL;
WARN_ON(overlay->old_vma);
- intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
- vma ? vma->obj->frontbuffer : NULL,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
+ if (overlay->vma)
+ from = intel_frontbuffer_get(overlay->vma->obj);
+ if (vma)
+ to = intel_frontbuffer_get(vma->obj);
+
+ intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ if (to)
+ intel_frontbuffer_put(to);
+ if (from)
+ intel_frontbuffer_put(from);
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
@@ -766,7 +775,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
ret = PTR_ERR(vma);
goto out_pin_section;
}
- intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
+ i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
if (!overlay->active) {
u32 oconfig;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b9f504ba3b32..18ee708585a9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -20,7 +20,8 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}
static int clflush_work(struct dma_fence_work *base)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9937b4c341f1..f86400a191b0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -664,7 +664,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
i915_gem_object_unlock(obj);
if (write_domain)
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -784,7 +784,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index a50296cce0d8..a596548c07bf 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -280,7 +280,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
for_each_ggtt_vma(vma, obj)
intel_gt_flush_ggtt_writes(vma->vm->gt);
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
for_each_ggtt_vma(vma, obj) {
if (vma->iomap)
@@ -308,6 +308,30 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
obj->write_domain = 0;
}
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front) {
+ intel_frontbuffer_flush(front, origin);
+ intel_frontbuffer_put(front);
+ }
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (front) {
+ intel_frontbuffer_invalidate(front, origin);
+ intel_frontbuffer_put(front);
+ }
+}
+
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 458cd51331f1..4b93591fd5c7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -13,8 +13,8 @@
#include <drm/i915_drm.h>
+#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
-
#include "i915_gem_gtt.h"
void i915_gem_init__objects(struct drm_i915_private *i915);
@@ -463,4 +463,25 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin);
+
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+ __i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+ __i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 96008374a412..e3f3944fbd90 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -150,7 +150,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
- struct intel_frontbuffer *frontbuffer;
+ struct intel_frontbuffer __rcu *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index a459a42ad5c2..7e64b7d7d330 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -94,8 +94,9 @@ static int __gt_park(struct intel_wakeref *wf)
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
}
+ /* Defer dropping the display power well for 100ms, it's slow! */
GEM_BUG_ON(!wakeref);
- intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
i915_globals_park();
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d034fa413164..905890e3ac24 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -161,7 +161,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
@@ -169,7 +169,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(vaddr, args->size);
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
return 0;
}
@@ -589,7 +589,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_unpin;
}
- intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -631,7 +631,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
@@ -721,7 +721,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+ i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 2814218c5ba1..6f09aa0be80a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -144,61 +144,40 @@ static inline s64 ktime_since(const ktime_t kt)
return ktime_to_ns(ktime_sub(ktime_get(), kt));
}
-static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
-{
- u64 val;
-
- /*
- * We think we are runtime suspended.
- *
- * Report the delta from when the device was suspended to now,
- * on top of the last known real value, as the approximated RC6
- * counter value.
- */
- val = ktime_since(pmu->sleep_last);
- val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
- return val;
-}
-
-static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
-{
- /*
- * If we are coming back from being runtime suspended we must
- * be careful not to report a larger value than returned
- * previously.
- */
- if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = val;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- }
-
- return val;
-}
-
static u64 get_rc6(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
+ bool awake = false;
u64 val;
- val = 0;
if (intel_gt_pm_get_if_awake(gt)) {
val = __get_rc6(gt);
intel_gt_pm_put_async(gt);
+ awake = true;
}
spin_lock_irqsave(&pmu->lock, flags);
- if (val)
- val = __pmu_update_rc6(pmu, val);
+ if (awake) {
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ /*
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ val = ktime_since(pmu->sleep_last);
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
+ }
+
+ if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
+ val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
else
- val = __pmu_estimate_rc6(pmu);
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
spin_unlock_irqrestore(&pmu->lock, flags);
@@ -210,20 +189,11 @@ static void park_rc6(struct drm_i915_private *i915)
struct i915_pmu *pmu = &i915->pmu;
if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
- __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
pmu->sleep_last = ktime_get();
}
-static void unpark_rc6(struct drm_i915_private *i915)
-{
- struct i915_pmu *pmu = &i915->pmu;
-
- /* Estimate how long we slept and accumulate that into rc6 counters */
- if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
- __pmu_estimate_rc6(pmu);
-}
-
#else
static u64 get_rc6(struct intel_gt *gt)
@@ -232,7 +202,6 @@ static u64 get_rc6(struct intel_gt *gt)
}
static void park_rc6(struct drm_i915_private *i915) {}
-static void unpark_rc6(struct drm_i915_private *i915) {}
#endif
@@ -281,8 +250,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
*/
__i915_pmu_maybe_start_timer(pmu);
- unpark_rc6(i915);
-
spin_unlock_irq(&pmu->lock);
}
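With the estimate/real split removed, get_rc6() reduces to one invariant: whether the sample is a live register read or a sleep-time extrapolation, never report a value lower than the last one handed to userspace. A compact model of that clamp:

/* Monotonic report: an estimate may undershoot a previous real read,
 * so the last reported value acts as a floor. */
static u64 report_monotonic(u64 val, u64 *last_reported)
{
	if (val < *last_reported)
		val = *last_reported;
	else
		*last_reported = val;

	return val;
}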
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index bf52e3983631..6c1647c5daf2 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -18,7 +18,7 @@ enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
__I915_SAMPLE_RC6,
- __I915_SAMPLE_RC6_ESTIMATED,
+ __I915_SAMPLE_RC6_LAST_REPORTED,
__I915_NUM_PMU_SAMPLERS
};
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e5512f26e20a..01c822256b39 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1104,8 +1104,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return err;
if (flags & EXEC_OBJECT_WRITE) {
- if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
- i915_active_add_request(&obj->frontbuffer->write, rq);
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (unlikely(front)) {
+ if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+ i915_active_add_request(&front->write, rq);
+ intel_frontbuffer_put(front);
+ }
dma_resv_add_excl_fence(vma->resv, &rq->fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 8a7732c0bef3..286d3cfda7de 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -259,7 +259,7 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_SENSOR },
+ { SCMI_PROTOCOL_SENSOR, "hwmon" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index db7fbc0c0893..614f0c674995 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -490,6 +490,42 @@ int of_cpu_node_to_id(struct device_node *cpu_node)
EXPORT_SYMBOL(of_cpu_node_to_id);
/**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function checks for both and returns the idle state node for
+ * the requested index.
+ *
+ * In case an idle state node is found at @index, the refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index)
+{
+ struct of_phandle_args args;
+ int err;
+
+ err = of_parse_phandle_with_args(cpu_node, "power-domains",
+ "#power-domain-cells", 0, &args);
+ if (!err) {
+ struct device_node *state_node =
+ of_parse_phandle(args.np, "domain-idle-states", index);
+
+ of_node_put(args.np);
+ if (state_node)
+ return state_node;
+ }
+
+ return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);
+
+/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
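Callers typically iterate the index until the helper returns NULL, which makes the flattened and hierarchical layouts indistinguishable to them. A hedged sketch of such a caller:

/* Count a CPU's idle states regardless of which DT layout describes them. */
static int count_cpu_idle_states(struct device_node *cpu_node)
{
	struct device_node *state;
	int i = 0;

	while ((state = of_get_cpu_state_node(cpu_node, i))) {
		of_node_put(state);	/* drop the reference the helper took */
		i++;
	}

	return i;
}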
diff --git a/drivers/reset/reset-scmi.c b/drivers/reset/reset-scmi.c
index b46df80ec6c3..8d3a858e3b19 100644
--- a/drivers/reset/reset-scmi.c
+++ b/drivers/reset/reset-scmi.c
@@ -108,7 +108,7 @@ static int scmi_reset_probe(struct scmi_device *sdev)
}
static const struct scmi_device_id scmi_id_table[] = {
- { SCMI_PROTOCOL_RESET },
+ { SCMI_PROTOCOL_RESET, "reset" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c4e4b0136f86..4bc794d2f51c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
cxgbi_hbas_remove(cdev);
cxgbi_device_portmap_cleanup(cdev);
- cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+ if (cdev->cdev2ppm)
+ cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
cxgbi_free_big_mem(cdev->pmap.port_csk);
kfree(cdev);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2e6a68d9ea4f..a5ecbce4eda2 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -5385,7 +5385,6 @@ static const struct file_operations lpfc_debugfs_ras_log = {
.read = lpfc_debugfs_read,
.release = lpfc_debugfs_ras_log_release,
};
-#endif
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
@@ -5557,7 +5556,7 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.write = lpfc_idiag_extacc_write,
.release = lpfc_idiag_cmd_release,
};
-
+#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
* @phba: Pointer to HBA context object.
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6298b1729098..6a04fdb3fbf2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5883,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "1804 Invalid asynchrous event code: "
+ "1804 Invalid asynchronous event code: "
"x%x\n", bf_get(lpfc_trailer_code,
&cq_event->cqe.mcqe_cmpl));
break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index c82b5792da98..625c046ac4ef 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8555,7 +8555,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
- /* wake up worker thread to post asynchronlous mailbox command */
+ /* wake up worker thread to post asynchronous mailbox command */
lpfc_worker_wake_up(phba);
}
@@ -8823,7 +8823,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
return rc;
}
- /* Now, interrupt mode asynchrous mailbox command */
+ /* Now, interrupt mode asynchronous mailbox command */
rc = lpfc_mbox_cmd_check(phba, mboxq);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -13112,11 +13112,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
}
/**
- * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
* @cqe: Pointer to mailbox completion queue entry.
*
- * This routine process a mailbox completion queue entry with asynchrous
+ * This routine processes a mailbox completion queue entry with an asynchronous
* event.
*
* Return: true if work posted to worker thread, otherwise false.
@@ -13270,7 +13270,7 @@ out_no_mqe_complete:
* @cqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry; it invokes the
- * proper mailbox complete handling or asynchrous event handling routine
+ * proper mailbox complete handling or asynchronous event handling routine
* according to the MCQE's async bit.
*
* Return: true if work posted to worker thread, otherwise false.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 848fbec7bda6..45fd8dfb7c40 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5248,7 +5248,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
&ct->chain_buffer_dma);
if (!ct->chain_buffer) {
ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
- _base_release_memory_pools(ioc);
goto out;
}
}
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index d326915e0f40..61731e01f94b 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -63,7 +63,7 @@ static const int b15_cpubiuctrl_regs[] = {
[CPU_WRITEBACK_CTRL_REG] = -1,
};
-/* Odd cases, e.g: 7260 */
+/* Odd cases, e.g.: 7260A0 */
static const int b53_cpubiuctrl_no_wb_regs[] = {
[CPU_CREDIT_REG] = 0x0b0,
[CPU_MCP_FLOW_REG] = 0x0b4,
@@ -76,6 +76,12 @@ static const int b53_cpubiuctrl_regs[] = {
[CPU_WRITEBACK_CTRL_REG] = 0x22c,
};
+static const int a72_cpubiuctrl_regs[] = {
+ [CPU_CREDIT_REG] = 0x18,
+ [CPU_MCP_FLOW_REG] = 0x1c,
+ [CPU_WRITEBACK_CTRL_REG] = 0x20,
+};
+
#define NUM_CPU_BIUCTRL_REGS 3
static int __init mcp_write_pairing_set(void)
@@ -101,25 +107,29 @@ static int __init mcp_write_pairing_set(void)
return 0;
}
-static const u32 b53_mach_compat[] = {
+static const u32 a72_b53_mach_compat[] = {
+ 0x7211,
+ 0x7216,
+ 0x7255,
+ 0x7260,
0x7268,
0x7271,
0x7278,
};
-static void __init mcp_b53_set(void)
+static void __init mcp_a72_b53_set(void)
{
unsigned int i;
u32 reg;
reg = brcmstb_get_family_id();
- for (i = 0; i < ARRAY_SIZE(b53_mach_compat); i++) {
- if (BRCM_ID(reg) == b53_mach_compat[i])
+ for (i = 0; i < ARRAY_SIZE(a72_b53_mach_compat); i++) {
+ if (BRCM_ID(reg) == a72_b53_mach_compat[i])
break;
}
- if (i == ARRAY_SIZE(b53_mach_compat))
+ if (i == ARRAY_SIZE(a72_b53_mach_compat))
return;
/* Set all 3 MCP interfaces to 8 credits */
@@ -157,6 +167,7 @@ static void __init mcp_b53_set(void)
static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
{
struct device_node *cpu_dn;
+ u32 family_id;
int ret = 0;
cpubiuctrl_base = of_iomap(np, 0);
@@ -179,13 +190,16 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
cpubiuctrl_regs = b15_cpubiuctrl_regs;
else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
cpubiuctrl_regs = b53_cpubiuctrl_regs;
+ else if (of_device_is_compatible(cpu_dn, "arm,cortex-a72"))
+ cpubiuctrl_regs = a72_cpubiuctrl_regs;
else {
pr_err("unsupported CPU\n");
ret = -EINVAL;
}
of_node_put(cpu_dn);
- if (BRCM_ID(brcmstb_get_family_id()) == 0x7260)
+ family_id = brcmstb_get_family_id();
+ if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
of_node_put(np);
@@ -248,7 +262,7 @@ static int __init brcmstb_biuctrl_init(void)
return ret;
}
- mcp_b53_set();
+ mcp_a72_b53_set();
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
#endif
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index f93492b72c04..ba2b8b51d2d9 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -192,21 +192,25 @@ config ARCH_R8A774C0
help
This enables support for the Renesas RZ/G2E SoC.
+config ARCH_R8A77950
+ bool
+
+config ARCH_R8A77951
+ bool
+
config ARCH_R8A7795
bool "Renesas R-Car H3 SoC Platform"
+ select ARCH_R8A77950
+ select ARCH_R8A77951
select ARCH_RCAR_GEN3
select SYSC_R8A7795
help
This enables support for the Renesas R-Car H3 SoC.
config ARCH_R8A77960
- bool
+ bool "Renesas R-Car M3-W SoC Platform"
select ARCH_RCAR_GEN3
select SYSC_R8A77960
-
-config ARCH_R8A7796
- bool "Renesas R-Car M3-W SoC Platform"
- select ARCH_R8A77960
help
This enables support for the Renesas R-Car M3-W SoC.
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 14d05a070dd3..2af2e0dd83fe 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -21,7 +21,7 @@ static int rcar_rst_enable_wdt_reset(void __iomem *base)
struct rst_config {
unsigned int modemr; /* Mode Monitoring Register Offset */
- int (*configure)(void *base); /* Platform specific configuration */
+ int (*configure)(void __iomem *base); /* Platform specific config */
};
static const struct rst_config rcar_rst_gen1 __initconst = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 6949ea8bc387..51ffd5c002de 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
}
bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
- bip_set_seed(bip, bio->bi_iter.bi_sector);
+ /* virtual start sector must be in integrity interval units */
+ bip_set_seed(bip, bio->bi_iter.bi_sector >>
+ (bi->interval_exp - SECTOR_SHIFT));
pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
(unsigned long long)bip->bip_iter.bi_sector);
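The shift converts a 512-byte sector count into protection-interval units. Worked example: with a 4096-byte integrity interval, bi->interval_exp is 12 and SECTOR_SHIFT is 9, so the seed is bi_sector >> 3; sector 80 therefore seeds interval 10. A one-function model:

/* Convert a 512-byte sector index into protection-interval units. */
static u64 seed_from_sector(u64 sector, unsigned int interval_exp)
{
	return sector >> (interval_exp - 9);	/* 9 == SECTOR_SHIFT */
}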
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index b830e0a87fba..99698b8a3a74 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -534,13 +534,13 @@ static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
-static optee_invoke_fn *get_invoke_func(struct device_node *np)
+static optee_invoke_fn *get_invoke_func(struct device *dev)
{
const char *method;
- pr_info("probing for conduit method from DT.\n");
+ pr_info("probing for conduit method.\n");
- if (of_property_read_string(np, "method", &method)) {
+ if (device_property_read_string(dev, "method", &method)) {
pr_warn("missing \"method\" property\n");
return ERR_PTR(-ENXIO);
}
@@ -554,7 +554,37 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np)
return ERR_PTR(-EINVAL);
}
-static struct optee *optee_probe(struct device_node *np)
+static int optee_remove(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ optee_disable_shm_cache(optee);
+
+ /*
+ * The two devices have to be unregistered before we can free the
+ * other resources.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+
+ tee_shm_pool_free(optee->pool);
+ if (optee->memremaped_shm)
+ memunmap(optee->memremaped_shm);
+ optee_wait_queue_exit(&optee->wait_queue);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+
+ kfree(optee);
+
+ return 0;
+}
+
+static int optee_probe(struct platform_device *pdev)
{
optee_invoke_fn *invoke_fn;
struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
@@ -564,25 +594,25 @@ static struct optee *optee_probe(struct device_node *np)
u32 sec_caps;
int rc;
- invoke_fn = get_invoke_func(np);
+ invoke_fn = get_invoke_func(&pdev->dev);
if (IS_ERR(invoke_fn))
- return (void *)invoke_fn;
+ return PTR_ERR(invoke_fn);
if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
pr_warn("api uid mismatch\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
optee_msg_get_os_revision(invoke_fn);
if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
pr_warn("api revision mismatch\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
pr_warn("capabilities mismatch\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
/*
@@ -598,7 +628,7 @@ static struct optee *optee_probe(struct device_node *np)
pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
if (IS_ERR(pool))
- return (void *)pool;
+ return PTR_ERR(pool);
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
@@ -643,7 +673,16 @@ static struct optee *optee_probe(struct device_node *np)
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
- return optee;
+ platform_set_drvdata(pdev, optee);
+
+ rc = optee_enumerate_devices();
+ if (rc) {
+ optee_remove(pdev);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+ return 0;
err:
if (optee) {
/*
@@ -659,92 +698,28 @@ err:
tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
- return ERR_PTR(rc);
-}
-
-static void optee_remove(struct optee *optee)
-{
- /*
- * Ask OP-TEE to free all cached shared memory objects to decrease
- * reference counters and also avoid wild pointers in secure world
- * into the old shared memory range.
- */
- optee_disable_shm_cache(optee);
-
- /*
- * The two devices has to be unregistered before we can free the
- * other resources.
- */
- tee_device_unregister(optee->supp_teedev);
- tee_device_unregister(optee->teedev);
-
- tee_shm_pool_free(optee->pool);
- if (optee->memremaped_shm)
- memunmap(optee->memremaped_shm);
- optee_wait_queue_exit(&optee->wait_queue);
- optee_supp_uninit(&optee->supp);
- mutex_destroy(&optee->call_queue.mutex);
-
- kfree(optee);
+ return rc;
}
-static const struct of_device_id optee_match[] = {
+static const struct of_device_id optee_dt_match[] = {
{ .compatible = "linaro,optee-tz" },
{},
};
-
-static struct optee *optee_svc;
-
-static int __init optee_driver_init(void)
-{
- struct device_node *fw_np = NULL;
- struct device_node *np = NULL;
- struct optee *optee = NULL;
- int rc = 0;
-
- /* Node is supposed to be below /firmware */
- fw_np = of_find_node_by_name(NULL, "firmware");
- if (!fw_np)
- return -ENODEV;
-
- np = of_find_matching_node(fw_np, optee_match);
- if (!np || !of_device_is_available(np)) {
- of_node_put(np);
- return -ENODEV;
- }
-
- optee = optee_probe(np);
- of_node_put(np);
-
- if (IS_ERR(optee))
- return PTR_ERR(optee);
-
- rc = optee_enumerate_devices();
- if (rc) {
- optee_remove(optee);
- return rc;
- }
-
- pr_info("initialized driver\n");
-
- optee_svc = optee;
-
- return 0;
-}
-module_init(optee_driver_init);
-
-static void __exit optee_driver_exit(void)
-{
- struct optee *optee = optee_svc;
-
- optee_svc = NULL;
- if (optee)
- optee_remove(optee);
-}
-module_exit(optee_driver_exit);
+MODULE_DEVICE_TABLE(of, optee_dt_match);
+
+static struct platform_driver optee_driver = {
+ .probe = optee_probe,
+ .remove = optee_remove,
+ .driver = {
+ .name = "optee",
+ .of_match_table = optee_dt_match,
+ },
+};
+module_platform_driver(optee_driver);
MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_SUPPORTED_DEVICE("");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:optee");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index ce9bac756c2a..40705e862451 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1693,6 +1693,7 @@ struct cifs_fattr {
struct timespec64 cf_atime;
struct timespec64 cf_mtime;
struct timespec64 cf_ctime;
+ u32 cf_cifstag;
};
static inline void free_dfs_info_param(struct dfs_info3_param *param)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 3925a7bfc74d..d17587c2c4ab 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -139,6 +139,28 @@ retry:
dput(dentry);
}
+static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
+{
+ if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
+ return false;
+ /*
+ * The DFS tags should only be interpreted by the server side as per
+ * MS-FSCC 2.1.2.1, but let's include them anyway.
+ *
+ * Besides, if cf_cifstag is unset (0), then we still need it to be
+ * revalidated to know exactly what reparse point it is.
+ */
+ switch (fattr->cf_cifstag) {
+ case IO_REPARSE_TAG_DFS:
+ case IO_REPARSE_TAG_DFSR:
+ case IO_REPARSE_TAG_SYMLINK:
+ case IO_REPARSE_TAG_NFS:
+ case 0:
+ return true;
+ }
+ return false;
+}
+
static void
cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
{
@@ -158,7 +180,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
* is a symbolic link, DFS referral or a reparse point with a direct
* access like junctions, deduplicated files, NFS symlinks.
*/
- if (fattr->cf_cifsattrs & ATTR_REPARSE)
+ if (reparse_file_needs_reval(fattr))
fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
/* non-unix readdir doesn't provide nlink */
@@ -194,19 +216,37 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
}
}
+static void __dir_info_to_fattr(struct cifs_fattr *fattr, const void *info)
+{
+ const FILE_DIRECTORY_INFO *fi = info;
+
+ memset(fattr, 0, sizeof(*fattr));
+ fattr->cf_cifsattrs = le32_to_cpu(fi->ExtFileAttributes);
+ fattr->cf_eof = le64_to_cpu(fi->EndOfFile);
+ fattr->cf_bytes = le64_to_cpu(fi->AllocationSize);
+ fattr->cf_createtime = le64_to_cpu(fi->CreationTime);
+ fattr->cf_atime = cifs_NTtimeToUnix(fi->LastAccessTime);
+ fattr->cf_ctime = cifs_NTtimeToUnix(fi->ChangeTime);
+ fattr->cf_mtime = cifs_NTtimeToUnix(fi->LastWriteTime);
+}
+
void
cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
struct cifs_sb_info *cifs_sb)
{
- memset(fattr, 0, sizeof(*fattr));
- fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes);
- fattr->cf_eof = le64_to_cpu(info->EndOfFile);
- fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
- fattr->cf_createtime = le64_to_cpu(info->CreationTime);
- fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
- fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
- fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
+ __dir_info_to_fattr(fattr, info);
+ cifs_fill_common_info(fattr, cifs_sb);
+}
+static void cifs_fulldir_info_to_fattr(struct cifs_fattr *fattr,
+ SEARCH_ID_FULL_DIR_INFO *info,
+ struct cifs_sb_info *cifs_sb)
+{
+ __dir_info_to_fattr(fattr, info);
+
+ /* See MS-FSCC 2.4.18 FileIdFullDirectoryInformation */
+ if (fattr->cf_cifsattrs & ATTR_REPARSE)
+ fattr->cf_cifstag = le32_to_cpu(info->EaSize);
cifs_fill_common_info(fattr, cifs_sb);
}
@@ -755,6 +795,11 @@ static int cifs_filldir(char *find_entry, struct file *file,
(FIND_FILE_STANDARD_INFO *)find_entry,
cifs_sb);
break;
+ case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+ cifs_fulldir_info_to_fattr(&fattr,
+ (SEARCH_ID_FULL_DIR_INFO *)find_entry,
+ cifs_sb);
+ break;
default:
cifs_dir_info_to_fattr(&fattr,
(FILE_DIRECTORY_INFO *)find_entry,
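Per MS-FSCC 2.4.18, FileIdFullDirectoryInformation reuses the EaSize field as the reparse tag whenever the entry carries FILE_ATTRIBUTE_REPARSE_POINT, which is exactly what cifs_fulldir_info_to_fattr() above copies into cf_cifstag. A minimal model of that conditional reuse:

/* EaSize doubles as the reparse tag when the entry is a reparse point. */
#define FILE_ATTRIBUTE_REPARSE_POINT	0x00000400

static unsigned int entry_reparse_tag(unsigned int attrs, unsigned int ea_size)
{
	return (attrs & FILE_ATTRIBUTE_REPARSE_POINT) ? ea_size : 0;
}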
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 8b0b512c5792..afe1f03aabe3 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
goto out;
- if (oparms->tcon->use_resilient) {
+ if (oparms->tcon->use_resilient) {
/* default timeout is 0, servers pick default (120 seconds) */
nr_ioctl_req.Timeout =
cpu_to_le32(oparms->tcon->handle_timeout);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 11e80b7252a8..541c8a3e0bbb 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -92,7 +92,6 @@ struct io_wqe {
struct io_wqe_acct acct[2];
struct hlist_nulls_head free_list;
- struct hlist_nulls_head busy_list;
struct list_head all_list;
struct io_wq *wq;
@@ -327,7 +326,6 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
if (worker->flags & IO_WORKER_F_FREE) {
worker->flags &= ~IO_WORKER_F_FREE;
hlist_nulls_del_init_rcu(&worker->nulls_node);
- hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
}
/*
@@ -365,7 +363,6 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
{
if (!(worker->flags & IO_WORKER_F_FREE)) {
worker->flags |= IO_WORKER_F_FREE;
- hlist_nulls_del_init_rcu(&worker->nulls_node);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
}
@@ -432,6 +429,8 @@ next:
if (signal_pending(current))
flush_signals(current);
+ cond_resched();
+
spin_lock_irq(&worker->lock);
worker->cur_work = work;
spin_unlock_irq(&worker->lock);
@@ -798,10 +797,6 @@ void io_wq_cancel_all(struct io_wq *wq)
set_bit(IO_WQ_BIT_CANCEL, &wq->state);
- /*
- * Browse both lists, as there's a gap between handing work off
- * to a worker and the worker putting itself on the busy_list
- */
rcu_read_lock();
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
@@ -1049,7 +1044,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
spin_lock_init(&wqe->lock);
INIT_WQ_LIST(&wqe->work_list);
INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
- INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
INIT_LIST_HEAD(&wqe->all_list);
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6f084e3cf835..562e3a1a1bf9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -330,6 +330,26 @@ struct io_timeout {
struct file *file;
u64 addr;
int flags;
+ unsigned count;
+};
+
+struct io_rw {
+ /* NOTE: kiocb has the file as the first member, so don't do it here */
+ struct kiocb kiocb;
+ u64 addr;
+ u64 len;
+};
+
+struct io_connect {
+ struct file *file;
+ struct sockaddr __user *addr;
+ int addr_len;
+};
+
+struct io_sr_msg {
+ struct file *file;
+ struct user_msghdr __user *msg;
+ int msg_flags;
};
struct io_async_connect {
@@ -351,7 +371,6 @@ struct io_async_rw {
};
struct io_async_ctx {
- struct io_uring_sqe sqe;
union {
struct io_async_rw rw;
struct io_async_msghdr msg;
@@ -369,15 +388,16 @@ struct io_async_ctx {
struct io_kiocb {
union {
struct file *file;
- struct kiocb rw;
+ struct io_rw rw;
struct io_poll_iocb poll;
struct io_accept accept;
struct io_sync sync;
struct io_cancel cancel;
struct io_timeout timeout;
+ struct io_connect connect;
+ struct io_sr_msg sr_msg;
};
- const struct io_uring_sqe *sqe;
struct io_async_ctx *io;
struct file *ring_file;
int ring_fd;
@@ -411,7 +431,6 @@ struct io_kiocb {
#define REQ_F_INFLIGHT 16384 /* on inflight list */
#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
#define REQ_F_HARDLINK 65536 /* doesn't sever on completion < 0 */
-#define REQ_F_PREPPED 131072 /* request already opcode prepared */
u64 user_data;
u32 result;
u32 sequence;
@@ -609,33 +628,31 @@ static inline bool io_prep_async_work(struct io_kiocb *req,
{
bool do_hashed = false;
- if (req->sqe) {
- switch (req->opcode) {
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- /* only regular files should be hashed for writes */
- if (req->flags & REQ_F_ISREG)
- do_hashed = true;
- /* fall-through */
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_SENDMSG:
- case IORING_OP_RECVMSG:
- case IORING_OP_ACCEPT:
- case IORING_OP_POLL_ADD:
- case IORING_OP_CONNECT:
- /*
- * We know REQ_F_ISREG is not set on some of these
- * opcodes, but this enables us to keep the check in
- * just one place.
- */
- if (!(req->flags & REQ_F_ISREG))
- req->work.flags |= IO_WQ_WORK_UNBOUND;
- break;
- }
- if (io_req_needs_user(req))
- req->work.flags |= IO_WQ_WORK_NEEDS_USER;
+ switch (req->opcode) {
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ /* only regular files should be hashed for writes */
+ if (req->flags & REQ_F_ISREG)
+ do_hashed = true;
+ /* fall-through */
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ case IORING_OP_ACCEPT:
+ case IORING_OP_POLL_ADD:
+ case IORING_OP_CONNECT:
+ /*
+ * We know REQ_F_ISREG is not set on some of these
+ * opcodes, but this enables us to keep the check in
+ * just one place.
+ */
+ if (!(req->flags & REQ_F_ISREG))
+ req->work.flags |= IO_WQ_WORK_UNBOUND;
+ break;
}
+ if (io_req_needs_user(req))
+ req->work.flags |= IO_WQ_WORK_NEEDS_USER;
*link = io_prep_linked_timeout(req);
return do_hashed;
@@ -1180,7 +1197,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
ret = 0;
list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
- struct kiocb *kiocb = &req->rw;
+ struct kiocb *kiocb = &req->rw.kiocb;
/*
* Move completed entries to our local list. If we find a
@@ -1335,7 +1352,7 @@ static inline void req_set_fail_links(struct io_kiocb *req)
static void io_complete_rw_common(struct kiocb *kiocb, long res)
{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
@@ -1347,7 +1364,7 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res)
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
io_complete_rw_common(kiocb, res);
io_put_req(req);
@@ -1355,7 +1372,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_kiocb *nxt = NULL;
io_complete_rw_common(kiocb, res);
@@ -1366,7 +1383,7 @@ static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
@@ -1400,7 +1417,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
list);
- if (list_req->rw.ki_filp != req->rw.ki_filp)
+ if (list_req->file != req->file)
ctx->poll_multi_file = true;
}
@@ -1471,11 +1488,11 @@ static bool io_file_supports_async(struct file *file)
return false;
}
-static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ bool force_nonblock)
{
- const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
- struct kiocb *kiocb = &req->rw;
+ struct kiocb *kiocb = &req->rw.kiocb;
unsigned ioprio;
int ret;
@@ -1524,6 +1541,12 @@ static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
return -EINVAL;
kiocb->ki_complete = io_complete_rw;
}
+
+ req->rw.addr = READ_ONCE(sqe->addr);
+ req->rw.len = READ_ONCE(sqe->len);
+ /* we own ->private, reuse it for the buffer index */
+ req->rw.kiocb.private = (void *) (unsigned long)
+ READ_ONCE(sqe->buf_index);
return 0;
}
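Embedding the kiocb as the first member of struct io_rw preserves the ability to recover the owning request from the struct kiocb * that completion callbacks receive, via container_of() through the nested member. A self-contained model of the pattern; the struct layouts are simplified stand-ins for the io_uring ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kiocb { int ki_flags; };
struct io_rw { struct kiocb kiocb; unsigned long long addr, len; };
struct io_kiocb { struct io_rw rw; unsigned long long user_data; };

int main(void)
{
	struct io_kiocb req = { .user_data = 42 };
	struct kiocb *kiocb = &req.rw.kiocb;	/* what a completion sees */
	struct io_kiocb *back = container_of(kiocb, struct io_kiocb, rw.kiocb);

	printf("%llu\n", back->user_data);	/* prints 42 */
	return 0;
}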
@@ -1557,11 +1580,11 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
io_rw_done(kiocb, ret);
}
-static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
- const struct io_uring_sqe *sqe,
+static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
struct iov_iter *iter)
{
- size_t len = READ_ONCE(sqe->len);
+ struct io_ring_ctx *ctx = req->ctx;
+ size_t len = req->rw.len;
struct io_mapped_ubuf *imu;
unsigned index, buf_index;
size_t offset;
@@ -1571,13 +1594,13 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
if (unlikely(!ctx->user_bufs))
return -EFAULT;
- buf_index = READ_ONCE(sqe->buf_index);
+ buf_index = (unsigned long) req->rw.kiocb.private;
if (unlikely(buf_index >= ctx->nr_user_bufs))
return -EFAULT;
index = array_index_nospec(buf_index, ctx->nr_user_bufs);
imu = &ctx->user_bufs[index];
- buf_addr = READ_ONCE(sqe->addr);
+ buf_addr = req->rw.addr;
/* overflow */
if (buf_addr + len < buf_addr)
@@ -1634,25 +1657,20 @@ static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
struct iovec **iovec, struct iov_iter *iter)
{
- const struct io_uring_sqe *sqe = req->sqe;
- void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
- size_t sqe_len = READ_ONCE(sqe->len);
+ void __user *buf = u64_to_user_ptr(req->rw.addr);
+ size_t sqe_len = req->rw.len;
u8 opcode;
- /*
- * We're reading ->opcode for the second time, but the first read
- * doesn't care whether it's _FIXED or not, so it doesn't matter
- * whether ->opcode changes concurrently. The first read does care
- * about whether it is a READ or a WRITE, so we don't trust this read
- * for that purpose and instead let the caller pass in the read/write
- * flag.
- */
opcode = req->opcode;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
- return io_import_fixed(req->ctx, rw, sqe, iter);
+ return io_import_fixed(req, rw, iter);
}
+ /* buffer index only valid with fixed read/write */
+ if (req->rw.kiocb.private)
+ return -EINVAL;
+
if (req->io) {
struct io_async_rw *iorw = &req->io->rw;
@@ -1750,13 +1768,7 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
static int io_alloc_async_ctx(struct io_kiocb *req)
{
req->io = kmalloc(sizeof(*req->io), GFP_KERNEL);
- if (req->io) {
- memcpy(&req->io->sqe, req->sqe, sizeof(req->io->sqe));
- req->sqe = &req->io->sqe;
- return 0;
- }
-
- return 1;
+ return req->io == NULL;
}
static void io_rw_async(struct io_wq_work **workptr)
@@ -1782,46 +1794,52 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
return 0;
}
-static int io_read_prep(struct io_kiocb *req, struct iovec **iovec,
- struct iov_iter *iter, bool force_nonblock)
+static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ bool force_nonblock)
{
+ struct io_async_ctx *io;
+ struct iov_iter iter;
ssize_t ret;
- ret = io_prep_rw(req, force_nonblock);
+ ret = io_prep_rw(req, sqe, force_nonblock);
if (ret)
return ret;
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
- return io_import_iovec(READ, req, iovec, iter);
+ if (!req->io)
+ return 0;
+
+ io = req->io;
+ io->rw.iov = io->rw.fast_iov;
+ req->io = NULL;
+ ret = io_import_iovec(READ, req, &io->rw.iov, &iter);
+ req->io = io;
+ if (ret < 0)
+ return ret;
+
+ io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+ return 0;
}
static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
- struct kiocb *kiocb = &req->rw;
+ struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
- struct file *file;
size_t iov_count;
ssize_t io_size, ret;
- if (!req->io) {
- ret = io_read_prep(req, &iovec, &iter, force_nonblock);
- if (ret < 0)
- return ret;
- } else {
- ret = io_import_iovec(READ, req, &iovec, &iter);
- if (ret < 0)
- return ret;
- }
+ ret = io_import_iovec(READ, req, &iovec, &iter);
+ if (ret < 0)
+ return ret;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
- req->rw.ki_flags &= ~IOCB_NOWAIT;
+ req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
- file = req->file;
io_size = ret;
if (req->flags & REQ_F_LINK)
req->result = io_size;
@@ -1830,20 +1848,20 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
* If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
* we know to async punt it even if it was opened O_NONBLOCK
*/
- if (force_nonblock && !io_file_supports_async(file)) {
+ if (force_nonblock && !io_file_supports_async(req->file)) {
req->flags |= REQ_F_MUST_PUNT;
goto copy_iov;
}
iov_count = iov_iter_count(&iter);
- ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
+ ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
if (!ret) {
ssize_t ret2;
- if (file->f_op->read_iter)
- ret2 = call_read_iter(file, kiocb, &iter);
+ if (req->file->f_op->read_iter)
+ ret2 = call_read_iter(req->file, kiocb, &iter);
else
- ret2 = loop_rw_iter(READ, file, kiocb, &iter);
+ ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
/*
* In case of a short read, punt to async. This can happen
@@ -1875,46 +1893,52 @@ out_free:
return ret;
}
-static int io_write_prep(struct io_kiocb *req, struct iovec **iovec,
- struct iov_iter *iter, bool force_nonblock)
+static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ bool force_nonblock)
{
+ struct io_async_ctx *io;
+ struct iov_iter iter;
ssize_t ret;
- ret = io_prep_rw(req, force_nonblock);
+ ret = io_prep_rw(req, sqe, force_nonblock);
if (ret)
return ret;
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- return io_import_iovec(WRITE, req, iovec, iter);
+ if (!req->io)
+ return 0;
+
+ io = req->io;
+ io->rw.iov = io->rw.fast_iov;
+ req->io = NULL;
+ ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter);
+ req->io = io;
+ if (ret < 0)
+ return ret;
+
+ io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+ return 0;
}
static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
- struct kiocb *kiocb = &req->rw;
+ struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
- struct file *file;
size_t iov_count;
ssize_t ret, io_size;
- if (!req->io) {
- ret = io_write_prep(req, &iovec, &iter, force_nonblock);
- if (ret < 0)
- return ret;
- } else {
- ret = io_import_iovec(WRITE, req, &iovec, &iter);
- if (ret < 0)
- return ret;
- }
+ ret = io_import_iovec(WRITE, req, &iovec, &iter);
+ if (ret < 0)
+ return ret;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
- req->rw.ki_flags &= ~IOCB_NOWAIT;
+ req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
- file = kiocb->ki_filp;
io_size = ret;
if (req->flags & REQ_F_LINK)
req->result = io_size;
@@ -1934,7 +1958,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
goto copy_iov;
iov_count = iov_iter_count(&iter);
- ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
+ ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
if (!ret) {
ssize_t ret2;
@@ -1946,17 +1970,17 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
* we return to userspace.
*/
if (req->flags & REQ_F_ISREG) {
- __sb_start_write(file_inode(file)->i_sb,
+ __sb_start_write(file_inode(req->file)->i_sb,
SB_FREEZE_WRITE, true);
- __sb_writers_release(file_inode(file)->i_sb,
+ __sb_writers_release(file_inode(req->file)->i_sb,
SB_FREEZE_WRITE);
}
kiocb->ki_flags |= IOCB_WRITE;
- if (file->f_op->write_iter)
- ret2 = call_write_iter(file, kiocb, &iter);
+ if (req->file->f_op->write_iter)
+ ret2 = call_write_iter(req->file, kiocb, &iter);
else
- ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
+ ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
if (!force_nonblock || ret2 != -EAGAIN) {
kiocb_done(kiocb, ret2, nxt, req->in_async);
} else {
@@ -1989,13 +2013,10 @@ static int io_nop(struct io_kiocb *req)
return 0;
}
-static int io_prep_fsync(struct io_kiocb *req)
+static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (!req->file)
return -EBADF;
@@ -2010,7 +2031,6 @@ static int io_prep_fsync(struct io_kiocb *req)
req->sync.off = READ_ONCE(sqe->off);
req->sync.len = READ_ONCE(sqe->len);
- req->flags |= REQ_F_PREPPED;
return 0;
}
@@ -2036,7 +2056,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
if (io_req_cancelled(req))
return;
- ret = vfs_fsync_range(req->rw.ki_filp, req->sync.off,
+ ret = vfs_fsync_range(req->file, req->sync.off,
end > 0 ? end : LLONG_MAX,
req->sync.flags & IORING_FSYNC_DATASYNC);
if (ret < 0)
@@ -2051,11 +2071,6 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct io_wq_work *work, *old_work;
- int ret;
-
- ret = io_prep_fsync(req);
- if (ret)
- return ret;
/* fsync always requires a blocking context */
if (force_nonblock) {
@@ -2071,13 +2086,10 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
return 0;
}
-static int io_prep_sfr(struct io_kiocb *req)
+static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (!req->file)
return -EBADF;
@@ -2089,7 +2101,6 @@ static int io_prep_sfr(struct io_kiocb *req)
req->sync.off = READ_ONCE(sqe->off);
req->sync.len = READ_ONCE(sqe->len);
req->sync.flags = READ_ONCE(sqe->sync_range_flags);
- req->flags |= REQ_F_PREPPED;
return 0;
}
@@ -2102,7 +2113,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
if (io_req_cancelled(req))
return;
- ret = sync_file_range(req->rw.ki_filp, req->sync.off, req->sync.len,
+ ret = sync_file_range(req->file, req->sync.off, req->sync.len,
req->sync.flags);
if (ret < 0)
req_set_fail_links(req);
@@ -2116,11 +2127,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct io_wq_work *work, *old_work;
- int ret;
-
- ret = io_prep_sfr(req);
- if (ret)
- return ret;
/* sync_file_range always requires a blocking context */
if (force_nonblock) {
@@ -2149,19 +2155,23 @@ static void io_sendrecv_async(struct io_wq_work **workptr)
}
#endif
-static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
- const struct io_uring_sqe *sqe = req->sqe;
- struct user_msghdr __user *msg;
- unsigned flags;
+ struct io_sr_msg *sr = &req->sr_msg;
+ struct io_async_ctx *io = req->io;
+
+ sr->msg_flags = READ_ONCE(sqe->msg_flags);
+ sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+ if (!io)
+ return 0;
- flags = READ_ONCE(sqe->msg_flags);
- msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
io->msg.iov = io->msg.fast_iov;
- return sendmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.iov);
+ return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+ &io->msg.iov);
#else
- return 0;
+ return -EOPNOTSUPP;
#endif
}
@@ -2169,7 +2179,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
#if defined(CONFIG_NET)
- const struct io_uring_sqe *sqe = req->sqe;
struct io_async_msghdr *kmsg = NULL;
struct socket *sock;
int ret;
@@ -2183,12 +2192,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
struct sockaddr_storage addr;
unsigned flags;
- flags = READ_ONCE(sqe->msg_flags);
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
-
if (req->io) {
kmsg = &req->io->msg;
kmsg->msg.msg_name = &addr;
@@ -2197,13 +2200,24 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
kmsg->iov = kmsg->fast_iov;
kmsg->msg.msg_iter.iov = kmsg->iov;
} else {
+ struct io_sr_msg *sr = &req->sr_msg;
+
kmsg = &io.msg;
kmsg->msg.msg_name = &addr;
- ret = io_sendmsg_prep(req, &io);
+
+ io.msg.iov = io.msg.fast_iov;
+ ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
+ sr->msg_flags, &io.msg.iov);
if (ret)
- goto out;
+ return ret;
}
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (force_nonblock && ret == -EAGAIN) {
if (req->io)
@@ -2218,7 +2232,6 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
ret = -EINTR;
}
-out:
if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
io_cqring_add_event(req, ret);
@@ -2231,20 +2244,24 @@ out:
#endif
}
-static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_recvmsg_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
- const struct io_uring_sqe *sqe = req->sqe;
- struct user_msghdr __user *msg;
- unsigned flags;
+ struct io_sr_msg *sr = &req->sr_msg;
+ struct io_async_ctx *io = req->io;
+
+ sr->msg_flags = READ_ONCE(sqe->msg_flags);
+ sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+ if (!io)
+ return 0;
- flags = READ_ONCE(sqe->msg_flags);
- msg = (struct user_msghdr __user *)(unsigned long) READ_ONCE(sqe->addr);
io->msg.iov = io->msg.fast_iov;
- return recvmsg_copy_msghdr(&io->msg.msg, msg, flags, &io->msg.uaddr,
- &io->msg.iov);
+ return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+ &io->msg.uaddr, &io->msg.iov);
#else
- return 0;
+ return -EOPNOTSUPP;
#endif
}
@@ -2252,7 +2269,6 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
#if defined(CONFIG_NET)
- const struct io_uring_sqe *sqe = req->sqe;
struct io_async_msghdr *kmsg = NULL;
struct socket *sock;
int ret;
@@ -2262,19 +2278,10 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
sock = sock_from_file(req->file, &ret);
if (sock) {
- struct user_msghdr __user *msg;
struct io_async_ctx io;
struct sockaddr_storage addr;
unsigned flags;
- flags = READ_ONCE(sqe->msg_flags);
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
-
- msg = (struct user_msghdr __user *) (unsigned long)
- READ_ONCE(sqe->addr);
if (req->io) {
kmsg = &req->io->msg;
kmsg->msg.msg_name = &addr;
@@ -2283,14 +2290,27 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
kmsg->iov = kmsg->fast_iov;
kmsg->msg.msg_iter.iov = kmsg->iov;
} else {
+ struct io_sr_msg *sr = &req->sr_msg;
+
kmsg = &io.msg;
kmsg->msg.msg_name = &addr;
- ret = io_recvmsg_prep(req, &io);
+
+ io.msg.iov = io.msg.fast_iov;
+ ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
+ sr->msg_flags, &io.msg.uaddr,
+ &io.msg.iov);
if (ret)
- goto out;
+ return ret;
}
- ret = __sys_recvmsg_sock(sock, &kmsg->msg, msg, kmsg->uaddr, flags);
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
+ ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
+ kmsg->uaddr, flags);
if (force_nonblock && ret == -EAGAIN) {
if (req->io)
return -EAGAIN;
@@ -2304,7 +2324,6 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
ret = -EINTR;
}
-out:
if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
io_cqring_add_event(req, ret);
@@ -2317,25 +2336,19 @@ out:
#endif
}
-static int io_accept_prep(struct io_kiocb *req)
+static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
- const struct io_uring_sqe *sqe = req->sqe;
struct io_accept *accept = &req->accept;
- if (req->flags & REQ_F_PREPPED)
- return 0;
-
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
if (sqe->ioprio || sqe->len || sqe->buf_index)
return -EINVAL;
- accept->addr = (struct sockaddr __user *)
- (unsigned long) READ_ONCE(sqe->addr);
- accept->addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
+ accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
accept->flags = READ_ONCE(sqe->accept_flags);
- req->flags |= REQ_F_PREPPED;
return 0;
#else
return -EOPNOTSUPP;
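u64_to_user_ptr() replaces the open-coded double cast previously used for addr and addr2. A minimal equivalent of the helper, assuming only that __user is sparse's address-space annotation:

	#include <stdint.h>

	#define __user		/* sparse address-space annotation; empty here */

	/* Go through uintptr_t so the narrowing is well-defined on 32-bit,
	 * then cast to a user-space pointer. */
	#define u64_to_user_ptr(x)	((void __user *)(uintptr_t)(x))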
@@ -2383,10 +2396,6 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
#if defined(CONFIG_NET)
int ret;
- ret = io_accept_prep(req);
- if (ret)
- return ret;
-
ret = __io_accept(req, nxt, force_nonblock);
if (ret == -EAGAIN && force_nonblock) {
req->work.func = io_accept_finish;
@@ -2400,18 +2409,27 @@ static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
#endif
}
-static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
- const struct io_uring_sqe *sqe = req->sqe;
- struct sockaddr __user *addr;
- int addr_len;
+ struct io_connect *conn = &req->connect;
+ struct io_async_ctx *io = req->io;
+
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+ return -EINVAL;
- addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
- addr_len = READ_ONCE(sqe->addr2);
- return move_addr_to_kernel(addr, addr_len, &io->connect.address);
+ conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ conn->addr_len = READ_ONCE(sqe->addr2);
+
+ if (!io)
+ return 0;
+
+ return move_addr_to_kernel(conn->addr, conn->addr_len,
+ &io->connect.address);
#else
- return 0;
+ return -EOPNOTSUPP;
#endif
}
@@ -2419,30 +2437,25 @@ static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
#if defined(CONFIG_NET)
- const struct io_uring_sqe *sqe = req->sqe;
struct io_async_ctx __io, *io;
unsigned file_flags;
- int addr_len, ret;
-
- if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
- return -EINVAL;
- if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
- return -EINVAL;
-
- addr_len = READ_ONCE(sqe->addr2);
- file_flags = force_nonblock ? O_NONBLOCK : 0;
+ int ret;
if (req->io) {
io = req->io;
} else {
- ret = io_connect_prep(req, &__io);
+ ret = move_addr_to_kernel(req->connect.addr,
+ req->connect.addr_len,
+ &__io.connect.address);
if (ret)
goto out;
io = &__io;
}
- ret = __sys_connect_file(req->file, &io->connect.address, addr_len,
- file_flags);
+ file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+ ret = __sys_connect_file(req->file, &io->connect.address,
+ req->connect.addr_len, file_flags);
if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
if (req->io)
return -EAGAIN;
@@ -2513,12 +2526,9 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
return -ENOENT;
}
-static int io_poll_remove_prep(struct io_kiocb *req)
+static int io_poll_remove_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
- const struct io_uring_sqe *sqe = req->sqe;
-
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
@@ -2526,7 +2536,6 @@ static int io_poll_remove_prep(struct io_kiocb *req)
return -EINVAL;
req->poll.addr = READ_ONCE(sqe->addr);
- req->flags |= REQ_F_PREPPED;
return 0;
}
@@ -2540,10 +2549,6 @@ static int io_poll_remove(struct io_kiocb *req)
u64 addr;
int ret;
- ret = io_poll_remove_prep(req);
- if (ret)
- return ret;
-
addr = req->poll.addr;
spin_lock_irq(&ctx->completion_lock);
ret = io_poll_cancel(ctx, addr);
@@ -2681,14 +2686,11 @@ static void io_poll_req_insert(struct io_kiocb *req)
hlist_add_head(&req->hash_node, list);
}
-static int io_poll_add_prep(struct io_kiocb *req)
+static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- const struct io_uring_sqe *sqe = req->sqe;
struct io_poll_iocb *poll = &req->poll;
u16 events;
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
@@ -2696,7 +2698,6 @@ static int io_poll_add_prep(struct io_kiocb *req)
if (!poll->file)
return -EBADF;
- req->flags |= REQ_F_PREPPED;
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
return 0;
@@ -2709,11 +2710,6 @@ static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
struct io_poll_table ipt;
bool cancel = false;
__poll_t mask;
- int ret;
-
- ret = io_poll_add_prep(req);
- if (ret)
- return ret;
INIT_IO_WORK(&req->work, io_poll_complete_work);
INIT_HLIST_NODE(&req->hash_node);
@@ -2832,12 +2828,9 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
return 0;
}
-static int io_timeout_remove_prep(struct io_kiocb *req)
+static int io_timeout_remove_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
- const struct io_uring_sqe *sqe = req->sqe;
-
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
@@ -2848,7 +2841,6 @@ static int io_timeout_remove_prep(struct io_kiocb *req)
if (req->timeout.flags)
return -EINVAL;
- req->flags |= REQ_F_PREPPED;
return 0;
}
@@ -2860,10 +2852,6 @@ static int io_timeout_remove(struct io_kiocb *req)
struct io_ring_ctx *ctx = req->ctx;
int ret;
- ret = io_timeout_remove_prep(req);
- if (ret)
- return ret;
-
spin_lock_irq(&ctx->completion_lock);
ret = io_timeout_cancel(ctx, req->timeout.addr);
@@ -2877,10 +2865,9 @@ static int io_timeout_remove(struct io_kiocb *req)
return 0;
}
-static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
+static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool is_timeout_link)
{
- const struct io_uring_sqe *sqe = req->sqe;
struct io_timeout_data *data;
unsigned flags;
@@ -2894,7 +2881,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
if (flags & ~IORING_TIMEOUT_ABS)
return -EINVAL;
- data = &io->timeout;
+ req->timeout.count = READ_ONCE(sqe->off);
+
+ if (!req->io && io_alloc_async_ctx(req))
+ return -ENOMEM;
+
+ data = &req->io->timeout;
data->req = req;
req->flags |= REQ_F_TIMEOUT;
@@ -2912,21 +2904,12 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
static int io_timeout(struct io_kiocb *req)
{
- const struct io_uring_sqe *sqe = req->sqe;
unsigned count;
struct io_ring_ctx *ctx = req->ctx;
struct io_timeout_data *data;
struct list_head *entry;
unsigned span = 0;
- int ret;
- if (!req->io) {
- if (io_alloc_async_ctx(req))
- return -ENOMEM;
- ret = io_timeout_prep(req, req->io, false);
- if (ret)
- return ret;
- }
data = &req->io->timeout;
/*
@@ -2934,7 +2917,7 @@ static int io_timeout(struct io_kiocb *req)
* timeout event to be satisfied. If it isn't set, then this is
* a pure timeout request, sequence isn't used.
*/
- count = READ_ONCE(sqe->off);
+ count = req->timeout.count;
if (!count) {
req->flags |= REQ_F_TIMEOUT_NOSEQ;
spin_lock_irq(&ctx->completion_lock);
@@ -3052,19 +3035,15 @@ done:
io_put_req_find_next(req, nxt);
}
-static int io_async_cancel_prep(struct io_kiocb *req)
+static int io_async_cancel_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
- const struct io_uring_sqe *sqe = req->sqe;
-
- if (req->flags & REQ_F_PREPPED)
- return 0;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
sqe->cancel_flags)
return -EINVAL;
- req->flags |= REQ_F_PREPPED;
req->cancel.addr = READ_ONCE(sqe->addr);
return 0;
}
@@ -3072,21 +3051,14 @@ static int io_async_cancel_prep(struct io_kiocb *req)
static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
{
struct io_ring_ctx *ctx = req->ctx;
- int ret;
-
- ret = io_async_cancel_prep(req);
- if (ret)
- return ret;
io_async_find_and_cancel(ctx, req, req->cancel.addr, nxt, 0);
return 0;
}
-static int io_req_defer_prep(struct io_kiocb *req)
+static int io_req_defer_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
- struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
- struct io_async_ctx *io = req->io;
- struct iov_iter iter;
ssize_t ret = 0;
switch (req->opcode) {
@@ -3094,61 +3066,47 @@ static int io_req_defer_prep(struct io_kiocb *req)
break;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
- /* ensure prep does right import */
- req->io = NULL;
- ret = io_read_prep(req, &iovec, &iter, true);
- req->io = io;
- if (ret < 0)
- break;
- io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
- ret = 0;
+ ret = io_read_prep(req, sqe, true);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
- /* ensure prep does right import */
- req->io = NULL;
- ret = io_write_prep(req, &iovec, &iter, true);
- req->io = io;
- if (ret < 0)
- break;
- io_req_map_rw(req, ret, iovec, inline_vecs, &iter);
- ret = 0;
+ ret = io_write_prep(req, sqe, true);
break;
case IORING_OP_POLL_ADD:
- ret = io_poll_add_prep(req);
+ ret = io_poll_add_prep(req, sqe);
break;
case IORING_OP_POLL_REMOVE:
- ret = io_poll_remove_prep(req);
+ ret = io_poll_remove_prep(req, sqe);
break;
case IORING_OP_FSYNC:
- ret = io_prep_fsync(req);
+ ret = io_prep_fsync(req, sqe);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_prep_sfr(req);
+ ret = io_prep_sfr(req, sqe);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg_prep(req, io);
+ ret = io_sendmsg_prep(req, sqe);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg_prep(req, io);
+ ret = io_recvmsg_prep(req, sqe);
break;
case IORING_OP_CONNECT:
- ret = io_connect_prep(req, io);
+ ret = io_connect_prep(req, sqe);
break;
case IORING_OP_TIMEOUT:
- ret = io_timeout_prep(req, io, false);
+ ret = io_timeout_prep(req, sqe, false);
break;
case IORING_OP_TIMEOUT_REMOVE:
- ret = io_timeout_remove_prep(req);
+ ret = io_timeout_remove_prep(req, sqe);
break;
case IORING_OP_ASYNC_CANCEL:
- ret = io_async_cancel_prep(req);
+ ret = io_async_cancel_prep(req, sqe);
break;
case IORING_OP_LINK_TIMEOUT:
- ret = io_timeout_prep(req, io, true);
+ ret = io_timeout_prep(req, sqe, true);
break;
case IORING_OP_ACCEPT:
- ret = io_accept_prep(req);
+ ret = io_accept_prep(req, sqe);
break;
default:
printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
@@ -3160,7 +3118,7 @@ static int io_req_defer_prep(struct io_kiocb *req)
return ret;
}
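With every opcode routed through the same sqe-based helpers, deferred and inline submission now share a single preparation path. A compilable toy version of that idea, with hypothetical names, is a table from opcode to a prep handler that copies what it needs out of the sqe:

	#include <stdio.h>

	struct sqe { int opcode; long addr; };
	struct request { long addr; };

	enum { OP_NOP, OP_READ, OP_WRITE, OP_LAST };

	static int prep_nop(struct request *r, const struct sqe *s) { (void)r; (void)s; return 0; }
	static int prep_rw(struct request *r, const struct sqe *s)  { r->addr = s->addr; return 0; }

	/* One prep handler per opcode; the sqe can be released afterwards
	 * whether the request runs now or is deferred. */
	static int (*const prep_table[OP_LAST])(struct request *, const struct sqe *) = {
		[OP_NOP]   = prep_nop,
		[OP_READ]  = prep_rw,
		[OP_WRITE] = prep_rw,
	};

	int main(void)
	{
		struct sqe sqe = { .opcode = OP_READ, .addr = 42 };
		struct request req = { 0 };

		if (prep_table[sqe.opcode](&req, &sqe))
			return 1;
		printf("prepped addr=%ld\n", req.addr);
		return 0;
	}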
-static int io_req_defer(struct io_kiocb *req)
+static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -3169,10 +3127,10 @@ static int io_req_defer(struct io_kiocb *req)
if (!req_need_defer(req) && list_empty(&ctx->defer_list))
return 0;
- if (io_alloc_async_ctx(req))
+ if (!req->io && io_alloc_async_ctx(req))
return -EAGAIN;
- ret = io_req_defer_prep(req);
+ ret = io_req_defer_prep(req, sqe);
if (ret < 0)
return ret;
@@ -3188,9 +3146,8 @@ static int io_req_defer(struct io_kiocb *req)
return -EIOCBQUEUED;
}
-__attribute__((nonnull))
-static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt, bool force_nonblock)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -3200,52 +3157,109 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
ret = io_nop(req);
break;
case IORING_OP_READV:
- if (unlikely(req->sqe->buf_index))
- return -EINVAL;
- ret = io_read(req, nxt, force_nonblock);
- break;
- case IORING_OP_WRITEV:
- if (unlikely(req->sqe->buf_index))
- return -EINVAL;
- ret = io_write(req, nxt, force_nonblock);
- break;
case IORING_OP_READ_FIXED:
+ if (sqe) {
+ ret = io_read_prep(req, sqe, force_nonblock);
+ if (ret < 0)
+ break;
+ }
ret = io_read(req, nxt, force_nonblock);
break;
+ case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
+ if (sqe) {
+ ret = io_write_prep(req, sqe, force_nonblock);
+ if (ret < 0)
+ break;
+ }
ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_FSYNC:
+ if (sqe) {
+ ret = io_prep_fsync(req, sqe);
+ if (ret < 0)
+ break;
+ }
ret = io_fsync(req, nxt, force_nonblock);
break;
case IORING_OP_POLL_ADD:
+ if (sqe) {
+ ret = io_poll_add_prep(req, sqe);
+ if (ret)
+ break;
+ }
ret = io_poll_add(req, nxt);
break;
case IORING_OP_POLL_REMOVE:
+ if (sqe) {
+ ret = io_poll_remove_prep(req, sqe);
+ if (ret < 0)
+ break;
+ }
ret = io_poll_remove(req);
break;
case IORING_OP_SYNC_FILE_RANGE:
+ if (sqe) {
+ ret = io_prep_sfr(req, sqe);
+ if (ret < 0)
+ break;
+ }
ret = io_sync_file_range(req, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
+ if (sqe) {
+ ret = io_sendmsg_prep(req, sqe);
+ if (ret < 0)
+ break;
+ }
ret = io_sendmsg(req, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
+ if (sqe) {
+ ret = io_recvmsg_prep(req, sqe);
+ if (ret)
+ break;
+ }
ret = io_recvmsg(req, nxt, force_nonblock);
break;
case IORING_OP_TIMEOUT:
+ if (sqe) {
+ ret = io_timeout_prep(req, sqe, false);
+ if (ret)
+ break;
+ }
ret = io_timeout(req);
break;
case IORING_OP_TIMEOUT_REMOVE:
+ if (sqe) {
+ ret = io_timeout_remove_prep(req, sqe);
+ if (ret)
+ break;
+ }
ret = io_timeout_remove(req);
break;
case IORING_OP_ACCEPT:
+ if (sqe) {
+ ret = io_accept_prep(req, sqe);
+ if (ret)
+ break;
+ }
ret = io_accept(req, nxt, force_nonblock);
break;
case IORING_OP_CONNECT:
+ if (sqe) {
+ ret = io_connect_prep(req, sqe);
+ if (ret)
+ break;
+ }
ret = io_connect(req, nxt, force_nonblock);
break;
case IORING_OP_ASYNC_CANCEL:
+ if (sqe) {
+ ret = io_async_cancel_prep(req, sqe);
+ if (ret)
+ break;
+ }
ret = io_async_cancel(req, nxt);
break;
default:
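The recurring `if (sqe)` pattern encodes the new contract of io_issue_sqe(): the first submission still holds the live sqe and preps from it, while an async re-issue passes NULL because the sqe slot may since have been reused by userspace. A hedged stand-alone sketch:

	#include <stddef.h>

	struct sqe { long addr; };
	struct request { long addr; int prepped; };

	static int prep(struct request *req, const struct sqe *sqe)
	{
		req->addr = sqe->addr;	/* copy out while the sqe is still live */
		req->prepped = 1;
		return 0;
	}

	/* First submission passes the live sqe; an async re-issue passes
	 * NULL and relies only on state captured into the request earlier. */
	static int issue(struct request *req, const struct sqe *sqe)
	{
		if (sqe) {
			int ret = prep(req, sqe);

			if (ret)
				return ret;
		}
		return req->prepped ? 0 : -1;
	}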
@@ -3289,7 +3303,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
req->in_async = true;
do {
- ret = io_issue_sqe(req, &nxt, false);
+ ret = io_issue_sqe(req, NULL, &nxt, false);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
@@ -3355,14 +3369,15 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
return table->files[index & IORING_FILE_TABLE_MASK];
}
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
+static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int fd, ret;
- flags = READ_ONCE(req->sqe->flags);
- fd = READ_ONCE(req->sqe->fd);
+ flags = READ_ONCE(sqe->flags);
+ fd = READ_ONCE(sqe->fd);
if (flags & IOSQE_IO_DRAIN)
req->flags |= REQ_F_IO_DRAIN;
@@ -3494,7 +3509,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
return nxt;
}
-static void __io_queue_sqe(struct io_kiocb *req)
+static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_kiocb *linked_timeout;
struct io_kiocb *nxt = NULL;
@@ -3503,7 +3518,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
again:
linked_timeout = io_prep_linked_timeout(req);
- ret = io_issue_sqe(req, &nxt, true);
+ ret = io_issue_sqe(req, sqe, &nxt, true);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -3550,7 +3565,7 @@ done_req:
}
}
-static void io_queue_sqe(struct io_kiocb *req)
+static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
int ret;
@@ -3560,7 +3575,7 @@ static void io_queue_sqe(struct io_kiocb *req)
}
req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
- ret = io_req_defer(req);
+ ret = io_req_defer(req, sqe);
if (ret) {
if (ret != -EIOCBQUEUED) {
io_cqring_add_event(req, ret);
@@ -3568,7 +3583,7 @@ static void io_queue_sqe(struct io_kiocb *req)
io_double_put_req(req);
}
} else
- __io_queue_sqe(req);
+ __io_queue_sqe(req, sqe);
}
static inline void io_queue_link_head(struct io_kiocb *req)
@@ -3577,25 +3592,25 @@ static inline void io_queue_link_head(struct io_kiocb *req)
io_cqring_add_event(req, -ECANCELED);
io_double_put_req(req);
} else
- io_queue_sqe(req);
+ io_queue_sqe(req, NULL);
}
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
IOSQE_IO_HARDLINK)
-static bool io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
- struct io_kiocb **link)
+static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_submit_state *state, struct io_kiocb **link)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
/* enforce forwards compatibility on users */
- if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
+ if (unlikely(sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
goto err_req;
}
- ret = io_req_set_file(state, req);
+ ret = io_req_set_file(state, req, sqe);
if (unlikely(ret)) {
err_req:
io_cqring_add_event(req, ret);
@@ -3613,10 +3628,10 @@ err_req:
if (*link) {
struct io_kiocb *prev = *link;
- if (req->sqe->flags & IOSQE_IO_DRAIN)
+ if (sqe->flags & IOSQE_IO_DRAIN)
(*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
- if (req->sqe->flags & IOSQE_IO_HARDLINK)
+ if (sqe->flags & IOSQE_IO_HARDLINK)
req->flags |= REQ_F_HARDLINK;
if (io_alloc_async_ctx(req)) {
@@ -3624,7 +3639,7 @@ err_req:
goto err_req;
}
- ret = io_req_defer_prep(req);
+ ret = io_req_defer_prep(req, sqe);
if (ret) {
/* fail even hard links since we don't submit */
prev->flags |= REQ_F_FAIL_LINK;
@@ -3632,15 +3647,18 @@ err_req:
}
trace_io_uring_link(ctx, req, prev);
list_add_tail(&req->link_list, &prev->link_list);
- } else if (req->sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
+ } else if (sqe->flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
req->flags |= REQ_F_LINK;
- if (req->sqe->flags & IOSQE_IO_HARDLINK)
+ if (sqe->flags & IOSQE_IO_HARDLINK)
req->flags |= REQ_F_HARDLINK;
INIT_LIST_HEAD(&req->link_list);
+ ret = io_req_defer_prep(req, sqe);
+ if (ret)
+ req->flags |= REQ_F_FAIL_LINK;
*link = req;
} else {
- io_queue_sqe(req);
+ io_queue_sqe(req, sqe);
}
return true;
@@ -3685,14 +3703,15 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
}
/*
- * Fetch an sqe, if one is available. Note that req->sqe will point to memory
+ * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
* that is mapped by userspace. This means that care needs to be taken to
* ensure that reads are stable, as we cannot rely on userspace always
* being a good citizen. If members of the sqe are validated and then later
* used, it's important that those reads are done through READ_ONCE() to
* prevent a re-load down the line.
*/
-static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
+static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ const struct io_uring_sqe **sqe_ptr)
{
struct io_rings *rings = ctx->rings;
u32 *sq_array = ctx->sq_array;
@@ -3719,9 +3738,9 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
* link list.
*/
req->sequence = ctx->cached_sq_head;
- req->sqe = &ctx->sq_sqes[head];
- req->opcode = READ_ONCE(req->sqe->opcode);
- req->user_data = READ_ONCE(req->sqe->user_data);
+ *sqe_ptr = &ctx->sq_sqes[head];
+ req->opcode = READ_ONCE((*sqe_ptr)->opcode);
+ req->user_data = READ_ONCE((*sqe_ptr)->user_data);
ctx->cached_sq_head++;
return true;
}
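The comment above this helper is the key invariant: the sqe lives in memory shared with userspace, so each field must be sampled exactly once through READ_ONCE(), or a hostile writer could change it between validation and use. A small userspace illustration of that single-sample rule (GNU C, hypothetical names):

	#include <stdint.h>

	#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))

	static int checked_use(const uint32_t *shared_len, uint32_t max)
	{
		uint32_t len = READ_ONCE(*shared_len);	/* sample once */

		if (len > max)		/* validate the sample ... */
			return -1;
		return (int)len;	/* ... and use that same sample */
	}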
@@ -3753,6 +3772,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
}
for (i = 0; i < nr; i++) {
+ const struct io_uring_sqe *sqe;
struct io_kiocb *req;
unsigned int sqe_flags;
@@ -3762,7 +3782,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
submitted = -EAGAIN;
break;
}
- if (!io_get_sqring(ctx, req)) {
+ if (!io_get_sqring(ctx, req, &sqe)) {
__io_free_req(req);
break;
}
@@ -3776,7 +3796,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
}
submitted++;
- sqe_flags = req->sqe->flags;
+ sqe_flags = sqe->flags;
req->ring_file = ring_file;
req->ring_fd = ring_fd;
@@ -3784,7 +3804,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
req->in_async = async;
req->needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
- if (!io_submit_sqe(req, statep, &link))
+ if (!io_submit_sqe(req, sqe, statep, &link))
break;
/*
* If previous wasn't linked and we have a linked command,
@@ -4702,7 +4722,7 @@ static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
return -EFAULT;
- dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
+ dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
dst->iov_len = ciov.iov_len;
return 0;
}
diff --git a/fs/locks.c b/fs/locks.c
index 6970f55daf54..44b6da032842 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2853,7 +2853,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
}
if (inode) {
/* userspace relies on this representation of dev_t */
- seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
+ seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
MAJOR(inode->i_sb->s_dev),
MINOR(inode->i_sb->s_dev), inode->i_ino);
} else {
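The locks.c change corrects the printf conversion for inode->i_ino, which is an unsigned long; since userspace relies on this /proc/locks representation, %ld would render inode numbers above LONG_MAX as negative. A toy demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long ino = (unsigned long)-1;	/* largest possible i_ino */

		printf("old: %ld\n", (long)ino);	/* prints -1 */
		printf("new: %lu\n", ino);		/* prints the real value */
		return 0;
	}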
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 6782f0d45ebe..49e5383d4222 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -19,6 +19,8 @@ struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e51ee772b9f5..01f04ed6ad92 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -95,6 +95,7 @@ enum cpuhp_state {
CPUHP_AP_OFFLINE,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
+ CPUHP_AP_CPU_PM_STARTING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index d3bbfddf616a..2dbde119721d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1175,6 +1175,7 @@ extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
diff --git a/include/linux/of.h b/include/linux/of.h
index 844f89e1b039..c669c0a4732f 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -351,6 +351,8 @@ extern const void *of_get_property(const struct device_node *node,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
+extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -765,6 +767,12 @@ static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
return NULL;
}
+static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index)
+{
+ return NULL;
+}
+
static inline int of_n_addr_cells(struct device_node *np)
{
return 0;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 5a31c711b896..9ec78ee53652 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -284,6 +284,8 @@ void of_genpd_del_provider(struct device_node *np);
int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
struct of_phandle_args *subdomain_spec);
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec);
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
@@ -322,6 +324,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
return -ENODEV;
}
+static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ return -ENODEV;
+}
+
static inline int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n)
{
diff --git a/include/linux/psci.h b/include/linux/psci.h
index ebe0a881d13d..a67712b73b6c 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -18,6 +18,8 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_suspend_enter(u32 state);
bool psci_power_state_is_valid(u32 state);
+int psci_set_osi_mode(void);
+bool psci_has_osi_support(void);
enum smccc_version {
SMCCC_VERSION_1_0,
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 881fea47c83d..5c873a59b387 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -257,6 +257,7 @@ enum scmi_std_protocol {
struct scmi_device {
u32 id;
u8 protocol_id;
+ const char *name;
struct device dev;
struct scmi_handle *handle;
};
@@ -264,11 +265,13 @@ struct scmi_device {
#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol);
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+ const char *name);
void scmi_device_destroy(struct scmi_device *scmi_dev);
struct scmi_device_id {
u8 protocol_id;
+ const char *name;
};
struct scmi_driver {
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
new file mode 100644
index 000000000000..f076c430d243
--- /dev/null
+++ b/include/trace/events/scmi.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM scmi
+
+#if !defined(_TRACE_SCMI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCMI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(scmi_xfer_begin,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ bool poll),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(bool, poll)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->poll = poll;
+ ),
+
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
+ __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+ __entry->seq, __entry->poll)
+);
+
+TRACE_EVENT(scmi_xfer_end,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ u32 status),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(u32, status)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->status = status;
+ ),
+
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%u",
+ __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+ __entry->seq, __entry->status)
+);
+
+TRACE_EVENT(scmi_rx_done,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ u8 msg_type),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, msg_type),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(u8, msg_type)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->msg_type = msg_type;
+ ),
+
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
+ __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+ __entry->seq, __entry->msg_type)
+);
+#endif /* _TRACE_SCMI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
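Each TRACE_EVENT() above expands into a trace_<name>() inline for callers, and exactly one compilation unit must define CREATE_TRACE_POINTS before including the header. A hedged usage sketch, with the event signatures as defined in this header:

	#include <linux/types.h>

	#define CREATE_TRACE_POINTS
	#include <trace/events/scmi.h>

	/* Hypothetical call site bracketing an SCMI transfer. */
	static void example_xfer(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq)
	{
		trace_scmi_xfer_begin(transfer_id, msg_id, protocol_id, seq, false);
		/* ... ring the doorbell, wait for completion ... */
		trace_scmi_xfer_end(transfer_id, msg_id, protocol_id, seq, 0);
	}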
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index efe06d621983..e59eb9e7f923 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -31,15 +31,12 @@ class KunitStatus(Enum):
TEST_FAILURE = auto()
def create_default_kunitconfig():
- if not os.path.exists(kunit_kernel.KUNITCONFIG_PATH):
+ if not os.path.exists(kunit_kernel.kunitconfig_path):
shutil.copyfile('arch/um/configs/kunit_defconfig',
- kunit_kernel.KUNITCONFIG_PATH)
+ kunit_kernel.kunitconfig_path)
def run_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitRequest) -> KunitResult:
- if request.defconfig:
- create_default_kunitconfig()
-
config_start = time.time()
success = linux.build_reconfig(request.build_dir)
config_end = time.time()
@@ -108,15 +105,22 @@ def main(argv, linux=None):
run_parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
'directory.',
- type=str, default=None, metavar='build_dir')
+ type=str, default='', metavar='build_dir')
run_parser.add_argument('--defconfig',
- help='Uses a default kunitconfig.',
+ help='Uses a default .kunitconfig.',
action='store_true')
cli_args = parser.parse_args(argv)
if cli_args.subcommand == 'run':
+ if cli_args.build_dir:
+ if not os.path.exists(cli_args.build_dir):
+ os.mkdir(cli_args.build_dir)
+ kunit_kernel.kunitconfig_path = os.path.join(
+ cli_args.build_dir,
+ kunit_kernel.kunitconfig_path)
+
if cli_args.defconfig:
create_default_kunitconfig()
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index bf3876835331..cc5d844ecca1 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -14,7 +14,7 @@ import os
import kunit_config
KCONFIG_PATH = '.config'
-KUNITCONFIG_PATH = 'kunitconfig'
+kunitconfig_path = '.kunitconfig'
class ConfigError(Exception):
"""Represents an error trying to configure the Linux kernel."""
@@ -82,7 +82,7 @@ class LinuxSourceTree(object):
def __init__(self):
self._kconfig = kunit_config.Kconfig()
- self._kconfig.read_from_file(KUNITCONFIG_PATH)
+ self._kconfig.read_from_file(kunitconfig_path)
self._ops = LinuxSourceTreeOperations()
def clean(self):
@@ -111,7 +111,7 @@ class LinuxSourceTree(object):
return True
def build_reconfig(self, build_dir):
- """Creates a new .config if it is not a subset of the kunitconfig."""
+ """Creates a new .config if it is not a subset of the .kunitconfig."""
kconfig_path = get_kconfig_path(build_dir)
if os.path.exists(kconfig_path):
existing_kconfig = kunit_config.Kconfig()
@@ -140,10 +140,10 @@ class LinuxSourceTree(object):
return False
return True
- def run_kernel(self, args=[], timeout=None, build_dir=None):
+ def run_kernel(self, args=[], timeout=None, build_dir=''):
args.extend(['mem=256M'])
process = self._ops.linux_bin(args, timeout, build_dir)
- with open('test.log', 'w') as f:
+ with open(os.path.join(build_dir, 'test.log'), 'w') as f:
for line in process.stdout:
f.write(line.rstrip().decode('ascii') + '\n')
yield line.rstrip().decode('ascii')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index a2a8ea6beae3..cba97756ac4a 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -174,6 +174,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run'], self.linux_source_mock)
assert self.linux_source_mock.build_reconfig.call_count == 1
assert self.linux_source_mock.run_kernel.call_count == 1
+ self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_fail(self):
@@ -199,7 +200,14 @@ class KUnitMainTest(unittest.TestCase):
timeout = 3453
kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
assert self.linux_source_mock.build_reconfig.call_count == 1
- self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=None, timeout=timeout)
+ self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_run_builddir(self):
+ build_dir = '.kunit'
+ kunit.main(['run', '--build_dir', build_dir], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 1
+ self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
if __name__ == '__main__':
diff --git a/tools/testing/selftests/filesystems/epoll/Makefile b/tools/testing/selftests/filesystems/epoll/Makefile
index e62f3d4f68da..78ae4aaf7141 100644
--- a/tools/testing/selftests/filesystems/epoll/Makefile
+++ b/tools/testing/selftests/filesystems/epoll/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -I../../../../../usr/include/
-LDFLAGS += -lpthread
+LDLIBS += -lpthread
TEST_GEN_PROGS := epoll_wakeup_test
include ../../lib.mk
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index b879305a766d..5b8c0fedee76 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -34,6 +34,12 @@ test_modprobe()
check_mods()
{
+ local uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo "skip all tests: must be run as root" >&2
+ exit $ksft_skip
+ fi
+
trap "test_modprobe" EXIT
if [ ! -d $DIR ]; then
modprobe test_firmware
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 31eb09e38729..a6e3d5517a6f 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -7,6 +7,9 @@
MAX_RETRIES=600
RETRY_INTERVAL=".1" # seconds
+# Kselftest framework requirement - SKIP code is 4
+ksft_skip=4
+
# log(msg) - write message to kernel log
# msg - insightful words
function log() {
@@ -18,7 +21,16 @@ function log() {
function skip() {
log "SKIP: $1"
echo "SKIP: $1" >&2
- exit 4
+ exit $ksft_skip
+}
+
+# root test
+function is_root() {
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo "skip all tests: must be run as root" >&2
+ exit $ksft_skip
+ fi
}
# die(msg) - game over, man
@@ -62,6 +74,7 @@ function set_ftrace_enabled() {
# for verbose livepatching output and turn on
# the ftrace_enabled sysctl.
function setup_config() {
+ is_root
push_config
set_dynamic_debug
set_ftrace_enabled 1
diff --git a/tools/testing/selftests/livepatch/test-state.sh b/tools/testing/selftests/livepatch/test-state.sh
index dc2908c22c26..a08212708115 100755
--- a/tools/testing/selftests/livepatch/test-state.sh
+++ b/tools/testing/selftests/livepatch/test-state.sh
@@ -8,8 +8,7 @@ MOD_LIVEPATCH=test_klp_state
MOD_LIVEPATCH2=test_klp_state2
MOD_LIVEPATCH3=test_klp_state3
-set_dynamic_debug
-
+setup_config
# TEST: Loading and removing a module that modifies the system state
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index eec2663261f2..e8a657a5f48a 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -15,7 +15,7 @@
#include <errno.h>
#include <stddef.h>
-static inline pid_t gettid(void)
+static inline pid_t rseq_gettid(void)
{
return syscall(__NR_gettid);
}
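The gettid() to rseq_gettid() rename appears intended to sidestep the gettid() wrapper glibc added in 2.30, which otherwise collides with this local declaration; the raw-syscall behavior is unchanged:

	#include <sys/syscall.h>
	#include <unistd.h>

	/* With glibc >= 2.30, unistd.h already declares pid_t gettid(void),
	 * so a local static inline of the same name fails to build. A
	 * namespaced wrapper avoids the clash: */
	static inline pid_t rseq_gettid(void)
	{
		return (pid_t)syscall(__NR_gettid);
	}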
@@ -373,11 +373,12 @@ void *test_percpu_spinlock_thread(void *arg)
rseq_percpu_unlock(&data->lock, cpu);
#ifndef BENCHMARK
if (i != 0 && !(i % (reps / 10)))
- printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+ printf_verbose("tid %d: count %lld\n",
+ (int) rseq_gettid(), i);
#endif
}
printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
- (int) gettid(), nr_abort, signals_delivered);
+ (int) rseq_gettid(), nr_abort, signals_delivered);
if (!opt_disable_rseq && thread_data->reg &&
rseq_unregister_current_thread())
abort();
@@ -454,11 +455,12 @@ void *test_percpu_inc_thread(void *arg)
} while (rseq_unlikely(ret));
#ifndef BENCHMARK
if (i != 0 && !(i % (reps / 10)))
- printf_verbose("tid %d: count %lld\n", (int) gettid(), i);
+ printf_verbose("tid %d: count %lld\n",
+ (int) rseq_gettid(), i);
#endif
}
printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
- (int) gettid(), nr_abort, signals_delivered);
+ (int) rseq_gettid(), nr_abort, signals_delivered);
if (!opt_disable_rseq && thread_data->reg &&
rseq_unregister_current_thread())
abort();
@@ -605,7 +607,7 @@ void *test_percpu_list_thread(void *arg)
}
printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
- (int) gettid(), nr_abort, signals_delivered);
+ (int) rseq_gettid(), nr_abort, signals_delivered);
if (!opt_disable_rseq && rseq_unregister_current_thread())
abort();
@@ -796,7 +798,7 @@ void *test_percpu_buffer_thread(void *arg)
}
printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
- (int) gettid(), nr_abort, signals_delivered);
+ (int) rseq_gettid(), nr_abort, signals_delivered);
if (!opt_disable_rseq && rseq_unregister_current_thread())
abort();
@@ -1011,7 +1013,7 @@ void *test_percpu_memcpy_buffer_thread(void *arg)
}
printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n",
- (int) gettid(), nr_abort, signals_delivered);
+ (int) rseq_gettid(), nr_abort, signals_delivered);
if (!opt_disable_rseq && rseq_unregister_current_thread())
abort();
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index d40d60e7499e..3f63eb362b92 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -149,11 +149,13 @@ static inline void rseq_clear_rseq_cs(void)
/*
* rseq_prepare_unload() should be invoked by each thread executing a rseq
* critical section at least once between their last critical section and
- * library unload of the library defining the rseq critical section
- * (struct rseq_cs). This also applies to use of rseq in code generated by
- * JIT: rseq_prepare_unload() should be invoked at least once by each
- * thread executing a rseq critical section before reclaim of the memory
- * holding the struct rseq_cs.
+ * library unload of the library defining the rseq critical section (struct
+ * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
+ * post_commit_offset fields. This also applies to use of rseq in code
+ * generated by JIT: rseq_prepare_unload() should be invoked at least once by
+ * each thread executing a rseq critical section before reclaim of the memory
+ * holding the struct rseq_cs or reclaim of the code pointed to by struct
+ * rseq_cs start_ip and post_commit_offset fields.
*/
static inline void rseq_prepare_unload(void)
{
diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/rseq/settings
@@ -0,0 +1 @@
+timeout=0