-rw-r--r--  CREDITS | 7
-rw-r--r--  Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
-rw-r--r--  MAINTAINERS | 11
-rw-r--r--  arch/powerpc/boot/Makefile | 6
-rwxr-xr-x  arch/powerpc/boot/install.sh | 14
-rw-r--r--  arch/powerpc/kernel/Makefile | 4
-rw-r--r--  arch/powerpc/kernel/kvm.c | 8
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh | 16
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 5
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 2
-rw-r--r--  arch/powerpc/platforms/8xx/cpm1-ic.c | 3
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 13
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 5
-rw-r--r--  block/blk-rq-qos.h | 13
-rw-r--r--  block/blk-zoned.c | 11
-rw-r--r--  drivers/ata/ahci.c | 57
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/ahci_xgene.c | 7
-rw-r--r--  drivers/atm/atmtcp.c | 17
-rw-r--r--  drivers/base/power/main.c | 4
-rw-r--r--  drivers/block/loop.c | 26
-rw-r--r--  drivers/block/ublk_drv.c | 72
-rw-r--r--  drivers/gpio/gpiolib-acpi-quirks.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 18
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 80
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 21
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 6
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_plane.c | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 47
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 38
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h | 19
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h | 10
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h | 34
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 59
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 11
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 72
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 60
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 20
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 16
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.c | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c | 2
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx.xml | 14
-rw-r--r--  drivers/gpu/drm/msm/registers/display/dsi.xml | 28
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 5
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_gen_wa_oob.c | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.h | 15
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcpci.c | 12
-rw-r--r--  drivers/media/i2c/alvium-csi2.c | 1
-rw-r--r--  drivers/media/i2c/ccs/ccs-core.c | 7
-rw-r--r--  drivers/media/i2c/dw9768.c | 1
-rw-r--r--  drivers/media/i2c/gc0308.c | 3
-rw-r--r--  drivers/media/i2c/gc2145.c | 3
-rw-r--r--  drivers/media/i2c/imx219.c | 2
-rw-r--r--  drivers/media/i2c/imx283.c | 3
-rw-r--r--  drivers/media/i2c/imx290.c | 3
-rw-r--r--  drivers/media/i2c/imx296.c | 1
-rw-r--r--  drivers/media/i2c/imx415.c | 1
-rw-r--r--  drivers/media/i2c/mt9m114.c | 6
-rw-r--r--  drivers/media/i2c/ov4689.c | 3
-rw-r--r--  drivers/media/i2c/ov5640.c | 4
-rw-r--r--  drivers/media/i2c/ov5645.c | 3
-rw-r--r--  drivers/media/i2c/ov64a40.c | 7
-rw-r--r--  drivers/media/i2c/ov8858.c | 2
-rw-r--r--  drivers/media/i2c/st-mipid02.c | 2
-rw-r--r--  drivers/media/i2c/tc358746.c | 5
-rw-r--r--  drivers/media/i2c/thp7312.c | 4
-rw-r--r--  drivers/media/i2c/vd55g1.c | 4
-rw-r--r--  drivers/media/i2c/vd56g3.c | 4
-rw-r--r--  drivers/media/i2c/video-i2c.c | 4
-rw-r--r--  drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c | 4
-rw-r--r--  drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c | 5
-rw-r--r--  drivers/media/platform/nvidia/tegra-vde/h264.c | 2
-rw-r--r--  drivers/media/platform/qcom/iris/iris_hfi_queue.c | 1
-rw-r--r--  drivers/media/platform/raspberrypi/pisp_be/pisp_be.c | 2
-rw-r--r--  drivers/media/platform/rockchip/rkvdec/rkvdec.c | 17
-rw-r--r--  drivers/media/platform/verisilicon/hantro_drv.c | 1
-rw-r--r--  drivers/media/rc/gpio-ir-recv.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 11
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.c | 49
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 44
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 61
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 723
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h | 87
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/rep.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/rep.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 126
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c | 1
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc.c | 17
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 23
-rw-r--r--  drivers/net/phy/mscc/mscc.h | 4
-rw-r--r--  drivers/net/phy/mscc/mscc_main.c | 4
-rw-r--r--  drivers/net/phy/mscc/mscc_ptp.c | 34
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 3
-rw-r--r--  drivers/net/virtio_net.c | 7
-rw-r--r--  drivers/of/device.c | 4
-rw-r--r--  drivers/of/dynamic.c | 9
-rw-r--r--  drivers/of/of_reserved_mem.c | 17
-rw-r--r--  drivers/pinctrl/Kconfig | 1
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-airoha.c | 8
-rw-r--r--  drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 2
-rw-r--r--  drivers/platform/x86/intel/int3472/discrete.c | 6
-rw-r--r--  drivers/soc/qcom/ubwc_config.c | 23
-rw-r--r--  drivers/vhost/net.c | 9
-rw-r--r--  drivers/virtio/virtio_input.c | 4
-rw-r--r--  drivers/virtio/virtio_pci_legacy_dev.c | 4
-rw-r--r--  drivers/virtio/virtio_pci_modern_dev.c | 4
-rw-r--r--  include/drm/drm_gpuvm.h | 10
-rw-r--r--  include/linux/atmdev.h | 1
-rw-r--r--  include/linux/dma-map-ops.h | 3
-rw-r--r--  include/linux/memblock.h | 5
-rw-r--r--  include/linux/platform_data/x86/int3472.h | 1
-rw-r--r--  include/linux/skbuff.h | 2
-rw-r--r--  include/linux/virtio_config.h | 2
-rw-r--r--  include/net/bluetooth/hci_sync.h | 2
-rw-r--r--  include/net/rose.h | 18
-rw-r--r--  include/uapi/linux/vhost.h | 4
-rw-r--r--  io_uring/kbuf.c | 20
-rw-r--r--  kernel/dma/contiguous.c | 2
-rw-r--r--  kernel/dma/pool.c | 4
-rw-r--r--  mm/memblock.c | 19
-rw-r--r--  mm/numa_emulation.c | 4
-rw-r--r--  mm/numa_memblks.c | 6
-rw-r--r--  net/atm/common.c | 15
-rw-r--r--  net/bluetooth/hci_conn.c | 58
-rw-r--r--  net/bluetooth/hci_event.c | 25
-rw-r--r--  net/bluetooth/hci_sync.c | 6
-rw-r--r--  net/bluetooth/mgmt.c | 9
-rw-r--r--  net/core/datagram.c | 14
-rw-r--r--  net/core/page_pool.c | 6
-rw-r--r--  net/ipv4/route.c | 10
-rw-r--r--  net/l2tp/l2tp_ppp.c | 25
-rw-r--r--  net/rose/af_rose.c | 13
-rw-r--r--  net/rose/rose_in.c | 12
-rw-r--r--  net/rose/rose_route.c | 62
-rw-r--r--  net/rose/rose_timer.c | 2
-rw-r--r--  net/sctp/ipv6.c | 2
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 8
-rw-r--r--  tools/arch/arm64/include/asm/cputype.h | 28
-rw-r--r--  tools/arch/powerpc/include/uapi/asm/kvm.h | 13
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 10
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 7
-rw-r--r--  tools/arch/x86/include/uapi/asm/kvm.h | 8
-rw-r--r--  tools/include/linux/bits.h | 29
-rw-r--r--  tools/include/linux/cfi_types.h | 23
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h | 8
-rw-r--r--  tools/include/uapi/linux/kvm.h | 27
-rw-r--r--  tools/perf/arch/arm/entry/syscalls/syscall.tbl | 2
-rw-r--r--  tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl | 2
-rw-r--r--  tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 2
-rw-r--r--  tools/perf/arch/s390/entry/syscalls/syscall.tbl | 2
-rw-r--r--  tools/perf/arch/sh/entry/syscalls/syscall.tbl | 2
-rw-r--r--  tools/perf/arch/sparc/entry/syscalls/syscall.tbl | 2
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_32.tbl | 2
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 2
-rw-r--r--  tools/perf/arch/x86/tests/topdown.c | 1
-rw-r--r--  tools/perf/arch/xtensa/entry/syscalls/syscall.tbl | 2
-rw-r--r--  tools/perf/bench/inject-buildid.c | 2
-rw-r--r--  tools/perf/builtin-buildid-cache.c | 8
-rw-r--r--  tools/perf/builtin-inject.c | 4
-rw-r--r--  tools/perf/tests/sdt.c | 2
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/fcntl.h | 18
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/fs.h | 88
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/prctl.h | 9
-rw-r--r--  tools/perf/trace/beauty/include/uapi/linux/vhost.h | 35
-rw-r--r--  tools/perf/util/build-id.c | 4
-rw-r--r--  tools/perf/util/debuginfo.c | 8
-rw-r--r--  tools/perf/util/dsos.c | 4
-rw-r--r--  tools/perf/util/symbol-elf.c | 9
-rw-r--r--  tools/perf/util/symbol-minimal.c | 59
-rw-r--r--  tools/perf/util/symbol.c | 8
-rw-r--r--  tools/perf/util/symbol.h | 2
-rw-r--r--  tools/perf/util/synthetic-events.c | 2
-rw-r--r--  tools/scripts/syscall.tbl | 2
-rw-r--r--  tools/testing/selftests/ublk/file_backed.c | 10
-rw-r--r--  tools/testing/selftests/ublk/kublk.c | 38
-rw-r--r--  tools/testing/selftests/ublk/kublk.h | 45
-rw-r--r--  tools/testing/selftests/ublk/null.c | 4
-rw-r--r--  tools/testing/selftests/ublk/stripe.c | 4
-rwxr-xr-x  tools/testing/selftests/ublk/test_stress_04.sh | 6
230 files changed, 2167 insertions, 1460 deletions
diff --git a/CREDITS b/CREDITS
index a357f9cbb05d..a687c3c35c4c 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3222,6 +3222,10 @@ D: AIC5800 IEEE 1394, RAW I/O on 1394
D: Starter of Linux1394 effort
S: ask per mail for current address
+N: Boris Pismenny
+E: borisp@mellanox.com
+D: Kernel TLS implementation and offload support.
+
N: Nicolas Pitre
E: nico@fluxnic.net
D: StrongARM SA1100 support integrator & hacker
@@ -4168,6 +4172,9 @@ S: 1513 Brewster Dr.
S: Carrollton, TX 75010
S: USA
+N: Dave Watson
+D: Kernel TLS implementation.
+
N: Tim Waugh
E: tim@cyberelk.net
D: Co-architect of the parallel-port sharing system
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml
index e153f8d26e7a..2735c78b0b67 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,mdp5.yaml
@@ -60,7 +60,6 @@ properties:
- const: bus
- const: core
- const: vsync
- - const: lut
- const: tbu
- const: tbu_rt
# MSM8996 has additional iommu clock
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 77160cd47f54..9ec8947dfcad 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -507,6 +507,8 @@ patternProperties:
description: Espressif Systems Co. Ltd.
"^est,.*":
description: ESTeem Wireless Modems
+ "^eswin,.*":
+ description: Beijing ESWIN Technology Group Co. Ltd.
"^ettus,.*":
description: NI Ettus Research
"^eukrea,.*":
diff --git a/MAINTAINERS b/MAINTAINERS
index fed6cd812d79..e94d68c980c5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -937,7 +937,7 @@ S: Maintained
F: drivers/gpio/gpio-altera.c
ALTERA TRIPLE SPEED ETHERNET DRIVER
-M: Joyce Ooi <joyce.ooi@intel.com>
+M: Boon Khai Ng <boon.khai.ng@altera.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/altera/
@@ -4205,7 +4205,7 @@ W: http://www.baycom.org/~tom/ham/ham.html
F: drivers/net/hamradio/baycom*
BCACHE (BLOCK LAYER CACHE)
-M: Coly Li <colyli@kernel.org>
+M: Coly Li <colyli@fnnas.com>
M: Kent Overstreet <kent.overstreet@linux.dev>
L: linux-bcache@vger.kernel.org
S: Maintained
@@ -4216,7 +4216,7 @@ F: drivers/md/bcache/
BCACHEFS
M: Kent Overstreet <kent.overstreet@linux.dev>
L: linux-bcachefs@vger.kernel.org
-S: Supported
+S: Externally maintained
C: irc://irc.oftc.net/bcache
P: Documentation/filesystems/bcachefs/SubmittingPatches.rst
T: git https://evilpiepirate.org/git/bcachefs.git
@@ -17848,7 +17848,6 @@ F: net/ipv6/syncookies.c
F: net/ipv6/tcp*.c
NETWORKING [TLS]
-M: Boris Pismenny <borisp@nvidia.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
@@ -20878,8 +20877,8 @@ S: Maintained
F: drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
QUALCOMM RMNET DRIVER
-M: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
-M: Sean Tranchetti <quic_stranche@quicinc.com>
+M: Subash Abhinov Kasiviswanathan <subash.a.kasiviswanathan@oss.qualcomm.com>
+M: Sean Tranchetti <sean.tranchetti@oss.qualcomm.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index a7ab087d412c..c47b78c1d3e7 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -243,13 +243,13 @@ $(obj)/wrapper.a: $(obj-wlib) FORCE
hostprogs := addnote hack-coff mktree
targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
-extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
+always-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
dtstree := $(src)/dts
wrapper := $(src)/wrapper
-wrapperbits := $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree) \
+wrapperbits := $(always-y) $(addprefix $(obj)/,addnote hack-coff mktree) \
$(wrapper) FORCE
#############
@@ -456,7 +456,7 @@ WRAPPER_DTSDIR := /usr/lib/kernel-wrapper/dts
WRAPPER_BINDIR := /usr/sbin
INSTALL := install
-extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
+extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(always-y))
hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs))
wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
dts-installed := $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 101fcb397a0f..c3df6c27ce75 100755
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -19,19 +19,19 @@
set -e
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
-image_name=`basename $2`
+image_name=$(basename "$2")
echo "Warning: '${INSTALLKERNEL}' command not available... Copying" \
"directly to $4/$image_name-$1" >&2
-if [ -f $4/$image_name-$1 ]; then
- mv $4/$image_name-$1 $4/$image_name-$1.old
+if [ -f "$4"/"$image_name"-"$1" ]; then
+ mv "$4"/"$image_name"-"$1" "$4"/"$image_name"-"$1".old
fi
-if [ -f $4/System.map-$1 ]; then
- mv $4/System.map-$1 $4/System-$1.old
+if [ -f "$4"/System.map-"$1" ]; then
+ mv "$4"/System.map-"$1" "$4"/System-"$1".old
fi
-cat $2 > $4/$image_name-$1
-cp $3 $4/System.map-$1
+cat "$2" > "$4"/"$image_name"-"$1"
+cp "$3" "$4"/System.map-"$1"
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fb2b95267022..2f0a2e69c607 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -199,7 +199,9 @@ obj-$(CONFIG_ALTIVEC) += vector.o
obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
obj64-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_entry_64.o
-extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check
+ifdef KBUILD_BUILTIN
+always-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check
+endif
obj-$(CONFIG_PPC64) += $(obj64-y)
obj-$(CONFIG_PPC32) += $(obj32-y)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 5b3c093611ba..7209d00a9c25 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -632,19 +632,19 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
#endif
}
- switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
+ switch (inst_no_rt & ~KVM_MASK_RB) {
case KVM_INST_MTSRIN:
if (features & KVM_MAGIC_FEAT_SR) {
u32 inst_rb = _inst & KVM_MASK_RB;
kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
}
break;
-#endif
}
+#endif
- switch (_inst) {
#ifdef CONFIG_BOOKE
+ switch (_inst) {
case KVM_INST_WRTEEI_0:
kvm_patch_ins_wrteei_0(inst);
break;
@@ -652,8 +652,8 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
case KVM_INST_WRTEEI_1:
kvm_patch_ins_wrtee(inst, 0, 1);
break;
-#endif
}
+#endif
}
extern u32 kvm_template_start[];
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 69623b9045d5..3090b97258ae 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -15,8 +15,8 @@
has_renamed_memintrinsics()
{
- grep -q "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} && \
- ! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" ${KCONFIG_CONFIG}
+ grep -q "^CONFIG_KASAN=y$" "${KCONFIG_CONFIG}" && \
+ ! grep -q "^CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y" "${KCONFIG_CONFIG}"
}
if has_renamed_memintrinsics
@@ -42,15 +42,15 @@ check_section()
{
file=$1
section=$2
- size=$(objdump -h -j $section $file 2>/dev/null | awk "\$2 == \"$section\" {print \$3}")
+ size=$(objdump -h -j "$section" "$file" 2>/dev/null | awk "\$2 == \"$section\" {print \$3}")
size=${size:-0}
- if [ $size -ne 0 ]; then
+ if [ "$size" -ne 0 ]; then
ERROR=1
echo "Error: Section $section not empty in prom_init.c" >&2
fi
}
-for UNDEF in $($NM -u $OBJ | awk '{print $2}')
+for UNDEF in $($NM -u "$OBJ" | awk '{print $2}')
do
# On 64-bit nm gives us the function descriptors, which have
# a leading . on the name, so strip it off here.
@@ -87,8 +87,8 @@ do
fi
done
-check_section $OBJ .data
-check_section $OBJ .bss
-check_section $OBJ .init.data
+check_section "$OBJ" .data
+check_section "$OBJ" .bss
+check_section "$OBJ" .init.data
exit $ERROR
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7284c8021eeb..8fd7cbf3bd04 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -141,10 +141,7 @@ void __init check_smt_enabled(void)
smt_enabled_at_boot = 0;
else {
int smt;
- int rc;
-
- rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
- if (!rc)
+ if (!kstrtoint(smt_enabled_cmdline, 10, &smt))
smt_enabled_at_boot =
min(threads_per_core, smt);
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 153587741864..2ba057171ebe 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -69,7 +69,7 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
/*
* Common checks before entering the guest world. Call with interrupts
- * disabled.
+ * enabled.
*
* returns:
*
diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c
index a49d4a9ab3bc..3292071e4da3 100644
--- a/arch/powerpc/platforms/8xx/cpm1-ic.c
+++ b/arch/powerpc/platforms/8xx/cpm1-ic.c
@@ -110,8 +110,7 @@ static int cpm_pic_probe(struct platform_device *pdev)
out_be32(&data->reg->cpic_cimr, 0);
- data->host = irq_domain_create_linear(of_fwnode_handle(dev->of_node),
- 64, &cpm_pic_host_ops, data);
+ data->host = irq_domain_create_linear(dev_fwnode(dev), 64, &cpm_pic_host_ops, data);
if (!data->host)
return -ENODEV;
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 613b383ed8b3..7b527d18aa5e 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -122,16 +122,11 @@ choice
If unsure, select Generic.
config POWERPC64_CPU
- bool "Generic (POWER5 and PowerPC 970 and above)"
- depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
- select PPC_64S_HASH_MMU
-
-config POWERPC64_CPU
- bool "Generic (POWER8 and above)"
- depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
- select ARCH_HAS_FAST_MULTIPLIER
+ bool "Generic 64 bits powerpc"
+ depends on PPC_BOOK3S_64
+ select ARCH_HAS_FAST_MULTIPLIER if CPU_LITTLE_ENDIAN
select PPC_64S_HASH_MMU
- select PPC_HAS_LBARX_LHARX
+ select PPC_HAS_LBARX_LHARX if CPU_LITTLE_ENDIAN
config POWERPC_CPU
bool "Generic 32 bits powerpc"
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 4fe8a7b1b288..2a007bfb038d 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -412,9 +412,8 @@ static int fsl_of_msi_probe(struct platform_device *dev)
}
platform_set_drvdata(dev, msi);
- msi->irqhost = irq_domain_create_linear(of_fwnode_handle(dev->dev.of_node),
- NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
-
+ msi->irqhost = irq_domain_create_linear(dev_fwnode(&dev->dev), NR_MSI_IRQS_MAX,
+ &fsl_msi_host_ops, msi);
if (msi->irqhost == NULL) {
dev_err(&dev->dev, "No memory for MSI irqhost\n");
err = -ENOMEM;
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 1fe22000a379..b538f2c0febc 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -149,12 +149,15 @@ static inline void rq_qos_done_bio(struct bio *bio)
q = bdev_get_queue(bio->bi_bdev);
/*
- * If a bio has BIO_QOS_xxx set, it implicitly implies that
- * q->rq_qos is present. So, we skip re-checking q->rq_qos
- * here as an extra optimization and directly call
- * __rq_qos_done_bio().
+ * A BIO may carry BIO_QOS_* flags even if the associated request_queue
+ * does not have rq_qos enabled. This can happen with stacked block
+ * devices — for example, NVMe multipath, where it's possible that the
+ * bottom device has QoS enabled but the top device does not. Therefore,
+ * always verify that q->rq_qos is present and QoS is enabled before
+ * calling __rq_qos_done_bio().
*/
- __rq_qos_done_bio(q->rq_qos, bio);
+ if (test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags) && q->rq_qos)
+ __rq_qos_done_bio(q->rq_qos, bio);
}
static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index ef43aaca49f4..5e2a5788dc3b 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -1286,14 +1286,14 @@ static void blk_zone_wplug_bio_work(struct work_struct *work)
struct block_device *bdev;
unsigned long flags;
struct bio *bio;
+ bool prepared;
/*
* Submit the next plugged BIO. If we do not have any, clear
* the plugged flag.
*/
- spin_lock_irqsave(&zwplug->lock, flags);
-
again:
+ spin_lock_irqsave(&zwplug->lock, flags);
bio = bio_list_pop(&zwplug->bio_list);
if (!bio) {
zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED;
@@ -1304,13 +1304,14 @@ again:
trace_blk_zone_wplug_bio(zwplug->disk->queue, zwplug->zone_no,
bio->bi_iter.bi_sector, bio_sectors(bio));
- if (!blk_zone_wplug_prepare_bio(zwplug, bio)) {
+ prepared = blk_zone_wplug_prepare_bio(zwplug, bio);
+ spin_unlock_irqrestore(&zwplug->lock, flags);
+
+ if (!prepared) {
blk_zone_wplug_bio_io_error(zwplug, bio);
goto again;
}
- spin_unlock_irqrestore(&zwplug->lock, flags);
-
bdev = bio->bi_bdev;
/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e1c24bbacf64..7a7f88b3fa2b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -689,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
-static void ahci_apply_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv, char *mask_s)
+static char *ahci_mask_port_ext;
+module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
+MODULE_PARM_DESC(mask_port_ext,
+ "32-bits mask to ignore the external/hotplug capability of ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;
if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
- return;
+ return 0;
}
- hpriv->mask_port_map = mask;
+ return mask;
}
-static void ahci_get_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv)
+static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
+ u32 mask = 0;
- if (!strlen(ahci_mask_port_map))
- return;
+ if (!mask_p || !strlen(mask_p))
+ return 0;
- str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
- return;
+ return 0;
/* Handle single mask case */
if (!strchr(str, '=')) {
- ahci_apply_port_map_mask(dev, hpriv, str);
+ mask = ahci_port_mask(dev, str);
goto free;
}
/*
- * Mask list case: parse the parameter to apply the mask only if
+ * Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;
@@ -752,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}
- ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ mask = ahci_port_mask(dev, mask_s);
}
free:
kfree(str);
+
+ return mask;
}
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
@@ -782,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}
/* Handle port map masks passed as module parameter. */
- if (ahci_mask_port_map)
- ahci_get_port_map_mask(&pdev->dev, hpriv);
+ hpriv->mask_port_map =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
+ hpriv->mask_port_ext =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1757,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- /* mark external ports (hotplug-capable, eSATA) */
+ /*
+ * Mark external ports (hotplug-capable, eSATA), unless we were asked to
+ * ignore this feature.
+ */
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
- (tmp & PORT_CMD_HPCP))
+ (tmp & PORT_CMD_HPCP)) {
+ if (hpriv->mask_port_ext & (1U << ap->port_no)) {
+ ata_port_info(ap,
+ "Ignoring external/hotplug capability\n");
+ return;
+ }
ap->pflags |= ATA_PFLAG_EXTERNAL;
+ }
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap)
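Both parameters share the syntax described in the MODULE_PARM_DESC strings above: a bare mask applied to every AHCI controller, or a comma-separated list of "<pci_dev>=<mask>" pairs. As an illustrative example (the PCI address is hypothetical), booting with "ahci.mask_port_ext=0000:00:1f.2=0x1" would make ahci_mark_external_port() ignore the external/hotplug capability of port 0 on that controller only, while "ahci.mask_port_ext=0x1" would apply the same mask to every controller.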
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 2c10c8f440d1..293b7fb216b5 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -330,6 +330,7 @@ struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
u32 mask_port_map; /* Mask of valid ports */
+ u32 mask_port_ext; /* Mask of ports ext capability */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 5d5a51a77f5d..6b8844646fcd 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
- u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
- rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
-
- return rc;
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
/**
@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
- u32 rc;
+ int rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index eeae160c898d..fa3c76a2b49d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
+static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct atmtcp_hdr *hdr;
+
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ return -EINVAL;
+
+ hdr = (struct atmtcp_hdr *)skb->data;
+ if (hdr->length == ATMTCP_HDR_MAGIC)
+ return -EINVAL;
+
+ return 0;
+}
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (skb->len < sizeof(struct atmtcp_hdr))
- goto done;
-
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
+ .pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};
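The diffstat shows matching changes to include/linux/atmdev.h and net/atm/common.c, so the new hook is presumably consulted by the ATM socket core before ->send, letting a device reject a malformed skb while the core still owns it. A hedged sketch of that call pattern (only the .pre_send/.send ops members come from this diff; the wrapper function below is hypothetical):

	static int atm_dev_do_send(struct atm_vcc *vcc, struct sk_buff *skb)
	{
		int err;

		/* Let the device validate the skb before it is queued. */
		if (vcc->dev->ops->pre_send) {
			err = vcc->dev->ops->pre_send(vcc, skb);
			if (err)
				return err;
		}
		return vcc->dev->ops->send(vcc, skb);
	}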
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dbf5456cd891..2ea6e05e6ec9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -675,7 +675,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -1330,7 +1330,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 57263c273f0f..053a086d547e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -139,20 +139,26 @@ static int part_shift;
static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
- struct kstat stat;
loff_t loopsize;
int ret;
- /*
- * Get the accurate file size. This provides better results than
- * cached inode data, particularly for network filesystems where
- * metadata may be stale.
- */
- ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
- if (ret)
- return 0;
+ if (S_ISBLK(file_inode(file)->i_mode)) {
+ loopsize = i_size_read(file->f_mapping->host);
+ } else {
+ struct kstat stat;
+
+ /*
+ * Get the accurate file size. This provides better results than
+ * cached inode data, particularly for network filesystems where
+ * metadata may be stale.
+ */
+ ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+ if (ret)
+ return 0;
+
+ loopsize = stat.size;
+ }
- loopsize = stat.size;
if (lo->lo_offset > 0)
loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 99abd67b708b..67d4a867aec4 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -239,6 +239,7 @@ struct ublk_device {
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
+ struct delayed_work exit_work;
};
/* header of ublk_params */
@@ -1595,13 +1596,63 @@ static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
ublk_get_queue(ub, i)->canceling = canceling;
}
-static int ublk_ch_release(struct inode *inode, struct file *filp)
+static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
{
- struct ublk_device *ub = filp->private_data;
+ int i, j;
+
+ if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG)))
+ return false;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ for (j = 0; j < ubq->q_depth; j++) {
+ struct ublk_io *io = &ubq->ios[j];
+ unsigned int refs = refcount_read(&io->ref) +
+ io->task_registered_buffers;
+
+ /*
+ * UBLK_REFCOUNT_INIT or zero means no active
+ * reference
+ */
+ if (refs != UBLK_REFCOUNT_INIT && refs != 0)
+ return true;
+
+ /* reset to zero if the io has no active references */
+ refcount_set(&io->ref, 0);
+ io->task_registered_buffers = 0;
+ }
+ }
+ return false;
+}
+
+static void ublk_ch_release_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, exit_work.work);
struct gendisk *disk;
int i;
/*
+ * For zero-copy and auto buffer register modes, I/O references
+ * might not be dropped naturally when the daemon is killed, but
+ * io_uring guarantees that registered bvec kernel buffers are
+ * finally unregistered when the io_uring context is freed, at
+ * which point the active references are dropped.
+ *
+ * Wait until the active references are dropped to avoid use-after-free.
+ *
+ * A registered buffer may be unregistered in io_uring's release
+ * handler, so wait by scheduling this work function to avoid a
+ * release dependency between the two files.
+ */
+ if (ublk_check_and_reset_active_ref(ub)) {
+ schedule_delayed_work(&ub->exit_work, 1);
+ return;
+ }
+
+ /*
* disk isn't attached yet, either device isn't live, or it has
* been removed already, so we needn't do anything
*/
@@ -1673,6 +1724,23 @@ unlock:
ublk_reset_ch_dev(ub);
out:
clear_bit(UB_STATE_OPEN, &ub->state);
+
+ /* put the reference grabbed in ublk_ch_release() */
+ ublk_put_device(ub);
+}
+
+static int ublk_ch_release(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = filp->private_data;
+
+ /*
+ * Grab a ublk device reference so the device can't go away before
+ * the release work function has actually run.
+ */
+ ublk_get_device(ub);
+
+ INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
+ schedule_delayed_work(&ub->exit_work, 0);
return 0;
}
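The release path above is a self-rearming delayed work: the work function re-queues itself with a one-jiffy delay until no active I/O references remain, and only then performs the real teardown. A minimal standalone sketch of that pattern, with hypothetical struct and function names (only the container_of()/schedule_delayed_work() usage mirrors the driver):

	/* All names here (struct foo, foo_release_work_fn) are illustrative. */
	#include <linux/refcount.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct foo {
		refcount_t active;
		struct delayed_work exit_work;
	};

	static void foo_release_work_fn(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, exit_work.work);

		/* Still busy? Re-arm instead of blocking in a release path. */
		if (refcount_read(&f->active) > 0) {
			schedule_delayed_work(&f->exit_work, 1);
			return;
		}
		kfree(f);
	}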
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index c13545dce349..bfb04e67c4bc 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -344,6 +344,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@8",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 5.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4482
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ASCP1A00:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index dfb6cfd83760..02138aa55793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
- AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE);
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 5743ebb2f1b7..ce27cb5bb05e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -285,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int ret;
+
+ /*
+ * Pin to keep buffer in place while it's vmap'ed. The actual
+ * domain is not that important as long as it's mappable. Using
+ * GTT and VRAM should be compatible with most use cases.
+ */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
+ if (ret)
+ return ret;
+ ret = drm_gem_dmabuf_vmap(dma_buf, map);
+ if (ret)
+ amdgpu_bo_unpin(bo);
+
+ return ret;
+}
+
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ drm_gem_dmabuf_vunmap(dma_buf, map);
+ amdgpu_bo_unpin(bo);
+}
+
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,
@@ -294,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
+ .vmap = amdgpu_dma_buf_vmap,
+ .vunmap = amdgpu_dma_buf_vunmap,
};
/**
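With the ops above, every vmap of an amdgpu-exported dma-buf now pins the BO, so an importer must balance each dma_buf_vmap() with a dma_buf_vunmap() to drop the pin again. A consumer-side sketch (the surrounding importer code is hypothetical; the two API calls are the standard dma-buf ones):

	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);	/* pins the BO via the ops above */
	if (ret)
		return ret;
	/* ... access the buffer through map.vaddr or map.vaddr_iomem ... */
	dma_buf_vunmap(dmabuf, &map);		/* unpins it again */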
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index c3ace8030530..8190c24a649a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -471,6 +471,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
+ r = -EINVAL;
goto unlock;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c01c241a1b06..c85de8c8f6f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1612,9 +1612,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if (!adev->gfx.disable_uq &&
- adev->gfx.me_fw_version >= 2390 &&
- adev->gfx.pfp_fw_version >= 2530 &&
- adev->gfx.mec_fw_version >= 2600 &&
+ adev->gfx.me_fw_version >= 2420 &&
+ adev->gfx.pfp_fw_version >= 2580 &&
+ adev->gfx.mec_fw_version >= 2650 &&
adev->mes.fw_version[0] >= 120) {
adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
@@ -4129,6 +4129,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4281,8 +4283,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 3e138527d534..fd44d5503e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -3026,6 +3026,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -3175,8 +3177,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 4b64851fdb42..5fbfe7333b54 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3458,14 +3458,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
- if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
- (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
- return 0;
+ if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
+ if (adev->family == AMDGPU_FAMILY_SI ||
+ ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
+ (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
+ (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
+ return 0;
+ }
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 74d949995a72..db9b089ef62c 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -40,7 +40,7 @@
* mapping's backing &drm_gem_object buffers.
*
* &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
- * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
+ * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
*
* GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
* keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
@@ -72,7 +72,7 @@
* but it can also be a 'dummy' object, which can be allocated with
* drm_gpuvm_resv_object_alloc().
*
- * In order to connect a struct drm_gpuva its backing &drm_gem_object each
+ * In order to connect a struct drm_gpuva to its backing &drm_gem_object each
* &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
* &drm_gpuvm_bo contains a list of &drm_gpuva structures.
*
@@ -81,7 +81,7 @@
* This is ensured by the API through drm_gpuvm_bo_obtain() and
* drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
* &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
- * particular combination. If not existent a new instance is created and linked
+ * particular combination. If not present, a new instance is created and linked
* to the &drm_gem_object.
*
* &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
@@ -108,7 +108,7 @@
* sequence of operations to satisfy a given map or unmap request.
*
* Therefore the DRM GPU VA manager provides an algorithm implementing splitting
- * and merging of existent GPU VA mappings with the ones that are requested to
+ * and merging of existing GPU VA mappings with the ones that are requested to
* be mapped or unmapped. This feature is required by the Vulkan API to
* implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
* as VM BIND.
@@ -119,7 +119,7 @@
* execute in order to integrate the new mapping cleanly into the current state
* of the GPU VA space.
*
- * Depending on how the new GPU VA mapping intersects with the existent mappings
+ * Depending on how the new GPU VA mapping intersects with the existing mappings
* of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
* of unmap operations, a maximum of two remap operations and a single map
* operation. The caller might receive no callback at all if no operation is
@@ -139,16 +139,16 @@
* one unmap operation and one or two map operations, such that drivers can
* derive the page table update delta accordingly.
*
- * Note that there can't be more than two existent mappings to split up, one at
+ * Note that there can't be more than two existing mappings to split up, one at
* the beginning and one at the end of the new mapping, hence there is a
* maximum of two remap operations.
*
* Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
* call back into the driver in order to unmap a range of GPU VA space. The
- * logic behind this function is way simpler though: For all existent mappings
+ * logic behind this function is way simpler though: For all existing mappings
* enclosed by the given range unmap operations are created. For mappings which
- * are only partically located within the given range, remap operations are
- * created such that those mappings are split up and re-mapped partically.
+ * are only partially located within the given range, remap operations are
+ * created such that those mappings are split up and re-mapped partially.
*
* As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
* drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
@@ -168,7 +168,7 @@
* provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
* drm_gpuva_unmap() instead.
*
- * The following diagram depicts the basic relationships of existent GPU VA
+ * The following diagram depicts the basic relationships of existing GPU VA
* mappings, a newly requested mapping and the resulting mappings as implemented
* by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
*
@@ -218,7 +218,7 @@
*
*
* 4) Existent mapping is a left aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -236,9 +236,9 @@
* and/or non-contiguous BO offset.
*
*
- * 5) Requested mapping's range is a left aligned subset of the existent one,
+ * 5) Requested mapping's range is a left aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one adjusting its BO offset.
+ * the existing one adjusting its BO offset.
*
* ::
*
@@ -271,9 +271,9 @@
* new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
*
*
- * 7) Requested mapping's range is a right aligned subset of the existent one,
+ * 7) Requested mapping's range is a right aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one, without adjusting the BO offset.
+ * the existing one, without adjusting the BO offset.
*
* ::
*
@@ -304,7 +304,7 @@
*
* 9) Existent mapping is overlapped at the end by the requested mapping backed
* by a different BO. Hence, map the requested mapping and split up the
- * existent one, without adjusting the BO offset.
+ * existing one, without adjusting the BO offset.
*
* ::
*
@@ -334,9 +334,9 @@
* new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
*
*
- * 11) Requested mapping's range is a centered subset of the existent one
+ * 11) Requested mapping's range is a centered subset of the existing one
* having a different backing BO. Hence, map the requested mapping and split
- * up the existent one in two mappings, adjusting the BO offset of the right
+ * up the existing one in two mappings, adjusting the BO offset of the right
* one accordingly.
*
* ::
@@ -351,7 +351,7 @@
* new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
*
*
- * 12) Requested mapping is a contiguous subset of the existent one. Split it
+ * 12) Requested mapping is a contiguous subset of the existing one. Split it
* up, but indicate that the backing PTEs could be kept.
*
* ::
@@ -367,7 +367,7 @@
*
*
* 13) Existent mapping is a right aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -386,7 +386,7 @@
*
*
* 14) Existent mapping is a centered subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -406,7 +406,7 @@
*
* 15) Existent mappings is overlapped at the beginning by the requested mapping
* backed by a different BO. Hence, map the requested mapping and split up
- * the existent one, adjusting its BO offset accordingly.
+ * the existing one, adjusting its BO offset accordingly.
*
* ::
*
@@ -469,8 +469,8 @@
* make use of them.
*
* The below code is strictly limited to illustrate the generic usage pattern.
- * To maintain simplicitly, it doesn't make use of any abstractions for common
- * code, different (asyncronous) stages with fence signalling critical paths,
+ * To maintain simplicity, it doesn't make use of any abstractions for common
+ * code, different (asynchronous) stages with fence signalling critical paths,
* any other helpers or error handling in terms of freeing memory and dropping
* previously taken locks.
*
@@ -479,7 +479,7 @@
* // Allocates a new &drm_gpuva.
* struct drm_gpuva * driver_gpuva_alloc(void);
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -582,7 +582,7 @@
* .sm_step_unmap = driver_gpuva_unmap,
* };
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -680,7 +680,7 @@
*
* This helper is here to provide lockless list iteration. Lockless as in, the
* iterator releases the lock immediately after picking the first element from
- * the list, so list insertion deletion can happen concurrently.
+ * the list, so list insertion and deletion can happen concurrently.
*
* Elements popped from the original list are kept in a local list, so removal
* and is_empty checks can still happen while we're iterating the list.
@@ -1160,7 +1160,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs
+ * drm_gpuvm_prepare_objects() - prepare all associated BOs
* @gpuvm: the &drm_gpuvm
* @exec: the &drm_exec locking context
* @num_fences: the amount of &dma_fences to reserve
@@ -1230,13 +1230,13 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
/**
- * drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
*
* Acquires all dma-resv locks of all &drm_gem_objects the given
* &drm_gpuvm contains mappings of.
*
- * Addionally, when calling this function with struct drm_gpuvm_exec::extra
+ * Additionally, when calling this function with struct drm_gpuvm_exec::extra
* being set the driver receives the given @fn callback to lock additional
* dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
* would call drm_exec_prepare_obj() from within this callback.
@@ -1293,7 +1293,7 @@ fn_lock_array(struct drm_gpuvm_exec *vm_exec)
}
/**
- * drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
* @objs: additional &drm_gem_objects to lock
* @num_objs: the number of additional &drm_gem_objects to lock
@@ -1588,7 +1588,7 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
/**
- * drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
* given &drm_gpuvm and &drm_gem_object
* @gpuvm: The &drm_gpuvm the @obj is mapped in.
* @obj: The &drm_gem_object being mapped in the @gpuvm.
@@ -1624,7 +1624,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
/**
- * drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo
+ * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
* for the given &drm_gpuvm and &drm_gem_object
* @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
*
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
* @vm_bo: the &drm_gpuvm_bo to add or remove
* @evict: indicates whether the object is evicted
*
- * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list.
+ * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
*/
void
drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
@@ -1790,7 +1790,7 @@ __drm_gpuva_remove(struct drm_gpuva *va)
* drm_gpuva_remove() - remove a &drm_gpuva
* @va: the &drm_gpuva to remove
*
- * This removes the given &va from the underlaying tree.
+ * This removes the given &va from the underlying tree.
*
* It is safe to use this function using the safe versions of iterating the GPU
* VA space, such as drm_gpuvm_for_each_va_safe() and
@@ -2358,7 +2358,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the operations to
- * unmap and, if required, split existent mappings.
+ * unmap and, if required, split existing mappings.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
@@ -2430,7 +2430,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
* will be newly mapped.
*
- * The expected usage is:
+ * The expected usage is::
*
* .. code-block:: c
*
@@ -2475,7 +2475,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* required without the earlier DRIVER_OP_MAP. This is safe because we've
* already locked the GEM object in the earlier DRIVER_OP_MAP step.
*
- * Returns: 0 on success or a negative error codec
+ * Returns: 0 on success or a negative error code
*/
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
@@ -2619,12 +2619,12 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
* @req_offset: the offset within the &drm_gem_object
*
* This function creates a list of operations to perform splitting and merging
- * of existent mapping(s) with the newly requested one.
+ * of existing mapping(s) with the newly requested one.
*
* The list can be iterated with &drm_gpuva_for_each_op and must be processed
* in the given order. It can contain map, unmap and remap operations, but it
* also can be empty if no operation is required, e.g. if the requested mapping
- * already exists is the exact same way.
+ * already exists in the exact same way.
*
* There can be an arbitrary amount of unmap operations, a maximum of two remap
* operations and a single map operation. The latter one represents the original
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index d5e6bab36414..f8a817689e16 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -387,19 +387,19 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
of_id = of_match_node(mtk_drm_of_ids, node);
if (!of_id)
- continue;
+ goto next_put_node;
pdev = of_find_device_by_node(node);
if (!pdev)
- continue;
+ goto next_put_node;
drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
if (!drm_dev)
- continue;
+ goto next_put_device_pdev_dev;
temp_drm_priv = dev_get_drvdata(drm_dev);
if (!temp_drm_priv)
- continue;
+ goto next_put_device_drm_dev;
if (temp_drm_priv->data->main_len)
all_drm_priv[CRTC_MAIN] = temp_drm_priv;
@@ -411,10 +411,17 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
if (temp_drm_priv->mtk_drm_bound)
cnt++;
- if (cnt == MAX_CRTC) {
- of_node_put(node);
+next_put_device_drm_dev:
+ put_device(drm_dev);
+
+next_put_device_pdev_dev:
+ put_device(&pdev->dev);
+
+next_put_node:
+ of_node_put(node);
+
+ if (cnt == MAX_CRTC)
break;
- }
}
if (drm_priv->data->mmsys_dev_num == cnt) {
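The cleanup above follows the usual one-label-per-reference ladder, unwound in
reverse acquisition order. In miniature (hedged sketch: the property name and
some_match() are illustrative; each commented call takes a reference that must
be dropped):

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(dev->of_node, "some-phandles", i); /* +1 on node */
		if (!node)
			continue;

		pdev = of_find_device_by_node(node);		/* +1 on &pdev->dev */
		if (!pdev)
			goto next_put_node;

		child = device_find_child(&pdev->dev, NULL, some_match); /* +1 on child */
		if (!child)
			goto next_put_pdev;

		/* ... use child ... */

		put_device(child);
	next_put_pdev:
		put_device(&pdev->dev);
	next_put_node:
		of_node_put(node);
	}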
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d7726091819c..0e2bcd5f67b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1002,6 +1002,12 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
+ /*
+ * Set the flag to request that the DSI host bridge be pre-enabled before the
+ * device bridge in the chain, so the DSI host is ready when the device bridge
+ * is pre-enabled.
+ */
+ dsi->next_bridge->pre_enable_prev_first = true;
+
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 845fd8aa43c3..b766dd5e6c8d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -182,8 +182,8 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
- regmap_update_bits(hdmi->regs, VIDEO_SOURCE_SEL,
- VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH);
+ regmap_update_bits(hdmi->regs, VIDEO_CFG_4,
+ VIDEO_SOURCE_SEL, black ? GEN_RGB : NORMAL_PATH);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
@@ -310,8 +310,8 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
- regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
- GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
+ regmap_update_bits(hdmi->regs, GRL_SHIFT_R2,
+ AUDIO_PACKET_OFF, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
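Both hunks above fix swapped arguments. For reference, the signature is
regmap_update_bits(map, reg, mask, val) - a read-modify-write that touches only
the bits set in mask - so the corrected call reads:

	/* Update only the VIDEO_SOURCE_SEL bits of register VIDEO_CFG_4. */
	regmap_update_bits(hdmi->regs, VIDEO_CFG_4,	/* register offset */
			   VIDEO_SOURCE_SEL,		/* bits to modify */
			   black ? GEN_RGB : NORMAL_PATH);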
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index cbc4f37da8ba..02349bd44001 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -292,7 +292,8 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
- mtk_crtc_plane_disable(old_state->crtc, plane);
+ if (old_state && old_state->crtc)
+ mtk_crtc_plane_disable(old_state->crtc, plane);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index faca2a0243ab..d5d1271fce61 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -11,7 +11,7 @@
static const unsigned int *gen7_0_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_2_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_9_0_external_core_regs[] __always_unused;
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused;
#include "adreno_gen7_0_0_snapshot.h"
@@ -174,8 +174,15 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
u32 *data)
{
- u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
- A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ u32 reg;
+
+ if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+ reg = A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ } else {
+ reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ }
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -198,11 +205,18 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
-static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+static int cx_debugbus_read(struct msm_gpu *gpu, void __iomem *cxdbg, u32 block, u32 offset,
u32 *data)
{
- u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
- A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ u32 reg;
+
+ if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+ reg = A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ } else {
+ reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ }
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -315,7 +329,8 @@ static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
ptr += debugbus_read(gpu, block->id, i, ptr);
}
-static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+static void a6xx_get_cx_debugbus_block(struct msm_gpu *gpu,
+ void __iomem *cxdbg,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_debugbus_block *block,
struct a6xx_gpu_state_obj *obj)
@@ -330,7 +345,7 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
obj->handle = block;
for (ptr = obj->data, i = 0; i < block->count; i++)
- ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+ ptr += cx_debugbus_read(gpu, cxdbg, block->id, i, ptr);
}
static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu,
@@ -423,8 +438,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]],
&a6xx_state->debugbus[i + debugbus_blocks_count]);
}
- }
+ a6xx_state->nr_debugbus = total_debugbus_blocks;
+ }
}
static void a6xx_get_debugbus(struct msm_gpu *gpu,
@@ -526,7 +542,8 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
int i;
for (i = 0; i < nr_cx_debugbus_blocks; i++)
- a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_get_cx_debugbus_block(gpu,
+ cxdbg,
a6xx_state,
&cx_debugbus_blocks[i],
&a6xx_state->cx_debugbus[i]);
@@ -759,15 +776,15 @@ static void a7xx_get_cluster(struct msm_gpu *gpu,
size_t datasize;
int i, regcount = 0;
- /* Some clusters need a selector register to be programmed too */
- if (cluster->sel)
- in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
-
in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD,
A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) |
A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) |
A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id));
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel)
+ in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
+
for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) {
int count = RANGE(cluster->regs, i);
@@ -1796,6 +1813,7 @@ static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
print_name(p, " - type: ", a7xx_statetype_names[block->statetype]);
print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]);
+ drm_printf(p, " - location: %d\n", block->location);
for (i = 0; i < block->num_sps; i++) {
drm_printf(p, " - sp: %d\n", i);
@@ -1873,6 +1891,7 @@ static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]);
print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]);
drm_printf(p, " - context: %d\n", dbgahb->context_id);
+ drm_printf(p, " - location: %d\n", dbgahb->location_id);
a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 95d93ac6812a..1c18499b60bb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -419,47 +419,47 @@ static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
- REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+ REG_A6XX_CP_SQE_STAT_DATA, 0x40, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
- REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
- { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ REG_A7XX_CP_BV_SQE_STAT_DATA, 0x40, NULL },
+ { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
- REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
- { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ REG_A7XX_CP_SQE_AC_STAT_DATA, 0x40, NULL },
+ { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
- "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
- { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
- REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
- { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
- REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
+ { "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2200, NULL },
+ { "CP_BV_MEM_POOL_DBG", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
+ REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2200, NULL },
};
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
index cb66ece6606b..04b49d385f9d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
@@ -81,7 +81,7 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
A7XX_DBGBUS_USPTP_7,
};
-static struct gen7_shader_block gen7_0_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -668,12 +668,19 @@ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
-static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
0x0b60f, 0x0b621, 0x0b630, 0x0b633,
UINT_MAX, UINT_MAX,
};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
+
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+ 0x0b600, 0x0b600,
+ UINT_MAX, UINT_MAX,
+};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
@@ -695,7 +702,7 @@ static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_0_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -764,7 +771,7 @@ static struct gen7_cluster_registers gen7_0_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -914,7 +921,7 @@ static const u32 gen7_0_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8));
-static struct gen7_reg_list gen7_0_0_reg_list[] = {
+static const struct gen7_reg_list gen7_0_0_reg_list[] = {
{ gen7_0_0_gpu_registers, NULL },
{ gen7_0_0_cx_misc_registers, NULL },
{ gen7_0_0_dpm_registers, NULL },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
index 6f8ad50f32ce..772652eb61f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
@@ -95,7 +95,7 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
A7XX_DBGBUS_CCHE_2,
};
-static struct gen7_shader_block gen7_2_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -489,7 +489,7 @@ static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_2_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -558,7 +558,7 @@ static struct gen7_cluster_registers gen7_2_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -573,6 +573,8 @@ static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
{ A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
@@ -737,7 +739,7 @@ static const u32 gen7_2_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8));
-static struct gen7_reg_list gen7_2_0_reg_list[] = {
+static const struct gen7_reg_list gen7_2_0_reg_list[] = {
{ gen7_2_0_gpu_registers, NULL },
{ gen7_2_0_cx_misc_registers, NULL },
{ gen7_2_0_dpm_registers, NULL },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index e02cabb39f19..0956dfca1f05 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -117,7 +117,7 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = {
A7XX_DBGBUS_GBIF_CX,
};
-static struct gen7_shader_block gen7_9_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_9_0_shader_blocks[] = {
{ A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
@@ -1116,7 +1116,7 @@ static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_9_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_9_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -1185,7 +1185,7 @@ static struct gen7_cluster_registers gen7_9_0_clusters[] = {
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -1294,34 +1294,34 @@ static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
};
-static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
+static const struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x00040},
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x00200},
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
- { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
+ { "CP_BV_ROQ_DBG", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
- { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
- { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+ { "CP_RESOURCE_TABLE_DBG", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
- { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
- { "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
+ { "CP_LPAC_ROQ_DBG", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200},
- { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000},
- { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040},
- { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040},
{ "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0,
REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100},
@@ -1337,7 +1337,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040},
};
-static struct gen7_reg_list gen7_9_0_reg_list[] = {
+static const struct gen7_reg_list gen7_9_0_reg_list[] = {
{ gen7_9_0_gpu_registers, NULL},
{ gen7_9_0_cx_misc_registers, NULL},
{ gen7_9_0_cx_dbgc_registers, NULL},
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d4b545448d74..94912b4708fb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -596,7 +596,7 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (dpu_crtc->event) {
- DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name,
dpu_crtc->event);
trace_dpu_crtc_complete_flip(DRMID(crtc));
drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 05e5f3463e30..258edaa18fc0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -730,6 +730,8 @@ bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_st
return false;
conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return false;
/**
* These checks are duplicated from dpu_encoder_update_topology() since
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 11fb1bc54fa9..54b20faa0b69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -31,14 +31,14 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
u32 base;
if (!ctx) {
- DRM_ERROR("invalid ctx %pK\n", ctx);
+ DRM_ERROR("invalid ctx %p\n", ctx);
return;
}
base = ctx->cap->sblk->pcc.base;
if (!base) {
- DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+ DRM_ERROR("invalid ctx %p pcc base 0x%x\n", ctx, base);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 12dcb32b4724..a306077647c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1345,7 +1345,7 @@ static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
dpu_kms->pdev,
@@ -1380,7 +1380,7 @@ static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 01171c535a27..6859e8ef6b05 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1129,7 +1129,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *crtc_state = NULL;
int ret;
if (IS_ERR(plane_state))
@@ -1162,7 +1162,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
if (!old_plane_state || !old_plane_state->fb ||
old_plane_state->src_w != plane_state->src_w ||
old_plane_state->src_h != plane_state->src_h ||
- old_plane_state->src_w != plane_state->src_w ||
+ old_plane_state->crtc_w != plane_state->crtc_w ||
old_plane_state->crtc_h != plane_state->crtc_h ||
msm_framebuffer_format(old_plane_state->fb) !=
msm_framebuffer_format(plane_state->fb))
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 221f12db5f8b..4ea681130dba 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -5,6 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h"
@@ -511,30 +513,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
-static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
-{
- struct device *dev = &phy->pdev->dev;
- int ret;
-
- ret = pm_runtime_resume_and_get(dev);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(phy->ahb_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
- pm_runtime_put_sync(dev);
- }
-
- return ret;
-}
-
-static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
-{
- clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put(&phy->pdev->dev);
-}
-
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
@@ -698,22 +676,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
- phy->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(phy->ahb_clk))
- return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
- "Unable to get ahb clk\n");
+ platform_set_drvdata(pdev, phy);
- ret = devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
- /* PLL init will call into clk_register which requires
- * register access, so we need to enable power and ahb clock.
- */
- ret = dsi_phy_enable_resource(phy);
+ ret = devm_pm_clk_create(dev);
if (ret)
return ret;
+ ret = pm_clk_add(dev, "iface");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to get iface clk\n");
+
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
if (ret)
@@ -727,18 +703,19 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to register clk provider\n");
- dsi_phy_disable_resource(phy);
-
- platform_set_drvdata(pdev, phy);
-
return 0;
}
+static const struct dev_pm_ops dsi_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
static struct platform_driver dsi_phy_platform_driver = {
.probe = dsi_phy_driver_probe,
.driver = {
.name = "msm_dsi_phy",
.of_match_table = dsi_phy_dt_match,
+ .pm = &dsi_phy_pm_ops,
},
};
@@ -764,9 +741,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
dev = &phy->pdev->dev;
- ret = dsi_phy_enable_resource(phy);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
- DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: resume failed, %d\n",
__func__, ret);
goto res_en_fail;
}
@@ -810,7 +787,7 @@ pll_restor_fail:
phy_en_fail:
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
- dsi_phy_disable_resource(phy);
+ pm_runtime_put(dev);
res_en_fail:
return ret;
}
@@ -823,7 +800,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
- dsi_phy_disable_resource(phy);
+ pm_runtime_put(&phy->pdev->dev);
}
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
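The probe path above moves the hand-rolled "resume + clk_prepare_enable" pair
over to the pm_clk framework, which gates registered clocks from the runtime-PM
callbacks. A minimal sketch of the pattern for a hypothetical driver foo (the
pm_clk/pm_runtime calls shown are real API):

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		ret = devm_pm_runtime_enable(dev);
		if (ret)
			return ret;

		/* Let the PM core manage the clock list for this device. */
		ret = devm_pm_clk_create(dev);
		if (ret)
			return ret;

		ret = pm_clk_add(dev, "iface");
		if (ret < 0)
			return dev_err_probe(dev, ret, "Unable to get iface clk\n");

		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		/* "iface" is now enabled/disabled on runtime resume/suspend */
		SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
	};

Consumers then pair pm_runtime_resume_and_get() with pm_runtime_put(), as the
msm_dsi_phy_enable()/_disable() hunks above do.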
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c558f8df1684..3cbf08231492 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -104,7 +104,6 @@ struct msm_dsi_phy {
phys_addr_t lane_size;
int id;
- struct clk *ahb_clk;
struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index bbda865addae..97dc70876442 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -325,25 +325,28 @@ static struct drm_info_list msm_debugfs_list[] = {
static int late_init_minor(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- struct msm_drm_private *priv = dev->dev_private;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
int ret;
if (!minor)
return 0;
+ dev = minor->dev;
+ priv = dev->dev_private;
+
if (!priv->gpu_pdev)
return 0;
ret = msm_rd_debugfs_init(minor);
if (ret) {
- DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
+ DRM_DEV_ERROR(dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
- DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
+ DRM_DEV_ERROR(dev->dev, "could not install perf debugfs\n");
return ret;
}
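The reshuffle above is the standard fix for a dereference-before-NULL-check: an
initializer runs before any guard, so the assignments must move below it. In
miniature:

	static int late_init(struct drm_minor *minor)
	{
		struct drm_device *dev;	/* no initializer: minor may be NULL */

		if (!minor)
			return 0;

		dev = minor->dev;	/* safe: dereferenced only after the check */
		/* ... */
		return 0;
	}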
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7ff994d4f91a..e7631f4ef530 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -95,7 +95,6 @@ void msm_gem_vma_get(struct drm_gem_object *obj)
void msm_gem_vma_put(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
- struct drm_exec exec;
if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
return;
@@ -103,9 +102,13 @@ void msm_gem_vma_put(struct drm_gem_object *obj)
if (!priv->kms)
return;
+#ifdef CONFIG_DRM_MSM_KMS
+ struct drm_exec exec;
+
msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
drm_exec_fini(&exec); /* drop locks */
+#endif
}
/*
@@ -663,9 +666,13 @@ int msm_gem_set_iova(struct drm_gem_object *obj,
static bool is_kms_vm(struct drm_gpuvm *vm)
{
+#ifdef CONFIG_DRM_MSM_KMS
struct msm_drm_private *priv = vm->drm->dev_private;
return priv->kms && (priv->kms->vm == vm);
+#else
+ return false;
+#endif
}
/*
@@ -1113,10 +1120,12 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (msm_obj->flags & MSM_BO_NO_SHARE) {
+ if (obj->resv != &obj->_resv) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
+ WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
+
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 88239da1cd72..751c3b4965bc 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -100,7 +100,7 @@ struct msm_gem_vm {
*
* Only used for kernel managed VMs, unused for user managed VMs.
*
- * Protected by @mm_lock.
+ * Protected by vm lock. See msm_gem_lock_vm_and_obj(), for ex.
*/
struct drm_mm mm;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5f8e939a5906..3ab3b27134f9 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -271,32 +271,37 @@ out:
return ret;
}
-/* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_lock_objects(struct msm_gem_submit *submit)
+static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
{
- unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
struct drm_exec *exec = &submit->exec;
- int ret;
+ int ret = 0;
- if (msm_context_is_vmbind(submit->queue->ctx)) {
- flags |= DRM_EXEC_IGNORE_DUPLICATES;
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
- drm_exec_init(&submit->exec, flags, submit->nr_bos);
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
- drm_exec_until_all_locked (&submit->exec) {
- ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- return ret;
+ ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
+ }
- ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- return ret;
- }
+ return ret;
+}
- return 0;
- }
+/* This is where we make sure all the BOs are reserved and pinned: */
+static int submit_lock_objects(struct msm_gem_submit *submit)
+{
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ int ret = 0;
+
+ if (msm_context_is_vmbind(submit->queue->ctx))
+ return submit_lock_objects_vmbind(submit);
drm_exec_init(&submit->exec, flags, submit->nr_bos);
@@ -305,17 +310,17 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
drm_gpuvm_resv_obj(submit->vm));
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- return ret;
+ break;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- return ret;
+ break;
}
}
- return 0;
+ return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
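drm_exec_until_all_locked() is a retry loop: on ww-mutex contention all locks
taken so far are dropped and the block restarts, which is why every prepare
call is immediately followed by drm_exec_retry_on_contention(). The idiom in
isolation (hedged sketch; obj and num_objs stand in for the driver's state):

	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, num_objs);
	drm_exec_until_all_locked(&exec) {
		/* Lock the object and reserve one fence slot. */
		ret = drm_exec_prepare_obj(&exec, obj, 1);
		/* On -EDEADLK: unlock everything and restart the block. */
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	if (!ret) {
		/* ... all objects locked; do the work ... */
	}
	drm_exec_fini(&exec);	/* drop all locks */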
@@ -514,14 +519,15 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
+ if (error)
+ submit_unpin_objects(submit);
+
if (submit->exec.objects)
drm_exec_fini(&submit->exec);
- if (error) {
- submit_unpin_objects(submit);
- /* job wasn't enqueued to scheduler, so early retirement: */
+ /* if the job wasn't enqueued to the scheduler, retire it early: */
+ if (error)
msm_submit_retire(submit);
- }
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -769,12 +775,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
sync_file = sync_file_create(submit->user_fence);
- if (!sync_file) {
+ if (!sync_file)
ret = -ENOMEM;
- } else {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
- }
}
if (ret)
@@ -812,10 +814,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0)) {
- put_unused_fd(out_fence_fd);
+ if (ret) {
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
+ } else if (sync_file) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(submit)) {
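The fd reordering above enforces the install-on-success-only rule: once
fd_install() has run, the fd is visible to userspace and can no longer be
unwound, so it must come after the last fallible step. The pattern in isolation
(hedged sketch):

	static int export_fence_fd(struct dma_fence *fence, s32 *fd_out)
	{
		struct sync_file *sync_file;
		int fd;

		fd = get_unused_fd_flags(O_CLOEXEC);	/* reserved, not yet visible */
		if (fd < 0)
			return fd;

		sync_file = sync_file_create(fence);
		if (!sync_file) {
			put_unused_fd(fd);	/* safe: nothing was installed */
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);	/* now owned by userspace */
		*fd_out = fd;
		return 0;
	}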
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3cd8562a5109..00d0f3b7ba32 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -319,13 +319,10 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
mutex_lock(&vm->mmu_lock);
/*
- * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
+ * NOTE: if not using pgtable preallocation, we cannot hold
* a lock across map/unmap which is also used in the job_run()
* path, as this can cause deadlock in job_run() vs shrinker/
* reclaim.
- *
- * Revisit this if we can come up with a scheme to pre-alloc pages
- * for the pgtable in map/unmap ops.
*/
ret = vm_map_op(vm, &(struct msm_vm_map_op){
.iova = vma->va.addr,
@@ -454,6 +451,8 @@ msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
struct op_arg {
unsigned flags;
struct msm_vm_bind_job *job;
+ const struct msm_vm_bind_op *op;
+ bool kept;
};
static void
@@ -475,14 +474,18 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
}
static int
-msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
{
- struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct op_arg *arg = _arg;
+ struct msm_vm_bind_job *job = arg->job;
struct drm_gem_object *obj = op->map.gem.obj;
struct drm_gpuva *vma;
struct sg_table *sgt;
unsigned prot;
+ if (arg->kept)
+ return 0;
+
vma = vma_from_op(arg, &op->map);
if (WARN_ON(IS_ERR(vma)))
return PTR_ERR(vma);
@@ -602,15 +605,41 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
}
static int
-msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
{
- struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct op_arg *arg = _arg;
+ struct msm_vm_bind_job *job = arg->job;
struct drm_gpuva *vma = op->unmap.va;
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
vma->va.addr, vma->va.range);
+ /*
+ * Detect in-place remap. Turnip does this to change the vma flags,
+ * in particular MSM_VMA_DUMP. In this case we want to avoid actually
+ * touching the page tables, as that would require synchronization
+ * against SUBMIT jobs running on the GPU.
+ */
+ if (op->unmap.keep &&
+ (arg->op->op == MSM_VM_BIND_OP_MAP) &&
+ (vma->gem.obj == arg->op->obj) &&
+ (vma->gem.offset == arg->op->obj_offset) &&
+ (vma->va.addr == arg->op->iova) &&
+ (vma->va.range == arg->op->range)) {
+ /* We are only expecting a single in-place unmap+map cb pair: */
+ WARN_ON(arg->kept);
+
+ /* Leave the existing VMA in place, but signal that to the map cb: */
+ arg->kept = true;
+
+ /* Only flags are changing, so update that in-place: */
+ unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
+ vma->flags = orig_flags | arg->flags;
+
+ return 0;
+ }
+
if (!msm_vma->mapped)
goto out_close;
@@ -1271,6 +1300,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
const struct msm_vm_bind_op *op = &job->ops[i];
struct op_arg arg = {
.job = job,
+ .op = op,
};
switch (op->op) {
@@ -1460,12 +1490,8 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
sync_file = sync_file_create(job->fence);
- if (!sync_file) {
+ if (!sync_file)
ret = -ENOMEM;
- } else {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
- }
}
if (ret)
@@ -1494,10 +1520,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0)) {
- put_unused_fd(out_fence_fd);
+ if (ret) {
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
+ } else if (sync_file) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(job)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c317b25a8162..26c5ce897cbb 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -465,6 +465,7 @@ static void recover_worker(struct kthread_work *work)
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
+ struct task_struct *task;
int i;
mutex_lock(&gpu->lock);
@@ -482,16 +483,20 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
- if (submit->vm) {
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ gpu->global_faults++;
+ else {
struct msm_gem_vm *vm = to_msm_vm(submit->vm);
vm->faults++;
/*
* If userspace has opted-in to VM_BIND (and therefore userspace
- * management of the VM), faults mark the VM as unusuable. This
+ * management of the VM), faults mark the VM as unusable. This
* matches vulkan expectations (vulkan is the main target for
- * VM_BIND)
+ * VM_BIND).
*/
if (!vm->managed)
msm_gem_vm_unusable(submit->vm);
@@ -553,8 +558,15 @@ static void recover_worker(struct kthread_work *work)
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
- list_for_each_entry(submit, &ring->submits, node)
+ list_for_each_entry(submit, &ring->submits, node) {
+ /*
+ * If the submit uses an unusable VM, make sure
+ * we don't actually run it.
+ */
+ if (to_msm_vm(submit->vm)->unusable)
+ submit->nr_cmds = 0;
gpu->funcs->submit(gpu, submit);
+ }
spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 55c29f49b788..76cdd5ea06a0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -14,7 +14,9 @@
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
- atomic_t pagetables;
+
+ struct mutex init_lock; /* protects pagetables counter and prr_page */
+ int pagetables;
struct page *prr_page;
struct kmem_cache *pt_cache;
@@ -227,7 +229,8 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
- if (atomic_dec_return(&iommu->pagetables) == 0) {
+ mutex_lock(&iommu->init_lock);
+ if (--iommu->pagetables == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
if (adreno_smmu->set_prr_bit) {
@@ -236,6 +239,7 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
iommu->prr_page = NULL;
}
}
+ mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
@@ -568,9 +572,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
* If this is the first pagetable that we've allocated, send it back to
* the arm-smmu driver as a trigger to set up TTBR0
*/
- if (atomic_inc_return(&iommu->pagetables) == 1) {
+ mutex_lock(&iommu->init_lock);
+ if (iommu->pagetables++ == 0) {
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
if (ret) {
+ iommu->pagetables--;
+ mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
return ERR_PTR(ret);
@@ -595,6 +602,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
}
}
+ mutex_unlock(&iommu->init_lock);
/* Needed later for TLB flush */
pagetable->parent = parent;
@@ -730,7 +738,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
- atomic_set(&iommu->pagetables, 0);
+ mutex_init(&iommu->init_lock);
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
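The atomic_t-to-mutex conversion above reflects that the counter guards
compound state (the TTBR0 configuration and prr_page), so the first/last-user
transition and its side effects must form one critical section. The shape of
the pattern (hedged sketch; foo_setup()/foo_teardown() are placeholders):

	mutex_lock(&iommu->init_lock);
	if (iommu->pagetables++ == 0) {
		ret = foo_setup(iommu);		/* first user: set up state */
		if (ret)
			iommu->pagetables--;	/* roll back before unlocking */
	}
	mutex_unlock(&iommu->init_lock);

	/* ... and on teardown: */

	mutex_lock(&iommu->init_lock);
	if (--iommu->pagetables == 0)
		foo_teardown(iommu);		/* last user: tear down state */
	mutex_unlock(&iommu->init_lock);

With atomic_inc_return()/atomic_dec_return() the counter itself stays
consistent, but another thread can observe the "first user" state before the
setup work has finished.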
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 6889f1c1e721..56828d218e88 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -275,6 +275,12 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
if (ret)
return ret;
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+ return ret;
+ }
+
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
@@ -327,10 +333,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
goto err_msm_uninit;
}
- ret = msm_disp_snapshot_init(ddev);
- if (ret)
- DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
drm_mode_config_reset(ddev);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 1f5fe7811e01..39885b333910 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -423,7 +423,7 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
- dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+ dev_dbg(&pdev->dev, "mapped mdss address space @%p\n", msm_mdss->mmio);
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index d860fd94feae..86fab2750ba7 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -594,10 +594,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/>
<reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/>
<reg32 offset="0x0602" name="DBGC_CFG_DBGBUS_SEL_C"/>
- <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D">
+ <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A6XX">
<bitfield high="7" low="0" name="PING_INDEX"/>
<bitfield high="15" low="8" name="PING_BLK_SEL"/>
</reg32>
+ <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A7XX-">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+ </reg32>
<reg32 offset="0x0604" name="DBGC_CFG_DBGBUS_CNTLT">
<bitfield high="5" low="0" name="TRACEEN"/>
<bitfield high="14" low="12" name="GRANU"/>
@@ -3796,6 +3800,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
</domain>
+<domain name="A7XX_CX_DBGC" width="32">
+ <!-- Bitfields shifted, but otherwise the same: -->
+ <reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A" variants="A7XX-">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+ </reg32>
+</domain>
+
<domain name="A6XX_CX_MISC" width="32" prefix="variant" varset="chip">
<reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/>
<reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi.xml b/drivers/gpu/drm/msm/registers/display/dsi.xml
index 501ffc585a9f..c7a7b633d747 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi.xml
@@ -159,28 +159,28 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
</reg32>
<reg32 offset="0x00020" name="ACTIVE_H">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00024" name="ACTIVE_V">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00028" name="TOTAL">
- <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
- <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0002c" name="ACTIVE_HSYNC">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00030" name="ACTIVE_VSYNC_HPOS">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00034" name="ACTIVE_VSYNC_VPOS">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00038" name="CMD_DMA_CTRL">
@@ -209,8 +209,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00058" name="CMD_MDP_STREAM0_TOTAL">
- <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
- <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0005c" name="CMD_MDP_STREAM1_CTRL">
<bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 11d5b923d6e7..e2c55f4b9c5a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -795,6 +795,10 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
struct nouveau_drm *drm = nouveau_drm(plane->dev);
uint8_t i;
+ /* All chipsets can display all formats in linear layout */
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
if (drm->client.device.info.chipset < 0xc0) {
const struct drm_format_info *info = drm_format_info(format);
const uint8_t kind = (modifier >> 12) & 0xff;
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
index b7da3ab44c27..7c43397c19e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -103,7 +103,7 @@ gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 i
static void
gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
- nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag);
while (len >= 4) {
nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
img += 4;
@@ -249,9 +249,11 @@ int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
struct nvkm_falcon *falcon = fw->falcon;
- int target, ret;
+ int ret;
if (fw->inst) {
+ int target;
+
nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);
switch (nvkm_memory_target(fw->inst)) {
@@ -285,15 +287,6 @@ gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
}
if (fw->boot) {
- switch (nvkm_memory_target(&fw->fw.mem.memory)) {
- case NVKM_MEM_TARGET_VRAM: target = 4; break;
- case NVKM_MEM_TARGET_HOST: target = 5; break;
- case NVKM_MEM_TARGET_NCOH: target = 6; break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
fw->boot_addr >> 8, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 52412965fac1..5b721bd9d799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -209,11 +209,12 @@ nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
fw->boot_addr = bld->start_tag << 8;
fw->boot_size = bld->code_size;
fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
- if (!fw->boot)
- ret = -ENOMEM;
nvkm_firmware_put(bl);
+ if (!fw->boot)
+ return -ENOMEM;
+
/* Patch in interface data. */
return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 41a285ec889f..8ede07fb7a21 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -526,7 +526,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
if (drm_gem_is_imported(gem)) {
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
- dma_buf_detach(gem->dma_buf, gem->import_attach);
+ dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
}
}
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 18f27da47a36..1be2415966df 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -812,7 +812,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
}
if (ttm_bo->type == ttm_bo_type_sg) {
- ret = xe_bo_move_notify(bo, ctx);
+ if (new_mem->mem_type == XE_PL_SYSTEM)
+ ret = xe_bo_move_notify(bo, ctx);
if (!ret)
ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
return ret;
@@ -2438,7 +2439,6 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
.no_wait_gpu = false,
.gfp_retry_mayfail = true,
};
- struct pin_cookie cookie;
int ret;
if (vm) {
@@ -2449,10 +2449,10 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
- cookie = xe_vm_set_validating(vm, allow_res_evict);
+ xe_vm_set_validating(vm, allow_res_evict);
trace_xe_bo_validate(bo);
ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
- xe_vm_clear_validating(vm, allow_res_evict, cookie);
+ xe_vm_clear_validating(vm, allow_res_evict);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 6581cb0f0e59..247e41c1c48d 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -123,11 +123,19 @@ static int parse(FILE *input, FILE *csource, FILE *cheader, char *prefix)
return 0;
}
+/* Avoid the GNU vs POSIX basename() discrepancy; just use our own */
+static const char *xbasename(const char *s)
+{
+ const char *p = strrchr(s, '/');
+
+ return p ? p + 1 : s;
+}
+
static int fn_to_prefix(const char *fn, char *prefix, size_t size)
{
size_t len;
- fn = basename(fn);
+ fn = xbasename(fn);
len = strlen(fn);
if (len > size - 1)
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index f87276df18f2..82872a51f098 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -77,6 +77,7 @@ static void user_fence_worker(struct work_struct *w)
{
struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
+ WRITE_ONCE(ufence->signalled, 1);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
@@ -91,7 +92,6 @@ static void user_fence_worker(struct work_struct *w)
* Wake up waiters only after updating the ufence state, allowing the UMD
* to safely reuse the same ufence without encountering -EBUSY errors.
*/
- WRITE_ONCE(ufence->signalled, 1);
wake_up_all(&ufence->xe->ufence_wq);
user_fence_put(ufence);
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index ec04bef8ae40..d60c4b115304 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1610,8 +1610,12 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
- if (IS_ERR(vm->scratch_pt[id][i]))
- return PTR_ERR(vm->scratch_pt[id][i]);
+ if (IS_ERR(vm->scratch_pt[id][i])) {
+ int err = PTR_ERR(vm->scratch_pt[id][i]);
+
+ vm->scratch_pt[id][i] = NULL;
+ return err;
+ }
xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
}
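The NULL reset above matters because the scratch_pt slot outlives the function:
generic cleanup walks the array and treats any non-NULL entry as valid, so an
ERR_PTR left in place would later be dereferenced. The pattern in isolation
(hedged sketch; create_entry() is a placeholder returning an ERR_PTR() on
failure):

	for (i = 0; i < n; i++) {
		table[i] = create_entry(i);
		if (IS_ERR(table[i])) {
			int err = PTR_ERR(table[i]);

			/* Keep the table walkable for later cleanup. */
			table[i] = NULL;
			return err;
		}
	}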
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 2f213737c7e5..2ecb417c19a2 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -315,22 +315,14 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
* Register this task as currently making bos resident for the vm. Intended
* to avoid eviction by the same task of shared bos bound to the vm.
* Call with the vm's resv lock held.
- *
- * Return: A pin cookie that should be used for xe_vm_clear_validating().
*/
-static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
- bool allow_res_evict)
+static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
- struct pin_cookie cookie = {};
-
if (vm && !allow_res_evict) {
xe_vm_assert_held(vm);
- cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
WRITE_ONCE(vm->validating, current);
}
-
- return cookie;
}
/**
@@ -338,17 +330,14 @@ static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
* @vm: Pointer to the vm or NULL
* @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
- * @cookie: Cookie obtained from xe_vm_set_validating().
*
 * Unregister this task as currently making bos resident for the vm. Intended
* to avoid eviction by the same task of shared bos bound to the vm.
* Call with the vm's resv lock held.
*/
-static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
- struct pin_cookie cookie)
+static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
if (vm && !allow_res_evict) {
- lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
WRITE_ONCE(vm->validating, NULL);
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 2b05722d4dbe..ea8a0ab47afd 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -39,12 +39,13 @@
#include "hfc_pci.h"
+static void hfcpci_softirq(struct timer_list *unused);
static const char *hfcpci_revision = "2.0";
static int HFC_cnt;
static uint debug;
static uint poll, tics;
-static struct timer_list hfc_tl;
+static DEFINE_TIMER(hfc_tl, hfcpci_softirq);
static unsigned long hfc_jiffies;
MODULE_AUTHOR("Karsten Keil");
@@ -2305,8 +2306,7 @@ hfcpci_softirq(struct timer_list *unused)
hfc_jiffies = jiffies + 1;
else
hfc_jiffies += tics;
- hfc_tl.expires = hfc_jiffies;
- add_timer(&hfc_tl);
+ mod_timer(&hfc_tl, hfc_jiffies);
}
static int __init
@@ -2332,10 +2332,8 @@ HFC_init(void)
if (poll != HFCPCI_BTRANS_THRESHOLD) {
printk(KERN_INFO "%s: Using alternative poll value of %d\n",
__func__, poll);
- timer_setup(&hfc_tl, hfcpci_softirq, 0);
- hfc_tl.expires = jiffies + tics;
- hfc_jiffies = hfc_tl.expires;
- add_timer(&hfc_tl);
+ hfc_jiffies = jiffies + tics;
+ mod_timer(&hfc_tl, hfc_jiffies);
} else
tics = 0; /* indicate the use of controller's timer */
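DEFINE_TIMER() initializes the timer at build time, and mod_timer() arms an inactive timer as well as re-arming a pending one, which is what lets these hunks drop the timer_setup()/add_timer() pair. A sketch of the resulting idiom:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void my_poll(struct timer_list *unused);
	static DEFINE_TIMER(my_timer, my_poll);	/* no runtime timer_setup() needed */

	static void my_poll(struct timer_list *unused)
	{
		/* ...periodic work... */
		mod_timer(&my_timer, jiffies + HZ);	/* re-arm for the next tick */
	}

	static int __init my_init(void)
	{
		mod_timer(&my_timer, jiffies + HZ);	/* also starts an inactive timer */
		return 0;
	}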
diff --git a/drivers/media/i2c/alvium-csi2.c b/drivers/media/i2c/alvium-csi2.c
index 05b708bd0a64..1f088acecf36 100644
--- a/drivers/media/i2c/alvium-csi2.c
+++ b/drivers/media/i2c/alvium-csi2.c
@@ -1841,7 +1841,6 @@ static int alvium_s_stream(struct v4l2_subdev *sd, int enable)
} else {
alvium_set_stream_mipi(alvium, enable);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
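This removal, repeated across the media drivers below, tracks what appears to be a runtime-PM core change under which pm_runtime_put_autosuspend() updates the device's last-busy timestamp itself, so an explicit pm_runtime_mark_last_busy() immediately before it is redundant. The resulting idiom, as a sketch:

	#include <linux/pm_runtime.h>

	static int do_io(struct device *dev)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)
			return ret;

		/* ...touch the hardware... */

		pm_runtime_put_autosuspend(dev);	/* marks last busy, drops the count */
		return 0;
	}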
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 487bcabb4a19..1c889c878abd 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -787,10 +787,8 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
rval = -EINVAL;
}
- if (pm_status > 0) {
- pm_runtime_mark_last_busy(&client->dev);
+ if (pm_status > 0)
pm_runtime_put_autosuspend(&client->dev);
- }
return rval;
}
@@ -1914,7 +1912,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
if (!enable) {
ccs_stop_streaming(sensor);
sensor->streaming = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
@@ -1929,7 +1926,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
rval = ccs_start_streaming(sensor);
if (rval < 0) {
sensor->streaming = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
@@ -2677,7 +2673,6 @@ nvm_show(struct device *dev, struct device_attribute *attr, char *buf)
return -ENODEV;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
/*
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index 3a4d100b9199..d434721ba8ed 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -374,7 +374,6 @@ static int dw9768_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static int dw9768_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_autosuspend(sd->dev);
return 0;
diff --git a/drivers/media/i2c/gc0308.c b/drivers/media/i2c/gc0308.c
index 069f42785b3c..cbcda0e18ff1 100644
--- a/drivers/media/i2c/gc0308.c
+++ b/drivers/media/i2c/gc0308.c
@@ -974,7 +974,6 @@ static int gc0308_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
dev_err(gc0308->dev, "failed to set control: %d\n", ret);
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return ret;
@@ -1157,14 +1156,12 @@ static int gc0308_start_stream(struct gc0308 *gc0308)
return 0;
disable_pm:
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return ret;
}
static int gc0308_stop_stream(struct gc0308 *gc0308)
{
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return 0;
}
diff --git a/drivers/media/i2c/gc2145.c b/drivers/media/i2c/gc2145.c
index ba02161d46e7..559a851669aa 100644
--- a/drivers/media/i2c/gc2145.c
+++ b/drivers/media/i2c/gc2145.c
@@ -963,7 +963,6 @@ static int gc2145_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
@@ -985,7 +984,6 @@ static int gc2145_disable_streams(struct v4l2_subdev *sd,
if (ret)
dev_err(&client->dev, "%s failed to write regs\n", __func__);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -1193,7 +1191,6 @@ static int gc2145_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 3b4f68543342..3faf48f34af4 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -771,7 +771,6 @@ static int imx219_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
@@ -793,7 +792,6 @@ static int imx219_disable_streams(struct v4l2_subdev *sd,
__v4l2_ctrl_grab(imx219->vflip, false);
__v4l2_ctrl_grab(imx219->hflip, false);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index da618c8cbadc..67e8bb432d10 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1143,7 +1143,6 @@ static int imx283_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return ret;
@@ -1163,7 +1162,6 @@ static int imx283_disable_streams(struct v4l2_subdev *sd,
if (ret)
dev_err(imx283->dev, "Failed to stop stream\n");
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return ret;
@@ -1558,7 +1556,6 @@ static int imx283_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return 0;
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 4f3f386c5353..ec172556612e 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -869,7 +869,6 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
return ret;
@@ -1099,7 +1098,6 @@ static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
}
} else {
imx290_stop_streaming(imx290);
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
}
@@ -1294,7 +1292,6 @@ static int imx290_subdev_init(struct imx290 *imx290)
* will already be prevented even before the delay.
*/
v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
imx290->sd.internal_ops = &imx290_internal_ops;
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index f3bec16b527c..61116f4e3f76 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -604,7 +604,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
ret = imx296_stream_off(sensor);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
goto unlock;
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 278e743646ea..276bf4d6f39d 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -952,7 +952,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
ret = imx415_stream_off(sensor);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
goto unlock;
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
index 3f540ca40f3c..aa3fd6c6c76c 100644
--- a/drivers/media/i2c/mt9m114.c
+++ b/drivers/media/i2c/mt9m114.c
@@ -974,7 +974,6 @@ static int mt9m114_start_streaming(struct mt9m114 *sensor,
return 0;
error:
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -988,7 +987,6 @@ static int mt9m114_stop_streaming(struct mt9m114 *sensor)
ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1046,7 +1044,6 @@ static int mt9m114_pa_g_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1113,7 +1110,6 @@ static int mt9m114_pa_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1565,7 +1561,6 @@ static int mt9m114_ifp_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -2472,7 +2467,6 @@ static int mt9m114_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index 1c3a449f9354..7d740ad3926f 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -497,7 +497,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
} else {
cci_write(ov4689->regmap, OV4689_REG_CTRL_MODE,
OV4689_MODE_SW_STANDBY, NULL);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -702,7 +701,6 @@ static int ov4689_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -999,7 +997,6 @@ static int ov4689_probe(struct i2c_client *client)
goto err_clean_subdev_pm;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 0dae0438aa80..84198613381d 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3341,7 +3341,6 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
return 0;
@@ -3417,7 +3416,6 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
return ret;
@@ -3754,7 +3752,6 @@ out:
mutex_unlock(&sensor->lock);
if (!enable || ret) {
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
}
@@ -3965,7 +3962,6 @@ static int ov5640_probe(struct i2c_client *client)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 004d0ee5c3f5..58c846a44376 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -808,7 +808,6 @@ static int ov5645_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(ov5645->dev);
pm_runtime_put_autosuspend(ov5645->dev);
return ret;
@@ -979,7 +978,6 @@ static int ov5645_disable_streams(struct v4l2_subdev *sd,
OV5645_SYSTEM_CTRL0_STOP);
rpm_put:
- pm_runtime_mark_last_busy(ov5645->dev);
pm_runtime_put_autosuspend(ov5645->dev);
return ret;
@@ -1196,7 +1194,6 @@ static int ov5645_probe(struct i2c_client *client)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c
index a5da4fe47e0b..2031cbd05c26 100644
--- a/drivers/media/i2c/ov64a40.c
+++ b/drivers/media/i2c/ov64a40.c
@@ -2990,7 +2990,6 @@ static int ov64a40_start_streaming(struct ov64a40 *ov64a40,
return 0;
error_power_off:
- pm_runtime_mark_last_busy(ov64a40->dev);
pm_runtime_put_autosuspend(ov64a40->dev);
return ret;
@@ -3000,7 +2999,6 @@ static int ov64a40_stop_streaming(struct ov64a40 *ov64a40,
struct v4l2_subdev_state *state)
{
cci_update_bits(ov64a40->cci, OV64A40_REG_SMIA, BIT(0), 0, NULL);
- pm_runtime_mark_last_busy(ov64a40->dev);
pm_runtime_put_autosuspend(ov64a40->dev);
__v4l2_ctrl_grab(ov64a40->link_freq, false);
@@ -3329,10 +3327,8 @@ static int ov64a40_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- if (pm_status > 0) {
- pm_runtime_mark_last_busy(ov64a40->dev);
+ if (pm_status > 0)
pm_runtime_put_autosuspend(ov64a40->dev);
- }
return ret;
}
@@ -3622,7 +3618,6 @@ static int ov64a40_probe(struct i2c_client *client)
goto error_subdev_cleanup;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 95f9ae794846..6b7193eaea1f 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1391,7 +1391,6 @@ static int ov8858_s_stream(struct v4l2_subdev *sd, int on)
}
} else {
ov8858_stop_stream(ov8858);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
@@ -1945,7 +1944,6 @@ static int ov8858_probe(struct i2c_client *client)
goto err_power_off;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f4568e87f018..41ae25b0911f 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -465,7 +465,6 @@ static int mipid02_disable_streams(struct v4l2_subdev *sd,
if (ret)
goto error;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
error:
@@ -542,7 +541,6 @@ error:
cci_write(bridge->regmap, MIPID02_DATA_LANE0_REG1, 0, &ret);
cci_write(bridge->regmap, MIPID02_DATA_LANE1_REG1, 0, &ret);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 143aa1359aba..bcfc274cf891 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -816,7 +816,6 @@ static int tc358746_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
err_out:
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return err;
@@ -838,7 +837,6 @@ err_out:
if (err)
return err;
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return v4l2_subdev_call(src, video, s_stream, 0);
@@ -1016,7 +1014,6 @@ tc358746_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
err = tc358746_read(tc358746, reg->reg, &val);
reg->val = val;
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return err;
@@ -1032,7 +1029,6 @@ tc358746_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
tc358746_write(tc358746, (u32)reg->reg, (u32)reg->val);
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return 0;
@@ -1395,7 +1391,6 @@ static int tc358746_init_hw(struct tc358746 *tc358746)
}
err = tc358746_read(tc358746, CHIPID_REG, &val);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (err)
return -ENODEV;
diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c
index 8852c56431fe..775cfba188d8 100644
--- a/drivers/media/i2c/thp7312.c
+++ b/drivers/media/i2c/thp7312.c
@@ -808,7 +808,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
thp7312_stream_enable(thp7312, false);
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
v4l2_subdev_unlock_state(sd_state);
@@ -839,7 +838,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable)
goto finish_unlock;
finish_pm:
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
finish_unlock:
v4l2_subdev_unlock_state(sd_state);
@@ -1147,7 +1145,6 @@ static int thp7312_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
return ret;
@@ -2183,7 +2180,6 @@ static int thp7312_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
dev_info(dev, "THP7312 firmware version %02u.%02u\n",
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
index c0754fd03b1d..7c39183dd44b 100644
--- a/drivers/media/i2c/vd55g1.c
+++ b/drivers/media/i2c/vd55g1.c
@@ -1104,7 +1104,6 @@ static int vd55g1_disable_streams(struct v4l2_subdev *sd,
vd55g1_grab_ctrls(sensor, false);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1338,7 +1337,6 @@ static int vd55g1_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1433,7 +1431,6 @@ static int vd55g1_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1895,7 +1892,6 @@ static int vd55g1_probe(struct i2c_client *client)
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 4000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
ret = vd55g1_subdev_init(sensor);
diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c
index 5d951ad0b478..d66e21ba4498 100644
--- a/drivers/media/i2c/vd56g3.c
+++ b/drivers/media/i2c/vd56g3.c
@@ -493,7 +493,6 @@ static int vd56g3_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -577,7 +576,6 @@ static int vd56g3_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1021,7 +1019,6 @@ static int vd56g3_disable_streams(struct v4l2_subdev *sd,
__v4l2_ctrl_grab(sensor->vflip_ctrl, false);
__v4l2_ctrl_grab(sensor->patgen_ctrl, false);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1527,7 +1524,6 @@ static int vd56g3_probe(struct i2c_client *client)
}
/* Sensor could now be powered off (after the autosuspend delay) */
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
dev_dbg(dev, "Successfully probe %s sensor\n",
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 0dd991d70d53..1eee2d4f5b40 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -288,7 +288,6 @@ static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type,
return tmp;
tmp = regmap_bulk_read(data->regmap, AMG88XX_REG_TTHL, &buf, 2);
- pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
if (tmp)
return tmp;
@@ -527,7 +526,6 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
error_rpm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
error_del_list:
video_i2c_del_list(vq, VB2_BUF_STATE_QUEUED);
@@ -544,7 +542,6 @@ static void stop_streaming(struct vb2_queue *vq)
kthread_stop(data->kthread_vid_cap);
data->kthread_vid_cap = NULL;
- pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
video_i2c_del_list(vq, VB2_BUF_STATE_ERROR);
@@ -853,7 +850,6 @@ static int video_i2c_probe(struct i2c_client *client)
if (ret < 0)
goto error_pm_disable;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index fd71f0c43ac3..a9ce032cc5a2 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -451,7 +451,6 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
if (q_status.report_queue_count == 0 &&
(q_status.instance_queue_count == 0 || dec_info.sequence_changed)) {
dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
@@ -1364,7 +1363,6 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count
}
}
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return ret;
@@ -1498,7 +1496,6 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
else
streamoff_capture(q);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
}
@@ -1662,7 +1659,6 @@ static void wave5_vpu_dec_device_run(void *priv)
finish_job_and_return:
dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
index 1e5fc5f8b856..35913a7de834 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -1391,12 +1391,10 @@ static int wave5_vpu_enc_start_streaming(struct vb2_queue *q, unsigned int count
if (ret)
goto return_buffers;
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return 0;
return_buffers:
wave5_return_bufs(q, VB2_BUF_STATE_QUEUED);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return ret;
}
@@ -1465,7 +1463,6 @@ static void wave5_vpu_enc_stop_streaming(struct vb2_queue *q)
else
streamoff_capture(inst, q);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
}
@@ -1520,7 +1517,6 @@ static void wave5_vpu_enc_device_run(void *priv)
break;
}
dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return;
default:
@@ -1529,7 +1525,6 @@ static void wave5_vpu_enc_device_run(void *priv)
break;
}
dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
index 0e56a4331b0d..45f8f6904867 100644
--- a/drivers/media/platform/nvidia/tegra-vde/h264.c
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -585,7 +585,6 @@ static int tegra_vde_decode_begin(struct tegra_vde *vde,
return 0;
put_runtime_pm:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
unlock:
@@ -612,7 +611,6 @@ static void tegra_vde_decode_abort(struct tegra_vde *vde)
if (err)
dev_err(dev, "DEC end: Failed to assert HW reset: %d\n", err);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
mutex_unlock(&vde->lock);
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
index 221dcd09e1e1..b3ed06297953 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_queue.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
@@ -142,7 +142,6 @@ int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size)
}
mutex_unlock(&core->lock);
- pm_runtime_mark_last_busy(core->dev);
pm_runtime_put_autosuspend(core->dev);
return 0;
diff --git a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
index b30891718d8d..d60d92d2ffa1 100644
--- a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
+++ b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
@@ -950,7 +950,6 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
kfree(job);
}
- pm_runtime_mark_last_busy(pispbe->dev);
pm_runtime_put_autosuspend(pispbe->dev);
dev_dbg(pispbe->dev, "Nodes streaming now 0x%x\n",
@@ -1742,7 +1741,6 @@ static int pispbe_probe(struct platform_device *pdev)
if (ret)
goto disable_devs_err;
- pm_runtime_mark_last_busy(pispbe->dev);
pm_runtime_put_autosuspend(pispbe->dev);
return 0;
diff --git a/drivers/media/platform/rockchip/rkvdec/rkvdec.c b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
index d707088ec0dc..d3b31f461194 100644
--- a/drivers/media/platform/rockchip/rkvdec/rkvdec.c
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
@@ -765,7 +765,6 @@ static void rkvdec_job_finish(struct rkvdec_ctx *ctx,
{
struct rkvdec_dev *rkvdec = ctx->dev;
- pm_runtime_mark_last_busy(rkvdec->dev);
pm_runtime_put_autosuspend(rkvdec->dev);
rkvdec_job_finish_no_pm(ctx, result);
}
@@ -1159,13 +1158,6 @@ static int rkvdec_probe(struct platform_device *pdev)
return ret;
}
- if (iommu_get_domain_for_dev(&pdev->dev)) {
- rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);
-
- if (!rkvdec->empty_domain)
- dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
- }
-
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
irq = platform_get_irq(pdev, 0);
@@ -1188,6 +1180,15 @@ static int rkvdec_probe(struct platform_device *pdev)
if (ret)
goto err_disable_runtime_pm;
+ if (iommu_get_domain_for_dev(&pdev->dev)) {
+ rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);
+
+ if (IS_ERR(rkvdec->empty_domain)) {
+ rkvdec->empty_domain = NULL;
+ dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
+ }
+ }
+
return 0;
err_disable_runtime_pm:
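Besides moving the allocation after runtime-PM setup, the relocated block fixes the failure test: iommu_paging_domain_alloc() reports errors with ERR_PTR(), never NULL, so the old !ptr check could not fire. The corrected pattern in isolation, keeping the hunk's warn-and-continue behavior:

	#include <linux/err.h>
	#include <linux/iommu.h>

	static struct iommu_domain *get_empty_domain(struct device *dev)
	{
		struct iommu_domain *dom = iommu_paging_domain_alloc(dev);

		if (IS_ERR(dom))
			return NULL;	/* caller treats NULL as "no empty domain" */
		return dom;
	}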
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 8542238e0fb1..fa972effd4a2 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -89,7 +89,6 @@ static void hantro_job_finish(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
enum vb2_buffer_state result)
{
- pm_runtime_mark_last_busy(vpu->dev);
pm_runtime_put_autosuspend(vpu->dev);
clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index bf6d8fa983bf..a6418ef782bc 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -48,10 +48,8 @@ static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
if (val >= 0)
ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
- if (pmdev) {
- pm_runtime_mark_last_busy(pmdev);
+ if (pmdev)
pm_runtime_put_autosuspend(pmdev);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 207a8bb36ae5..31e3d825b4bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -8016,7 +8016,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
rx_rings = min_t(int, rx_rings, hwr.grp);
hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
- if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ if (bnxt_ulp_registered(bp->edev) &&
+ hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
hwr.cp = min_t(int, hwr.cp, hwr.stat);
rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
@@ -8024,6 +8025,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.rx = rx_rings << 1;
tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ if (hwr.tx != bp->tx_nr_rings) {
+ netdev_warn(bp->dev,
+ "Able to reserve only %d out of %d requested TX rings\n",
+ hwr.tx, bp->tx_nr_rings);
+ }
bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -12851,6 +12857,17 @@ static int bnxt_set_xps_mapping(struct bnxt *bp)
return rc;
}
+static int bnxt_tx_nr_rings(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
+ bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12868,6 +12885,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (rc)
return rc;
+ /* Make adjustments if reserved TX rings are less than requested */
+ bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ if (bp->tx_nr_rings_xdp) {
+ bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
+ }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -16325,7 +16349,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
bp->rx_nr_rings = bp->cp_nr_rings;
bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
}
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
@@ -16357,7 +16381,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bnxt_trim_dflt_sh_rings(bp);
else
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
@@ -16370,7 +16394,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
if (sh)
bnxt_trim_dflt_sh_rings(bp);
@@ -16379,7 +16403,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -16413,7 +16437,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
if (rc)
goto init_dflt_ring_err;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
bnxt_set_dflt_rfs(bp);
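The two new helpers keep the total TX ring count and the per-TC count consistent once traffic classes are configured: multiply by num_tc going out, divide coming back, and treat num_tc == 0 as a single implicit class. The arithmetic in isolation; for example, 8 rings per TC and 2 TCs give 16 in total, and 16 / 2 recovers 8:

	static int tx_nr_rings(int per_tc, int num_tc)
	{
		return num_tc ? per_tc * num_tc : per_tc;
	}

	static int tx_nr_rings_per_tc(int total, int num_tc)
	{
		return num_tc ? total / num_tc : total;
	}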
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 9693f0289435..16d28a8b3b56 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3090,7 +3090,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(p++) += ((u64)val) << 32;
+ *p += ((u64)val) << 32;
}
}
@@ -5399,19 +5399,16 @@ static void macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ unregister_netdev(dev);
phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
+ device_set_wakeup_enable(&bp->pdev->dev, 0);
cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- if (!pm_runtime_suspended(&pdev->dev)) {
- macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
- bp->rx_clk, bp->tsu_clk);
- pm_runtime_set_suspended(&pdev->dev);
- }
+ pm_runtime_set_suspended(&pdev->dev);
phylink_destroy(bp->phylink);
free_netdev(dev);
}
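The reordering makes macb_remove() detach from the networking core before the PHY, MDIO and power teardown, so no new I/O can race against resources that are already gone. The shape of the ordering, not macb's exact code:

	#include <linux/netdevice.h>

	static void remove(struct net_device *ndev)
	{
		unregister_netdev(ndev);	/* stop new tx/ioctl activity first */
		/* ...PHY exit, MDIO bus unregister, wakeup/clock/runtime-PM teardown... */
		free_netdev(ndev);		/* last: frees the netdev itself */
	}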
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cc60ee454bf9..6bbf6e5584e5 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1099,7 +1099,7 @@ get_stats (struct net_device *dev)
dev->stats.rx_bytes += dr32(OctetRcvOk);
dev->stats.tx_bytes += dr32(OctetXmtOk);
- dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.multicast += dr32(McstFramesRcvdOk);
dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
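McstFramesRcvdOk appears to be a read-to-clear MIB counter, matching the += accumulation already used for the neighbouring counters: each read returns only the frames counted since the previous read, so assignment discards history. In sketch form, with hw_read_clear() as a hypothetical stand-in for dr32(McstFramesRcvdOk):

	static unsigned long hw_read_clear(void);	/* delta since last read */

	static void update_stats(unsigned long *multicast)
	{
		*multicast += hw_read_clear();	/* `=` would discard earlier deltas */
	}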
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2098f00b3cd3..8a8a01a4bb40 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -510,6 +510,7 @@ enum ice_pf_flags {
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_FLAG_UNPLUG_AUX_DEV,
+ ICE_FLAG_AUX_DEV_CREATED,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 9e4adc43e474..b53561c34708 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -13,16 +13,45 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-static unsigned long ice_adapter_index(u64 dsn)
+#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
+
+#define ICE_ADAPTER_INDEX_E825C \
+ (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
+
+static u64 ice_adapter_index(struct pci_dev *pdev)
{
+ switch (pdev->device) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ /* E825C devices have multiple NACs which are connected to the
+ * same clock source, and which must share the same
+ * ice_adapter structure. We can't use the serial number since
+ * each NAC has its own NVM generated with its own unique
+ * Device Serial Number. Instead, rely on the embedded nature
+ * of the E825C devices, and use a fixed index. This relies on
+ * the fact that all E825C physical functions in a given
+ * system are part of the same overall device.
+ */
+ return ICE_ADAPTER_INDEX_E825C;
+ default:
+ return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
+ }
+}
+
+static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
+{
+ u64 index = ice_adapter_index(pdev);
+
#if BITS_PER_LONG == 64
- return dsn;
+ return index;
#else
- return (u32)dsn ^ (u32)(dsn >> 32);
+ return (u32)index ^ (u32)(index >> 32);
#endif
}
-static struct ice_adapter *ice_adapter_new(u64 dsn)
+static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
{
struct ice_adapter *adapter;
@@ -30,7 +59,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn)
if (!adapter)
return NULL;
- adapter->device_serial_number = dsn;
+ adapter->index = ice_adapter_index(pdev);
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
spin_lock_init(&adapter->txq_ctx_lock);
refcount_set(&adapter->refcount, 1);
@@ -64,24 +93,23 @@ static void ice_adapter_free(struct ice_adapter *adapter)
*/
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
unsigned long index;
int err;
- index = ice_adapter_index(dsn);
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
if (err == -EBUSY) {
adapter = xa_load(&ice_adapters, index);
refcount_inc(&adapter->refcount);
- WARN_ON_ONCE(adapter->device_serial_number != dsn);
+ WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
return adapter;
}
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new(dsn);
+ adapter = ice_adapter_new(pdev);
if (!adapter)
return ERR_PTR(-ENOMEM);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@@ -100,11 +128,10 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
*/
void ice_adapter_put(struct pci_dev *pdev)
{
- u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
unsigned long index;
- index = ice_adapter_index(dsn);
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
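On 32-bit kernels an xarray index is only 32 bits wide, so the 64-bit adapter index is folded by XOR; the unfolded value stays in struct ice_adapter so the WARN_ON_ONCE() above can still detect fold collisions. The folding as a standalone sketch:

	#include <linux/types.h>
	#include <linux/bits.h>

	static unsigned long fold_index(u64 index)
	{
	#if BITS_PER_LONG == 64
		return index;
	#else
		return (u32)index ^ (u32)(index >> 32);	/* lossy: collisions possible */
	#endif
	}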
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index db66d03c9f96..e95266c7f20b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -33,7 +33,7 @@ struct ice_port_list {
* @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
- * @device_serial_number: DSN cached for collision detection on 32bit systems
+ * @index: 64-bit index cached for collision detection on 32bit systems
*/
struct ice_adapter {
refcount_t refcount;
@@ -44,7 +44,7 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
- u64 device_serial_number;
+ u64 index;
};
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index e2a036ce76ca..3b2d9c436979 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2377,7 +2377,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
* The function will apply the new Tx topology from the package buffer
* if available.
*
- * Return: zero when update was successful, negative values otherwise.
+ * Return:
+ * * 0 - Successfully applied topology configuration.
+ * * -EBUSY - Failed to acquire global configuration lock.
+ * * -EEXIST - Topology configuration has already been applied.
+ * * -EIO - Unable to apply topology configuration.
+ * * -ENODEV - Failed to re-initialize device after applying configuration.
+ * * Other negative error codes indicate unexpected failures.
*/
int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
{
@@ -2410,7 +2416,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
- return status;
+ return -EIO;
}
/* Is default topology already applied ? */
@@ -2497,31 +2503,45 @@ update_topo:
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
- return status;
+ return -EBUSY;
}
/* Check if reset was triggered already. */
reg = rd32(hw, GLGEN_RSTAT);
if (reg & GLGEN_RSTAT_DEVSTATE_M) {
- /* Reset is in progress, re-init the HW again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
ice_check_reset(hw);
- return 0;
+ /* Reset is in progress, re-init the HW again */
+ goto reinit_hw;
}
/* Set new topology */
status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
- return status;
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
+ ERR_PTR(status));
+ /* only report -EIO here as the caller checks the error value
+ * and reports an informational error message informing that
+ * the driver failed to program Tx topology.
+ */
+ status = -EIO;
}
- /* New topology is updated, delay 1 second before issuing the CORER */
+ /* Even if the Tx topology config failed, we need a CORE reset here to
+ * clear the global configuration lock. Delay 1 second to allow the
+ * hardware to settle, then issue a CORER.
+ */
msleep(1000);
ice_reset(hw, ICE_RESET_CORER);
- /* CORER will clear the global lock, so no explicit call
- * required for release.
- */
+ ice_check_reset(hw);
+
+reinit_hw:
+ /* Since we triggered a CORER, re-initialize hardware */
+ ice_deinit_hw(hw);
+ if (ice_init_hw(hw)) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
+ return -ENODEV;
+ }
- return 0;
+ return status;
}
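The restructured tail of ice_cfg_tx_topo() separates "remember the failure" from "finish the reset": a set-topology error is stashed in status, but the CORER and the hardware re-init still run so the global configuration lock is released. A sketch of that control flow, with hypothetical helpers standing in for the ice calls:

	static int set_topology(void);
	static void core_reset(void);
	static int reinit_hw(void);

	static int cfg(void)
	{
		int status = 0;

		if (set_topology() < 0)
			status = -EIO;	/* remember the failure, don't return yet */

		core_reset();		/* also releases the global config lock */

		if (reinit_hw() < 0)
			return -ENODEV;	/* overrides status: hardware is unusable */

		return status;
	}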
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 6ab53e430f91..420d45c2558b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -336,6 +336,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
mutex_lock(&pf->adev_mutex);
cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
+ set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags);
return 0;
}
@@ -347,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
{
struct auxiliary_device *adev;
+ if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags))
+ return;
+
mutex_lock(&pf->adev_mutex);
adev = pf->cdev_info->adev;
pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
- if (adev) {
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
- }
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
/**
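ICE_FLAG_AUX_DEV_CREATED turns the unplug path into an at-most-once operation: test_and_clear_bit() atomically claims the teardown, so repeated or never-plugged callers return early instead of touching a NULL adev. The idiom with a hypothetical flag word:

	#include <linux/bitops.h>

	#define AUX_DEV_CREATED	0

	static unsigned long flags;

	static void unplug(void)
	{
		if (!test_and_clear_bit(AUX_DEV_CREATED, &flags))
			return;	/* never created, or already torn down */
		/* ...auxiliary_device_delete() and auxiliary_device_uninit()... */
	}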
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8e0b06c1e02b..cae992d8f03c 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4536,17 +4536,23 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
dev_info(dev, "Tx scheduling layers switching feature disabled\n");
else
dev_info(dev, "Tx scheduling layers switching feature enabled\n");
- /* if there was a change in topology ice_cfg_tx_topo triggered
- * a CORER and we need to re-init hw
+ return 0;
+ } else if (err == -ENODEV) {
+ /* If we failed to re-initialize the device, we can no longer
+ * continue loading.
*/
- ice_deinit_hw(hw);
- err = ice_init_hw(hw);
-
+ dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
return err;
} else if (err == -EIO) {
dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
+ return 0;
+ } else if (err == -EEXIST) {
+ return 0;
}
+ /* Do not treat this as a fatal error. */
+ dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
+ ERR_PTR(err));
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 29e0088ab6b2..d2871757ec94 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1352,7 +1352,7 @@ construct_skb:
skb = ice_construct_skb(rx_ring, xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
xdp_verdict = ICE_XDP_CONSUMED;
}
ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 555879b1248d..b19b462e0bb6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -180,6 +180,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
}
/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 idx)
+{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ struct idpf_tx_buf *tx_buf;
+
+ tx_buf = &txq->tx_buf[idx];
+ libeth_tx_complete(tx_buf, &cp);
+ if (tx_buf == first)
+ break;
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ if (skb_is_gso(skb)) {
+ union idpf_tx_flex_desc *tx_desc;
+
+ /* If we failed a DMA mapping for a TSO packet, we will have
+ * used one additional descriptor for a context
+ * descriptor. Reset that here.
+ */
+ tx_desc = &txq->flex_tx[idx];
+ memset(tx_desc, 0, sizeof(*tx_desc));
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ /* Update tail in case netdev_xmit_more was previously true */
+ idpf_tx_buf_hw_update(txq, idx, false);
+}
+
+/**
* idpf_tx_singleq_map - Build the Tx base descriptor
* @tx_q: queue to send buffer on
* @first: first buffer info buffer to use
@@ -219,8 +271,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+ return idpf_tx_singleq_dma_map_error(tx_q, skb,
+ first, i);
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -362,11 +415,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
+ u32 count, buf_count = 1;
int csum, tso, needed;
- unsigned int count;
__be16 protocol;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
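The new idpf_tx_singleq_dma_map_error() unwinds a partially mapped packet by walking backwards from the slot that failed to the packet's first buffer, wrapping at the ring edge. The loop shape in isolation, with release() standing in for libeth_tx_complete():

	static void unwind(void (*release)(unsigned int), unsigned int first,
			   unsigned int idx, unsigned int ring_size)
	{
		for (;;) {
			release(idx);		/* undo the mapping in this slot */
			if (idx == first)
				break;		/* reached the start of the packet */
			if (idx == 0)
				idx = ring_size;
			idx--;			/* step backwards, with wrap-around */
		}
	}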
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 66a1b040639d..eaad52a83b04 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -8,48 +8,13 @@
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct libeth_sqe buf;
-};
-
-#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
+#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
LIBETH_SQE_CHECK_PRIV(u32);
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
unsigned int count);
/**
- * idpf_buf_lifo_push - push a buffer pointer onto stack
- * @stack: pointer to stack struct
- * @buf: pointer to buf to push
- *
- * Returns 0 on success, negative on failure
- **/
-static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
- struct idpf_tx_stash *buf)
-{
- if (unlikely(stack->top == stack->size))
- return -ENOSPC;
-
- stack->bufs[stack->top++] = buf;
-
- return 0;
-}
-
-/**
- * idpf_buf_lifo_pop - pop a buffer pointer from stack
- * @stack: pointer to stack struct
- **/
-static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
-{
- if (unlikely(!stack->top))
- return NULL;
-
- return stack->bufs[--stack->top];
-}
-
-/**
* idpf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
* @txqueue: TX queue
@@ -77,52 +42,22 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
struct libeth_sq_napi_stats ss = { };
- struct idpf_buf_lifo *buf_stack;
- struct idpf_tx_stash *stash;
struct libeth_cq_pp cp = {
.dev = txq->dev,
.ss = &ss,
};
- struct hlist_node *tmp;
- u32 i, tag;
+ u32 i;
/* Buffers already cleared, nothing to do */
if (!txq->tx_buf)
return;
/* Free all the Tx buffer sk_buffs */
- for (i = 0; i < txq->desc_count; i++)
+ for (i = 0; i < txq->buf_pool_size; i++)
libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
-
- if (!idpf_queue_has(FLOW_SCH_EN, txq))
- return;
-
- buf_stack = &txq->stash->buf_stack;
- if (!buf_stack->bufs)
- return;
-
- /*
- * If a Tx timeout occurred, there are potentially still bufs in the
- * hash table, free them here.
- */
- hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
- hlist) {
- if (!stash)
- continue;
-
- libeth_tx_complete(&stash->buf, &cp);
- hash_del(&stash->hlist);
- idpf_buf_lifo_push(buf_stack, stash);
- }
-
- for (i = 0; i < buf_stack->size; i++)
- kfree(buf_stack->bufs[i]);
-
- kfree(buf_stack->bufs);
- buf_stack->bufs = NULL;
}
/**
@@ -139,6 +74,9 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
if (!txq->desc_ring)
return;
+ if (txq->refillq)
+ kfree(txq->refillq->ring);
+
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
txq->next_to_use = 0;
@@ -195,41 +133,18 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*/
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
- struct idpf_buf_lifo *buf_stack;
- int buf_size;
- int i;
-
/* Allocate book keeping buffers only. Buffers to be supplied to HW
* are allocated by kernel network stack and received as part of skb
*/
- buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
- tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q))
+ tx_q->buf_pool_size = U16_MAX;
+ else
+ tx_q->buf_pool_size = tx_q->desc_count;
+ tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
+ GFP_KERNEL);
if (!tx_q->tx_buf)
return -ENOMEM;
- if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
- return 0;
-
- buf_stack = &tx_q->stash->buf_stack;
-
- /* Initialize tx buf stack for out-of-order completions if
- * flow scheduling offload is enabled
- */
- buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
- GFP_KERNEL);
- if (!buf_stack->bufs)
- return -ENOMEM;
-
- buf_stack->size = tx_q->desc_count;
- buf_stack->top = tx_q->desc_count;
-
- for (i = 0; i < tx_q->desc_count; i++) {
- buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
- GFP_KERNEL);
- if (!buf_stack->bufs[i])
- return -ENOMEM;
- }
-
return 0;
}
@@ -244,6 +159,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
+ struct idpf_sw_queue *refillq;
int err;
err = idpf_tx_buf_alloc_all(tx_q);
@@ -267,6 +183,31 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
tx_q->next_to_clean = 0;
idpf_queue_set(GEN_CHK, tx_q);
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ refillq = tx_q->refillq;
+ refillq->desc_count = tx_q->buf_pool_size;
+ refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (unsigned int i = 0; i < refillq->desc_count; i++)
+ refillq->ring[i] =
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
+ idpf_queue_has(GEN_CHK, refillq));
+
+ /* Go ahead and flip the GEN bit since this counts as filling
+ * up the ring, i.e. the ring has already wrapped once.
+ */
+ idpf_queue_change(GEN_CHK, refillq);
+
+ tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
+
return 0;
err_alloc:
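Each refill-queue entry packs a buffer ID together with a software generation (GEN) bit, which is how producer and consumer detect that the ring has wrapped. A sketch of the encoding; the field widths are illustrative, not the actual IDPF_RFL_BI_* masks:

	#include <linux/types.h>
	#include <linux/bits.h>
	#include <linux/bitfield.h>

	#define RFL_BI_BUFID_M	GENMASK(14, 0)	/* buffer ID field (width assumed) */
	#define RFL_BI_GEN_M	BIT(15)		/* software generation bit */

	static u32 rfl_entry(u32 buf_id, bool gen)
	{
		return FIELD_PREP(RFL_BI_BUFID_M, buf_id) |
		       FIELD_PREP(RFL_BI_GEN_M, gen);
	}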
@@ -317,8 +258,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
- u8 gen_bits = 0;
- u16 bufidx_mask;
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -327,34 +266,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
i);
goto err_out;
}
-
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
-
- txq->compl_tag_cur_gen = 0;
-
- /* Determine the number of bits in the bufid
- * mask and add one to get the start of the
- * generation bits
- */
- bufidx_mask = txq->desc_count - 1;
- while (bufidx_mask >> 1) {
- txq->compl_tag_gen_s++;
- bufidx_mask = bufidx_mask >> 1;
- }
- txq->compl_tag_gen_s++;
-
- gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
- txq->compl_tag_gen_s;
- txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
-
- /* Set bufid mask based on location of first
- * gen bit; it cannot simply be the descriptor
- * ring size-1 since we can have size values
- * where not all of those bits are set.
- */
- txq->compl_tag_bufid_m =
- GETMAXVAL(txq->compl_tag_gen_s);
}
if (!idpf_is_queue_model_split(vport->txq_model))
@@ -603,18 +514,18 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
}
/**
- * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * idpf_post_buf_refill - Post buffer id to refill queue
* @refillq: refill queue to post to
* @buf_id: buffer id to post
*/
-static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
- FIELD_PREP(IDPF_RX_BI_GEN_M,
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
@@ -995,6 +906,11 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++) {
+ if (flow_sch_en) {
+ kfree(txq_grp->txqs[j]->refillq);
+ txq_grp->txqs[j]->refillq = NULL;
+ }
+
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
@@ -1004,9 +920,6 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
-
- if (flow_sch_en)
- kfree(txq_grp->stashes);
}
kfree(vport->txq_grps);
vport->txq_grps = NULL;
@@ -1367,7 +1280,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_txq_stash *stashes;
int j;
tx_qgrp->vport = vport;
@@ -1380,15 +1292,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
goto err_alloc;
}
- if (split && flow_sch_en) {
- stashes = kcalloc(num_txq, sizeof(*stashes),
- GFP_KERNEL);
- if (!stashes)
- goto err_alloc;
-
- tx_qgrp->stashes = stashes;
- }
-
for (j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
@@ -1408,12 +1311,14 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!flow_sch_en)
continue;
- if (split) {
- q->stash = &stashes[j];
- hash_init(q->stash->sched_buf_hash);
- }
-
idpf_queue_set(FLOW_SCH_EN, q);
+
+ q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
+ if (!q->refillq)
+ goto err_alloc;
+
+ idpf_queue_set(GEN_CHK, q->refillq);
+ idpf_queue_set(RFL_GEN_CHK, q->refillq);
}
if (!split)
@@ -1697,87 +1602,6 @@ static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
spin_unlock_bh(&tx_tstamp_caps->status_lock);
}
-/**
- * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
- * out of order completions
- * @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
- u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
-{
- struct idpf_tx_stash *stash;
- struct hlist_node *tmp_buf;
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = cleaned,
- .napi = budget,
- };
-
- /* Buffer completion */
- hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
- hlist, compl_tag) {
- if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
- continue;
-
- hash_del(&stash->hlist);
-
- if (stash->buf.type == LIBETH_SQE_SKB &&
- (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS))
- idpf_tx_read_tstamp(txq, stash->buf.skb);
-
- libeth_tx_complete(&stash->buf, &cp);
-
- /* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
- }
-}
-
-/**
- * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
- * later time (only relevant for flow scheduling mode)
- * @txq: Tx queue to clean
- * @tx_buf: buffer to store
- */
-static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
- struct idpf_tx_buf *tx_buf)
-{
- struct idpf_tx_stash *stash;
-
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
- return 0;
-
- stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
- if (unlikely(!stash)) {
- net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- netdev_name(txq->netdev));
-
- return -ENOMEM;
- }
-
- /* Store buffer params in shadow buffer */
- stash->buf.skb = tx_buf->skb;
- stash->buf.bytes = tx_buf->bytes;
- stash->buf.packets = tx_buf->packets;
- stash->buf.type = tx_buf->type;
- stash->buf.nr_frags = tx_buf->nr_frags;
- dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
- dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
-
- /* Add buffer to buf_hash table to be freed later */
- hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- idpf_tx_buf_compl_tag(&stash->buf));
-
- tx_buf->type = LIBETH_SQE_EMPTY;
-
- return 0;
-}
-
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
do { \
if (unlikely(++(ntc) == (txq)->desc_count)) { \
@@ -1805,14 +1629,8 @@ do { \
* Separate packet completion events will be reported on the completion queue,
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
- *
- * Furthermore, in flow scheduling mode, check to make sure there are enough
- * reserve buffers to stash the packet. If there are not, return early, which
- * will leave next_to_clean pointing to the packet that failed to be stashed.
- *
- * Return: false in the scenario above, true otherwise.
*/
-static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
struct libeth_sq_napi_stats *cleaned,
bool descs_only)
@@ -1826,7 +1644,12 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
.napi = napi_budget,
};
struct idpf_tx_buf *tx_buf;
- bool clean_complete = true;
+
+ if (descs_only) {
+ /* Bump ring index to mark as cleaned. */
+ tx_q->next_to_clean = end;
+ return;
+ }
tx_desc = &tx_q->flex_tx[ntc];
next_pending_desc = &tx_q->flex_tx[end];
@@ -1846,136 +1669,61 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
break;
eop_idx = tx_buf->rs_idx;
+ libeth_tx_complete(tx_buf, &cp);
- if (descs_only) {
- if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
- clean_complete = false;
- goto tx_splitq_clean_out;
- }
-
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
+ /* unmap remaining buffers */
+ while (ntc != eop_idx) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
- }
- } else {
+ /* unmap any remaining paged data */
libeth_tx_complete(tx_buf, &cp);
-
- /* unmap remaining buffers */
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
-
- /* unmap any remaining paged data */
- libeth_tx_complete(tx_buf, &cp);
- }
}
fetch_next_txq_desc:
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
}
-tx_splitq_clean_out:
tx_q->next_to_clean = ntc;
-
- return clean_complete;
}
-#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
-do { \
- (buf)++; \
- (ntc)++; \
- if (unlikely((ntc) == (txq)->desc_count)) { \
- buf = (txq)->tx_buf; \
- ntc = 0; \
- } \
-} while (0)
-
/**
- * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
+ * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
* @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @buf_id: packet's starting buffer ID, from completion descriptor
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*
- * Cleans all buffers associated with the input completion tag either from the
- * TX buffer ring or from the hash table if the buffers were previously
- * stashed. Returns the byte/segment count for the cleaned packet associated
- * this completion tag.
+ * Clean all buffers associated with the packet starting at @buf_id. The
+ * byte/segment counts for the cleaned packet are accumulated into @cleaned.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
+static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
+ struct libeth_sq_napi_stats *cleaned,
+ int budget)
{
- u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
struct libeth_cq_pp cp = {
.dev = txq->dev,
.ss = cleaned,
.napi = budget,
};
- u16 ntc, orig_idx = idx;
-
- tx_buf = &txq->tx_buf[idx];
-
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
- idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
- return false;
+ tx_buf = &txq->tx_buf[buf_id];
if (tx_buf->type == LIBETH_SQE_SKB) {
if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
idpf_tx_read_tstamp(txq, tx_buf->skb);
libeth_tx_complete(tx_buf, &cp);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ buf_id = idpf_tx_buf_next(tx_buf);
- while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
+ tx_buf = &txq->tx_buf[buf_id];
libeth_tx_complete(tx_buf, &cp);
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
-
- /*
- * It's possible the packet we just cleaned was an out of order
- * completion, which means we can stash the buffers starting from
- * the original next_to_clean and reuse the descriptors. We need
- * to compare the descriptor ring next_to_clean packet's "first" buffer
- * to the "first" buffer of the packet we just cleaned to determine if
- * this is the case. However, next_to_clean can point to either a
- * reserved buffer that corresponds to a context descriptor used for the
- * next_to_clean packet (TSO packet) or the "first" buffer (single
- * packet). The orig_idx from the packet we just cleaned will always
- * point to the "first" buffer. If next_to_clean points to a reserved
- * buffer, let's bump ntc once and start the comparison from there.
- */
- ntc = txq->next_to_clean;
- tx_buf = &txq->tx_buf[ntc];
-
- if (tx_buf->type == LIBETH_SQE_CTX)
- idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
-
- /*
- * If ntc still points to a different "first" buffer, clean the
- * descriptor ring and stash all of the buffers for later cleaning. If
- * we cannot stash all of the buffers, next_to_clean will point to the
- * "first" buffer of the packet that could not be stashed and cleaning
- * will start there next time.
- */
- if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
- !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
- true)))
- return true;
-
- /*
- * Otherwise, update next_to_clean to reflect the cleaning that was
- * done above.
- */
- txq->next_to_clean = idx;
-
- return true;
}
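With the stash and hash table gone, completion walks a singly linked chain of buffer IDs: each tx_buf records its successor via idpf_tx_buf_next() and the chain ends at IDPF_TXBUF_NULL. A minimal userspace sketch of that traversal, with the head's SKB/timestamp handling and the idpf_post_buf_refill() repost reduced to a printf:

#include <stdint.h>
#include <stdio.h>

#define BUF_NULL UINT32_MAX            /* stand-in for IDPF_TXBUF_NULL */

struct tx_buf {
	uint32_t next;                 /* next buf_id in the packet chain */
};

/* Walk a packet's buffer chain starting at buf_id, completing each
 * buffer and reposting its ID; SKB/timestamp handling is omitted. */
static void clean_chain(const struct tx_buf *pool, uint32_t buf_id)
{
	for (;;) {
		printf("complete + repost buf_id %u\n", buf_id);
		if (pool[buf_id].next == BUF_NULL)
			break;
		buf_id = pool[buf_id].next;
	}
}

int main(void)
{
	/* A three-buffer packet occupying IDs 0, 2 and 3. */
	struct tx_buf pool[4] = {
		[0] = { .next = 2 },
		[2] = { .next = 3 },
		[3] = { .next = BUF_NULL },
	};

	clean_chain(pool, 0);
	return 0;
}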
/**
@@ -1994,22 +1742,17 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct libeth_sq_napi_stats *cleaned,
int budget)
{
- u16 compl_tag;
+	/* An RS completion contains the queue head for queue-based scheduling
+	 * or the completion tag for flow-based scheduling.
+	 */
+ u16 rs_compl_val = le16_to_cpu(desc->q_head_compl_tag.q_head);
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
- u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
-
- idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
return;
}
- compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
-
- /* If we didn't clean anything on the ring, this packet must be
- * in the hash table. Go clean it there.
- */
- if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
- idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+ idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
}
/**
@@ -2126,8 +1869,7 @@ fetch_next_desc:
/* Update BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
- np->state != __IDPF_VPORT_UP ||
+ dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
@@ -2184,15 +1926,21 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
-/* Global conditions to tell whether the txq (and related resources)
- * has room to allow the use of "size" descriptors.
+/**
+ * idpf_tx_splitq_has_room - check if enough Tx splitq resources are available
+ * @tx_q: the queue to be checked
+ * @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of Tx buffers required for this packet
+ *
+ * Return: 0 if no room is available, 1 otherwise
*/
-static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
+ u32 bufs_needed)
{
- if (IDPF_DESC_UNUSED(tx_q) < size ||
+ if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
- IDPF_TX_BUF_RSV_LOW(tx_q))
+ idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
return 0;
return 1;
}
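The stop/start decision now gates on three resources at once: free descriptors, completion-queue headroom, and free buffer IDs in the refillq. A hedged standalone sketch of the same predicate with illustrative numbers (the struct and values are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct txq_state {
	uint32_t free_descs;      /* IDPF_DESC_UNUSED() */
	uint32_t compl_pending;   /* IDPF_TX_COMPLQ_PENDING() */
	uint32_t compl_thresh;    /* IDPF_TX_COMPLQ_OVERFLOW_THRESH() */
	uint32_t free_bufs;       /* idpf_tx_splitq_get_free_bufs() */
};

/* All three resources must have room; any shortage stops the queue. */
static bool txq_has_room(const struct txq_state *s,
			 uint32_t descs_needed, uint32_t bufs_needed)
{
	return s->free_descs >= descs_needed &&
	       s->compl_pending <= s->compl_thresh &&
	       s->free_bufs >= bufs_needed;
}

int main(void)
{
	struct txq_state s = { 128, 40, 256, 17 };

	printf("%d\n", txq_has_room(&s, 10, 18));  /* 0: not enough buf_ids */
	printf("%d\n", txq_has_room(&s, 10, 17));  /* 1 */
	return 0;
}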
@@ -2201,14 +1949,21 @@ static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of buffers needed for this packet
*
- * Returns 0 if stop is not needed
+ * Return: 0 if stop is not needed
*/
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
- unsigned int descs_needed)
+ u32 descs_needed,
+ u32 bufs_needed)
{
+	/* Since we have multiple resources to check for splitq, the
+	 * start_thrs/stop_thrs arguments become a boolean check instead of
+	 * a count threshold.
+	 */
if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
- idpf_txq_has_room(tx_q, descs_needed),
+ idpf_txq_has_room(tx_q, descs_needed,
+ bufs_needed),
1, 1))
return 0;
@@ -2250,14 +2005,16 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
}
/**
- * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
* @txq: queue to send buffer on
* @skb: send buffer
+ * @bufs_needed: (output) number of buffers needed for this skb.
*
- * Returns number of data descriptors needed for this skb.
+ * Return: number of data descriptors and buffers needed for this skb.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb)
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ u32 *bufs_needed)
{
const struct skb_shared_info *shinfo;
unsigned int count = 0, i;
@@ -2268,6 +2025,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
return count;
shinfo = skb_shinfo(skb);
+ *bufs_needed += shinfo->nr_frags;
for (i = 0; i < shinfo->nr_frags; i++) {
unsigned int size;
@@ -2297,71 +2055,89 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
}
/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
+ * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+ * @txq: the tx ring to wrap
+ * @ntu: ring index to bump
*/
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 idx)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
- struct libeth_sq_napi_stats ss = { };
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = &ss,
- };
+ ntu++;
- u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.dma_map_errs);
- u64_stats_update_end(&txq->stats_sync);
+ if (ntu == txq->desc_count)
+ ntu = 0;
- /* clear dma mappings for failed tx_buf map */
- for (;;) {
- struct idpf_tx_buf *tx_buf;
+ return ntu;
+}
- tx_buf = &txq->tx_buf[idx];
- libeth_tx_complete(tx_buf, &cp);
- if (tx_buf == first)
- break;
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
- }
+/**
+ * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
+ * @refillq: refill queue to get buffer ID from
+ * @buf_id: return buffer ID
+ *
+ * Return: true if a buffer ID was found, false if not
+ */
+static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
+ u32 *buf_id)
+{
+ u32 ntc = refillq->next_to_clean;
+ u32 refill_desc;
- if (skb_is_gso(skb)) {
- union idpf_tx_flex_desc *tx_desc;
+ refill_desc = refillq->ring[ntc];
- /* If we failed a DMA mapping for a TSO packet, we will have
- * used one additional descriptor for a context
- * descriptor. Reset that here.
- */
- tx_desc = &txq->flex_tx[idx];
- memset(tx_desc, 0, sizeof(*tx_desc));
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
+ if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RFL_BI_GEN_M)))
+ return false;
+
+ *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ ntc = 0;
}
- /* Update tail in case netdev_xmit_more was previously true */
- idpf_tx_buf_hw_update(txq, idx, false);
+ refillq->next_to_clean = ntc;
+
+ return true;
}
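The refillq consumer trusts an entry only if its generation bit matches the consumer's expected generation, which flips on every wrap; a stale bit means the producer has not refilled that slot yet. A self-contained sketch of the same validation (constants mirror IDPF_RFL_BI_GEN_M / IDPF_RFL_BI_BUFID_M):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN_BIT    (1u << 16)         /* IDPF_RFL_BI_GEN_M   */
#define BUFID_MASK 0xffffu            /* IDPF_RFL_BI_BUFID_M */

struct refillq {
	uint32_t *ring;
	uint32_t desc_count;
	uint32_t next_to_clean;
	bool gen;                     /* expected generation at ntc */
};

/* Pop the next buf_id if its generation matches; flip the expected
 * generation on wrap, as idpf_tx_get_free_buf_id() does. */
static bool get_free_buf_id(struct refillq *q, uint32_t *buf_id)
{
	uint32_t desc = q->ring[q->next_to_clean];

	if (q->gen != !!(desc & GEN_BIT))
		return false;         /* producer hasn't refilled this slot */

	*buf_id = desc & BUFID_MASK;

	if (++q->next_to_clean == q->desc_count) {
		q->gen = !q->gen;
		q->next_to_clean = 0;
	}
	return true;
}

int main(void)
{
	uint32_t ring[2] = { GEN_BIT | 5, GEN_BIT | 9 };
	struct refillq q = { ring, 2, 0, true };
	uint32_t id;

	while (get_free_buf_id(&q, &id))
		printf("got buf_id %u\n", id); /* 5, 9, then stale gen stops */
	return 0;
}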
/**
- * idpf_tx_splitq_bump_ntu - adjust NTU and generation
- * @txq: the tx ring to wrap
- * @ntu: ring index to bump
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+ struct idpf_tx_splitq_params *params,
+ struct idpf_tx_buf *first)
{
- ntu++;
+ struct idpf_sw_queue *refillq = txq->refillq;
+ struct libeth_sq_napi_stats ss = { };
+ struct idpf_tx_buf *tx_buf = first;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
- if (ntu == txq->desc_count) {
- ntu = 0;
- txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ libeth_tx_complete(tx_buf, &cp);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
+ libeth_tx_complete(tx_buf, &cp);
}
- return ntu;
+ /* Update tail in case netdev_xmit_more was previously true. */
+ idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+
+ if (!refillq)
+ return;
+
+ /* Restore refillq state to avoid leaking tags. */
+ if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = params->prev_refill_ntc;
}
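The restore step matters because buf_ids popped for the failed packet would otherwise never be reposted. A small sketch, assuming an unconditional pop for brevity, showing that restoring next_to_clean and the generation bit makes the same IDs come back on retry:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN_BIT (1u << 16)

struct refillq {
	const uint32_t *ring;
	uint32_t desc_count;
	uint32_t next_to_clean;
	bool gen;
};

/* Pop unconditionally for brevity; the generation check is omitted. */
static uint32_t pop(struct refillq *q)
{
	uint32_t id = q->ring[q->next_to_clean] & 0xffffu;

	if (++q->next_to_clean == q->desc_count) {
		q->gen = !q->gen;
		q->next_to_clean = 0;
	}
	return id;
}

int main(void)
{
	const uint32_t ring[4] = { GEN_BIT | 1, GEN_BIT | 2,
				   GEN_BIT | 3, GEN_BIT | 4 };
	struct refillq q = { ring, 4, 0, true };

	/* Snapshot before mapping (prev_refill_ntc / prev_refill_gen). */
	uint32_t saved_ntc = q.next_to_clean;
	bool saved_gen = q.gen;

	uint32_t a = pop(&q);
	uint32_t b = pop(&q);
	printf("popped %u and %u\n", a, b);

	/* DMA mapping failed: restore the snapshot so the IDs are not
	 * leaked and are handed out again on the retry. */
	q.next_to_clean = saved_ntc;
	q.gen = saved_gen;

	printf("retry pops %u again\n", pop(&q));  /* 1 */
	return 0;
}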
/**
@@ -2385,6 +2161,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct netdev_queue *nq;
struct sk_buff *skb;
skb_frag_t *frag;
+ u32 next_buf_id;
u16 td_cmd = 0;
dma_addr_t dma;
@@ -2402,17 +2179,16 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
tx_buf = first;
first->nr_frags = 0;
- params->compl_tag =
- (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
-
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
first->nr_frags++;
- idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
@@ -2468,29 +2244,12 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
max_data);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen =
- IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
- /* Since this packet has a buffer that is going to span
- * multiple descriptors, it's going to leave holes in
- * to the TX buffer ring. To ensure these holes do not
- * cause issues in the cleaning routines, we will clear
- * them of any stale data and assign them the same
- * completion tag as the current packet. Then when the
- * packet is being cleaned, the cleaning routines will
- * simply pass over these holes and finish cleaning the
- * rest of the packet.
- */
- tx_buf->type = LIBETH_SQE_EMPTY;
- idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
-
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
* max_data will be >= 12K and <= 16K-1. On any
@@ -2515,15 +2274,25 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &next_buf_id))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
+ } else {
+ next_buf_id = i;
+ }
+ idpf_tx_buf_next(tx_buf) = next_buf_id;
+ tx_buf = &tx_q->tx_buf[next_buf_id];
+
size = skb_frag_size(frag);
data_len -= size;
@@ -2538,6 +2307,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
/* write last descriptor with RS and EOP bits */
first->rs_idx = i;
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
@@ -2746,8 +2516,6 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- txq->tx_buf[i].type = LIBETH_SQE_CTX;
-
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
@@ -2841,6 +2609,21 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
#endif /* CONFIG_PTP_1588_CLOCK */
/**
+ * idpf_tx_splitq_need_re - check whether RE bit needs to be set
+ * @tx_q: pointer to Tx queue
+ *
+ * Return: true if RE bit needs to be set, false otherwise
+ */
+static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
+{
+ int gap = tx_q->next_to_use - tx_q->last_re;
+
+ gap += (gap < 0) ? tx_q->desc_count : 0;
+
+ return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
+}
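The gap arithmetic is a ring-distance computation: subtracting last_re from next_to_use can go negative after a wrap, and adding desc_count folds it back into [0, desc_count). A worked sketch:

#include <stdbool.h>
#include <stdio.h>

#define RE_MIN_GAP 64   /* IDPF_TX_SPLITQ_RE_MIN_GAP */

static bool need_re(int next_to_use, int last_re, int desc_count)
{
	int gap = next_to_use - last_re;

	gap += (gap < 0) ? desc_count : 0;   /* handle ring wraparound */
	return gap >= RE_MIN_GAP;
}

int main(void)
{
	/* Wrapped ring: ntu=10, last_re=500, 512 descs -> gap = 22. */
	printf("%d\n", need_re(10, 500, 512));   /* 0 */
	printf("%d\n", need_re(100, 20, 512));   /* 1: gap = 80 */
	return 0;
}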
+
+/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
* @tx_q: queue to send buffer on
@@ -2850,13 +2633,16 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
{
- struct idpf_tx_splitq_params tx_params = { };
+ struct idpf_tx_splitq_params tx_params = {
+ .prev_ntu = tx_q->next_to_use,
+ };
union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
- unsigned int count;
+ u32 count, buf_count = 1;
int tso, idx;
+ u32 buf_id;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
@@ -2866,7 +2652,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
/* Check for splitq specific TX resources */
count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
- if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+ if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_BUSY;
@@ -2898,36 +2684,47 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
idpf_tx_set_tstamp_desc(ctx_desc, idx);
}
- /* record the location of the first descriptor for this packet */
- first = &tx_q->tx_buf[tx_q->next_to_use];
- first->skb = skb;
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ struct idpf_sw_queue *refillq = tx_q->refillq;
- if (tso) {
- first->packets = tx_params.offload.tso_segs;
- first->bytes = skb->len +
- ((first->packets - 1) * tx_params.offload.tso_hdr_len);
- } else {
- first->packets = 1;
- first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- }
+ /* Save refillq state in case of a packet rollback. Otherwise,
+ * the tags will be leaked since they will be popped from the
+ * refillq but never reposted during cleaning.
+ */
+ tx_params.prev_refill_gen =
+ idpf_queue_has(RFL_GEN_CHK, refillq);
+ tx_params.prev_refill_ntc = refillq->next_to_clean;
+
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &buf_id))) {
+ if (tx_params.prev_refill_gen !=
+ idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = tx_params.prev_refill_ntc;
+
+ tx_q->next_to_use = tx_params.prev_ntu;
+ return idpf_tx_drop_skb(tx_q, skb);
+ }
+ tx_params.compl_tag = buf_id;
- if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
- /* Set the RE bit to catch any packets that may have not been
- * stashed during RS completion cleaning. MIN_GAP is set to
- * MIN_RING size to ensure it will be set at least once each
- * time around the ring.
+ /* Set the RE bit to periodically "clean" the descriptor ring.
+ * MIN_GAP is set to MIN_RING size to ensure it will be set at
+ * least once each time around the ring.
*/
- if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+ if (idpf_tx_splitq_need_re(tx_q)) {
tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
tx_q->txq_grp->num_completions_pending++;
+ tx_q->last_re = tx_q->next_to_use;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
} else {
+ buf_id = tx_q->next_to_use;
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
@@ -2935,6 +2732,18 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
}
+ first = &tx_q->tx_buf[buf_id];
+ first->skb = skb;
+
+ if (tso) {
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
+ } else {
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ }
+
idpf_tx_splitq_map(tx_q, &tx_params, first);
return NETDEV_TX_OK;
@@ -3472,7 +3281,7 @@ payload:
skip_data:
rx_buf->netmem = 0;
- idpf_rx_post_buf_refill(refillq, buf_id);
+ idpf_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
/* skip if it is non EOP desc */
@@ -3580,10 +3389,10 @@ static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
bool failure;
if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
- !!(refill_desc & IDPF_RX_BI_GEN_M))
+ !!(refill_desc & IDPF_RFL_BI_GEN_M))
break;
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 281de655a813..52753dff381c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -108,8 +108,8 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_GEN_M BIT(16)
-#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+#define IDPF_RFL_BI_GEN_M BIT(16)
+#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
@@ -118,10 +118,6 @@ do { \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
-#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
- (txq)->desc_count >> 2)
-
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
* completions that are expected to arrive on the TX completion queue.
@@ -131,11 +127,7 @@ do { \
0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-/* Adjust the generation for the completion tag and wrap if necessary */
-#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
- ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
- 0 : (txq)->compl_tag_cur_gen)
+#define IDPF_TXBUF_NULL U32_MAX
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
@@ -153,18 +145,6 @@ union idpf_tx_flex_desc {
#define idpf_tx_buf libeth_sqe
/**
- * struct idpf_buf_lifo - LIFO for managing OOO completions
- * @top: Used to know how many buffers are left
- * @size: Total size of LIFO
- * @bufs: Backing array
- */
-struct idpf_buf_lifo {
- u16 top;
- u16 size;
- struct idpf_tx_stash **bufs;
-};
-
-/**
* struct idpf_tx_offload_params - Offload parameters for a given packet
* @tx_flags: Feature flags enabled for this packet
* @hdr_offsets: Offset parameter for single queue model
@@ -196,6 +176,9 @@ struct idpf_tx_offload_params {
* @compl_tag: Associated tag for completion
* @td_tag: Descriptor tunneling tag
* @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
+ * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
+ * @prev_refill_gen: stored refillq generation bit in case of packet rollback
*/
struct idpf_tx_splitq_params {
enum idpf_tx_desc_dtype_value dtype;
@@ -206,6 +189,10 @@ struct idpf_tx_splitq_params {
};
struct idpf_tx_offload_params offload;
+
+ u16 prev_ntu;
+ u16 prev_refill_ntc;
+ bool prev_refill_gen;
};
enum idpf_tx_ctx_desc_eipt_offload {
@@ -468,17 +455,6 @@ struct idpf_tx_queue_stats {
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo
- * @sched_buf_hash: Hash table to store buffers
- */
-struct idpf_txq_stash {
- struct idpf_buf_lifo buf_stack;
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_aligned;
-
-/**
* struct idpf_rx_queue - software structure representing a receive queue
* @rx: universal receive descriptor array
* @single_buf: buffer descriptor array in singleq
@@ -610,6 +586,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @netdev: &net_device corresponding to this queue
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
+ * @last_re: last descriptor index on which the RE bit was set
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
* the TX completion queue, it can be for any TXQ associated
* with that completion queue. This means we can clean up to
@@ -620,11 +598,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* only once at the end of the cleaning routine.
* @clean_budget: singleq only, queue cleaning budget
* @cleaned_pkts: Number of packets cleaned for the above said case
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
- * @stash: Tx buffer stash for Flow-based scheduling mode
- * @compl_tag_bufid_m: Completion tag buffer id mask
- * @compl_tag_cur_gen: Used to keep track of current completion tag generation
- * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @refillq: Pointer to refill queue
* @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
* @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
@@ -633,6 +607,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
+ * @buf_pool_size: Total number of idpf_tx_buf entries in the pool
*/
struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_mostly);
@@ -654,7 +629,6 @@ struct idpf_tx_queue {
u16 desc_count;
u16 tx_min_pkt_len;
- u16 compl_tag_gen_s;
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
@@ -662,6 +636,8 @@ struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_write);
u16 next_to_use;
u16 next_to_clean;
+ u16 last_re;
+ u16 tx_max_bufs;
union {
u32 cleaned_bytes;
@@ -669,12 +645,7 @@ struct idpf_tx_queue {
};
u16 cleaned_pkts;
- u16 tx_max_bufs;
- struct idpf_txq_stash *stash;
-
- u16 compl_tag_bufid_m;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ struct idpf_sw_queue *refillq;
struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
struct work_struct *tstamp_task;
@@ -689,11 +660,12 @@ struct idpf_tx_queue {
dma_addr_t dma;
struct idpf_q_vector *q_vector;
+ u32 buf_pool_size;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 112 + sizeof(struct u64_stats_sync),
- 24);
+ 104 + sizeof(struct u64_stats_sync),
+ 32);
/**
* struct idpf_buf_queue - software structure representing a buffer queue
@@ -903,7 +875,6 @@ struct idpf_rxq_group {
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
- * @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
 * completion queue, accumulated for all TX queues
@@ -918,7 +889,6 @@ struct idpf_txq_group {
u16 num_txq;
struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
- struct idpf_txq_stash *stashes;
struct idpf_compl_queue *complq;
@@ -1011,6 +981,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
reg->dyn_ctl);
}
+/**
+ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
+ * @refillq: pointer to refillq containing buf_ids
+ */
+static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
+{
+ return (refillq->next_to_use > refillq->next_to_clean ?
+ 0 : refillq->desc_count) +
+ refillq->next_to_use - refillq->next_to_clean - 1;
+}
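This is the ring-distance formula with one reserved slot: it evaluates to desc_count - 1 when the indices are equal, i.e. equal indices are treated as a full complement of buf_ids rather than an empty queue (per-entry validity is carried by the generation bit instead). A worked sketch:

#include <stdint.h>
#include <stdio.h>

/* Distance from the consumer (ntc) forward to the producer (ntu),
 * minus one reserved slot.  ntu == ntc counts as FULL here. */
static uint32_t refillq_free(uint32_t ntu, uint32_t ntc, uint32_t count)
{
	return (ntu > ntc ? 0 : count) + ntu - ntc - 1;
}

int main(void)
{
	printf("%u\n", refillq_free(5, 5, 8)); /* full: 7 IDs, 1 reserved */
	printf("%u\n", refillq_free(7, 2, 8)); /* 4 IDs left to pop */
	printf("%u\n", refillq_free(2, 7, 8)); /* 2 IDs left to pop */
	return 0;
}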
+
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -1038,10 +1019,8 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb);
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index d74116441d1c..bfeef5b0b99d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -3125,7 +3125,7 @@ static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
if (err)
return err;
- combo_ver = le32_to_cpu(civd.combo_ver);
+ combo_ver = get_unaligned_le32(&civd.combo_ver);
orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index d2f22d8558f8..ff8d640a50b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -932,7 +932,7 @@ struct ixgbe_orom_civd_info {
__le32 combo_ver; /* Combo Image Version number */
u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
-};
+} __packed;
/* Function specific capabilities */
struct ixgbe_hw_func_caps {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 4ff19a04b23e..0c46ba8a5adc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1978,6 +1978,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ if (!is_cn20k(pdev) &&
+ !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
+ cgx->cgx_id);
+ goto err_release_regions;
+ }
+
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
if (!cgx->lmac_count) {
dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 7ee1fdeb5295..18c7bb39dbc7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -783,6 +783,20 @@ static inline bool is_cn10kb(struct rvu *rvu)
return false;
}
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ /* On CNF10KA and CNF10KB silicons only two CGX blocks are connected
+ * to NIX.
+ */
+ if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
+ return cgx_id <= 1;
+
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX ||
+ id == PCI_SUBSYS_DEVID_CN10K_A ||
+ id == PCI_SUBSYS_DEVID_CN10K_B));
+}
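The negated return expression reads more naturally in positive form: CGX0 always maps to NIX, and higher CGX IDs map only on silicons known to wire up every CGX block. A sketch of the equivalent positive form (the enum values are placeholders for the real PCI subsystem IDs):

#include <stdbool.h>
#include <stdio.h>

enum subsys { ID_96XX, ID_98XX, ID_CN10K_A, ID_CN10K_B,
	      ID_CNF10K_A, ID_CNF10K_B, ID_OTHER };

static bool all_cgx_wired(enum subsys id)
{
	return id == ID_96XX || id == ID_98XX ||
	       id == ID_CN10K_A || id == ID_CN10K_B;
}

/* Positive form of the driver's negated return expression:
 * !(cgx_id && !wired) == (cgx_id == 0 || wired). */
static bool cgx_mapped_to_nix(enum subsys id, unsigned int cgx_id)
{
	if (id == ID_CNF10K_A || id == ID_CNF10K_B)
		return cgx_id <= 1;   /* only the first two CGX blocks */

	return cgx_id == 0 || all_cgx_wired(id);
}

int main(void)
{
	printf("%d\n", cgx_mapped_to_nix(ID_CNF10K_B, 2)); /* 0 */
	printf("%d\n", cgx_mapped_to_nix(ID_OTHER, 1));    /* 0 */
	printf("%d\n", cgx_mapped_to_nix(ID_98XX, 2));     /* 1 */
	return 0;
}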
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f674729124e6..aff17c37ddde 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -124,7 +124,9 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
dev_stats->rx_ucast_frames;
dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
- dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP) +
+ (unsigned long)atomic_long_read(&dev_stats->tx_discards);
+
dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
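Packets dropped in software by the xmit path never reach the hardware TX_DROP counter, so the driver counts them in an atomic and folds them in when stats are read. A minimal userspace sketch of that pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long tx_discards;        /* bumped in the xmit path */

/* xmit path: packet fails the length check, drop it in software. */
static void xmit_drop(void)
{
	atomic_fetch_add(&tx_discards, 1);
}

/* stats read: hardware drop counter plus the software discards. */
static long total_tx_drops(long hw_drops)
{
	return hw_drops + atomic_load(&tx_discards);
}

int main(void)
{
	xmit_drop();
	xmit_drop();
	printf("%ld\n", total_tx_drops(40));  /* 42 */
	return 0;
}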
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e3765b73c434..1c8a3c078a64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -153,6 +153,7 @@ struct otx2_dev_stats {
u64 tx_bcast_frames;
u64 tx_mcast_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
/* Driver counted stats */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b23585c5e5c2..5027fae0aa77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2220,6 +2220,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
int sq_idx;
@@ -2232,6 +2233,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
(!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ dev_stats = &pf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 5589fccd370b..7ebb6e656884 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -417,9 +417,19 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *vf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > vf->tx_max_pktlen)) {
+ dev_stats = &vf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 25af98034e2e..b476733a0234 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -371,7 +371,8 @@ static void rvu_rep_get_stats(struct work_struct *work)
stats->rx_mcast_frames = rsp->rx.mcast;
stats->tx_bytes = rsp->tx.octs;
stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
- stats->tx_drops = rsp->tx.drop;
+ stats->tx_drops = rsp->tx.drop +
+ (unsigned long)atomic_long_read(&stats->tx_discards);
exit:
mutex_unlock(&priv->mbox.lock);
}
@@ -418,6 +419,16 @@ static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
struct otx2_nic *pf = rep->mdev;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ struct rep_stats *stats;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ stats = &rep->stats;
+ atomic_long_inc(&stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
sq = &pf->qset.sq[rep->rep_id];
txq = netdev_get_tx_queue(dev, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
index 38446b3e4f13..5bc9e2c7d800 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
@@ -27,6 +27,7 @@ struct rep_stats {
u64 tx_bytes;
u64 tx_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
struct rep_dev {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3ffa3fbacd16..2c0e0c16ca90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -160,7 +160,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- mlx5_unload_one_devl_locked(dev, true);
+ mlx5_sync_reset_unload_flow(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 3efa8bf1d14e..4720523813b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -575,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
}
- priv->dcbx.xoff = xoff;
/* Apply the settings */
if (update_buffer) {
@@ -584,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return err;
}
+ priv->dcbx.xoff = xoff;
+
if (update_prio2buffer)
err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index f4a19ffbb641..66d276a1be83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -66,11 +66,23 @@ struct mlx5e_port_buffer {
struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER];
};
+#ifdef CONFIG_MLX5_CORE_EN_DCB
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
u32 *buffer_size,
u8 *prio2buffer);
+#else
+static inline int
+mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
+ u32 change, unsigned int mtu,
+ void *pfc,
+ u32 *buffer_size,
+ u8 *prio2buffer)
+{
+ return 0;
+}
+#endif
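This is the usual kernel config-stub idiom: the real prototype is visible only when the option is set, and a static inline no-op stands in otherwise, so callers such as mlx5e_update_carrier() need no #ifdef of their own. A generic sketch of the pattern (CONFIG_FEATURE and feature_apply are made-up names):

#include <stdio.h>

/* #define CONFIG_FEATURE 1 */         /* flip to select the real path */

#ifdef CONFIG_FEATURE
int feature_apply(int arg);             /* real code would live elsewhere */
#else
static inline int feature_apply(int arg)
{
	(void)arg;
	return 0;                       /* compiled out: no-op success */
}
#endif

int main(void)
{
	/* The caller needs no #ifdef of its own. */
	printf("feature_apply -> %d\n", feature_apply(42));
	return 0;
}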
int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 21bb88c5d3dc..e680673ffb72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -49,6 +49,7 @@
#include "en.h"
#include "en/dim.h"
#include "en/txrx.h"
+#include "en/port_buffer.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
@@ -138,6 +139,8 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
+ mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+ NULL, NULL, NULL);
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
@@ -3040,9 +3043,11 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 mtu;
+ u16 mtu, prev_mtu;
int err;
+ mlx5e_query_mtu(mdev, params, &prev_mtu);
+
err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
if (err)
return err;
@@ -3052,6 +3057,18 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
__func__, mtu, params->sw_mtu);
+ if (mtu != prev_mtu && MLX5_BUFFER_SUPPORTED(mdev)) {
+ err = mlx5e_port_manual_buffer_config(priv, 0, mtu,
+ NULL, NULL, NULL);
+ if (err) {
+ netdev_warn(netdev, "%s: Failed to set Xon/Xoff values with MTU %d (err %d), setting back to previous MTU %d\n",
+ __func__, mtu, err, prev_mtu);
+
+ mlx5e_set_mtu(mdev, params, prev_mtu);
+ return err;
+ }
+ }
+
params->sw_mtu = mtu;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d87392360dbd..cb165085a4c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3734,6 +3734,13 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
char *value = val.vstr;
u8 eswitch_mode;
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Changing fs mode is not supported when eswitch offloads enabled.");
+ return -EOPNOTSUPP;
+ }
+
if (!strcmp(value, "dmfs"))
return 0;
@@ -3759,14 +3766,6 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
return -EINVAL;
}
- eswitch_mode = mlx5_eswitch_mode(dev);
- if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
- NL_SET_ERR_MSG_FMT_MOD(extack,
- "Moving to %s is not supported when eswitch offloads enabled.",
- value);
- return -EOPNOTSUPP;
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 69933addd921..22995131824a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -6,13 +6,15 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"
+#include "sf/sf.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
- MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
+ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
+ MLX5_FW_RESET_FLAGS_UNLOAD_EVENT,
};
struct mlx5_fw_reset {
@@ -219,7 +221,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}
-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
struct devlink *devlink = priv_to_devlink(dev);
@@ -228,8 +230,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- if (!unloaded)
- mlx5_unload_one(dev, false);
+ mlx5_sync_reset_unload_flow(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
@@ -272,7 +273,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@@ -428,6 +429,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
return false;
}
+ if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) {
+ mlx5_core_warn(dev, "SFs should be removed before reset\n");
+ return false;
+ }
+
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
err = mlx5_check_hotplug_interrupt(dev, bridge);
@@ -586,6 +592,65 @@ static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
return err;
}
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ unsigned long timeout;
+ int poll_freq = 20;
+ bool reset_action;
+ u8 rst_state;
+ int err;
+
+ if (locked)
+ mlx5_unload_one_devl_locked(dev, false);
+ else
+ mlx5_unload_one(dev, false);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags))
+ return;
+
+ mlx5_set_fw_rst_ack(dev);
+ mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
+
+ reset_action = false;
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
+ do {
+ rst_state = mlx5_get_fw_rst_state(dev);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
+ rst_state == MLX5_FW_RST_STATE_IDLE) {
+ reset_action = true;
+ break;
+ }
+ if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
+ mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
+ mlx5_set_fw_rst_ack(dev);
+ poll_freq = 1000;
+ }
+ msleep(poll_freq);
+ } while (!time_after(jiffies, timeout));
+
+ if (!reset_action) {
+ mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
+ rst_state);
+ fw_reset->ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n",
+ rst_state);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n",
+ err);
+ fw_reset->ret = err;
+ }
+ }
+
+done:
+ clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
+}
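The extracted flow is a poll-until-deadline loop that starts fast and backs off once the device acks drop mode. A hedged userspace sketch of the same shape, with mlx5_get_fw_rst_state() replaced by a canned state sequence:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

enum rst_state { RST_WAIT, RST_DROP_MODE, RST_TOGGLE_REQ, RST_IDLE };

/* Stand-in for mlx5_get_fw_rst_state(): ack drop mode once, then
 * request a toggle on the fourth read. */
static enum rst_state read_state(int iter)
{
	if (iter == 1)
		return RST_DROP_MODE;
	return iter < 3 ? RST_WAIT : RST_TOGGLE_REQ;
}

/* Poll until an actionable state or the deadline passes, slowing the
 * poll rate after the drop-mode ack, like the driver's loop. */
static bool wait_for_reset_action(int timeout_sec)
{
	struct timespec poll = { 0, 20 * 1000 * 1000 };   /* 20 ms */
	time_t deadline = time(NULL) + timeout_sec;
	int iter = 0;

	do {
		enum rst_state st = read_state(iter++);

		if (st == RST_TOGGLE_REQ || st == RST_IDLE)
			return true;                      /* action seen */
		if (st == RST_DROP_MODE)
			poll = (struct timespec){ 1, 0 }; /* back off to 1 s */
		nanosleep(&poll, NULL);
	} while (time(NULL) <= deadline);

	return false;                                     /* -ETIMEDOUT */
}

int main(void)
{
	printf("%s\n", wait_for_reset_action(5) ? "action" : "timeout");
	return 0;
}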
+
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -613,17 +678,13 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
mlx5_enter_error_state(dev, true);
done:
fw_reset->ret = err;
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_unload_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_dev *dev;
- unsigned long timeout;
- int poll_freq = 20;
- bool reset_action;
- u8 rst_state;
int err;
fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
@@ -632,6 +693,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, false))
return;
+ set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
err = mlx5_cmd_fast_teardown_hca(dev);
@@ -640,49 +702,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
else
mlx5_enter_error_state(dev, true);
- if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
- mlx5_unload_one_devl_locked(dev, false);
- else
- mlx5_unload_one(dev, false);
-
- mlx5_set_fw_rst_ack(dev);
- mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
-
- reset_action = false;
- timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
- do {
- rst_state = mlx5_get_fw_rst_state(dev);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
- rst_state == MLX5_FW_RST_STATE_IDLE) {
- reset_action = true;
- break;
- }
- if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
- mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
- mlx5_set_fw_rst_ack(dev);
- poll_freq = 1000;
- }
- msleep(poll_freq);
- } while (!time_after(jiffies, timeout));
-
- if (!reset_action) {
- mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
- rst_state);
- fw_reset->ret = -ETIMEDOUT;
- goto done;
- }
-
- mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
- if (err) {
- mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
- fw_reset->ret = err;
- }
- }
-
-done:
- mlx5_fw_reset_complete_reload(dev, true);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_abort_event(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index ea527d06a85f..d5b28525c960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked);
int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
struct netlink_ext_ack *extack);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 0864ba625c07..3304f25cc805 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -518,3 +518,13 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
+
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return true;
+
+ return xa_empty(&table->function_ids);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 860f9ddb7107..89559a37997a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -17,6 +17,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *add_attr,
@@ -61,6 +62,11 @@ static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}
+static inline bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index 396804369b00..6b36a4a7d895 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -117,7 +117,7 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
ret = -EINVAL;
- goto unlock_and_out;
+ goto free_shared_stc;
}
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index 51e4c551e0ef..d56271a9e4f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -279,7 +279,7 @@ int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
return ret;
clean_pattern:
- mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
out_unlock:
mutex_unlock(&ctx->pattern_cache->lock);
return ret;
@@ -527,7 +527,6 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
u32 *nop_locations, __be64 *new_pat)
{
u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
- u16 src_field, dst_field;
u8 action_type;
bool dependent;
size_t i, j;
@@ -539,6 +538,9 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
+ u16 src_field = INVALID_FIELD;
+ u16 dst_field = INVALID_FIELD;
+
if (j >= max_actions)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index 7e37d6e9eb83..7b5071c3df36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -124,6 +124,7 @@ static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
pool->type, pool->alloc_log_sz);
mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index e67e99487a27..40581550da1a 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -52,6 +52,8 @@ int __fbnic_open(struct fbnic_net *fbn)
fbnic_bmc_rpc_init(fbd);
fbnic_rss_reinit(fbd, fbn);
+ phylink_resume(fbn->phylink);
+
return 0;
time_stop:
fbnic_time_stop(fbn);
@@ -84,6 +86,8 @@ static int fbnic_stop(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
+
fbnic_down(fbn);
fbnic_pcs_free_irq(fbn->fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index b70e4cadb37b..28e23e3ffca8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -118,14 +118,12 @@ static void fbnic_service_task_start(struct fbnic_net *fbn)
struct fbnic_dev *fbd = fbn->fbd;
schedule_delayed_work(&fbd->service_task, HZ);
- phylink_resume(fbn->phylink);
}
static void fbnic_service_task_stop(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
cancel_delayed_work(&fbd->service_task);
}
@@ -443,11 +441,10 @@ static int __fbnic_pm_resume(struct device *dev)
/* Re-enable mailbox */
err = fbnic_fw_request_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
if (err)
goto err_free_irqs;
- devl_unlock(priv_to_devlink(fbd));
-
/* Only send log history if log buffer is empty to prevent duplicate
* log entries.
*/
@@ -464,20 +461,20 @@ static int __fbnic_pm_resume(struct device *dev)
rtnl_lock();
- if (netif_running(netdev)) {
+ if (netif_running(netdev))
err = __fbnic_open(fbn);
- if (err)
- goto err_free_mbx;
- }
rtnl_unlock();
+ if (err)
+ goto err_free_mbx;
return 0;
err_free_mbx:
fbnic_fw_log_disable(fbd);
- rtnl_unlock();
+ devl_lock(priv_to_devlink(fbd));
fbnic_fw_free_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
err_free_irqs:
fbnic_free_irqs(fbd);
err_invalidate_uc_addr:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 6cadf8de4fdf..00e929bf280b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -49,6 +49,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
+static void dwxgmac2_update_caps(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.mbps_10_100)
+ priv->hw->link.caps &= ~(MAC_10 | MAC_100);
+ else if (!priv->dma_cap.half_duplex)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
+}
+
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -1424,6 +1432,7 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
+ .update_caps = dwxgmac2_update_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1532,8 +1541,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_1000FD | MAC_2500FD | MAC_5000FD |
- MAC_10000FD;
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 5dcc95bc0ad2..4d6bb995d8d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -203,10 +203,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
}
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
-
- /* Enable MTL RX overflow */
- value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
- writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -386,8 +382,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
struct dma_features *dma_cap)
{
+ struct stmmac_priv *priv;
u32 hw_cap;
+ priv = container_of(dma_cap, struct stmmac_priv, dma_cap);
+
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
@@ -410,6 +409,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+ if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
+ dma_cap->mbps_10_100 = 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f1abf4242cd2..7b16d1207b80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2584,6 +2584,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2671,7 +2672,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdp_desc.len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
@@ -4983,6 +4984,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
{
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -5034,7 +5036,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_set_desc_addr(priv, tx_desc, dma_addr);
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdpf->len);
tx_q->tx_count_frames++;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 720104661d7f..60a4629fe6ba 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1812,6 +1812,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Enable NAPI handler before init callbacks */
netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
+ napi_enable(&net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
+ &net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
+ &net_device->chan_table[0].napi);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
@@ -1831,12 +1836,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Channel is opened */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
- napi_enable(&net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
- &net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
- &net_device->chan_table[0].napi);
-
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device, device_info);
if (ret != 0) {
@@ -1854,14 +1853,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
close:
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
- napi_disable(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
vmbus_close(device->channel);
cleanup:
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(&net_device->chan_table[0].napi);
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9e73959e61ee..c35f9685b6bf 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1252,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+	/* Enable napi before opening the vmbus channel to avoid a race:
+	 * data the host places on the host->guest ring could otherwise be
+	 * left unprocessed if napi was not yet enabled.
+	 */
+ napi_enable(&nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ &nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ &nvchan->napi);
+
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
- if (ret == 0) {
- napi_enable(&nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
- &nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
- &nvchan->napi);
- } else {
+ if (ret != 0) {
netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
+ napi_disable(&nvchan->napi);
}
if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
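
Both netvsc hunks above enforce the same ordering: the napi consumer must be live before the channel is opened and the host can start producing, and a failed open unwinds in exact reverse order. A compile-in-context sketch of that shape, where struct nvchan and open_channel() are hypothetical stand-ins for the driver's channel type and vmbus_open():

#include <linux/netdevice.h>

/* Hypothetical per-channel state; stands in for netvsc's nvchan. */
struct nvchan {
	struct napi_struct napi;
	unsigned int index;
};

/* Stand-in for vmbus_open(); after it returns the host may produce. */
static int open_channel(struct nvchan *chan)
{
	return 0;
}

static int chan_bringup(struct net_device *ndev, struct nvchan *chan)
{
	int ret;

	/* Consumer first: once the channel is open the host may place
	 * data on the ring, and without napi enabled it would sit
	 * there unprocessed. */
	napi_enable(&chan->napi);
	netif_queue_set_napi(ndev, chan->index, NETDEV_QUEUE_TYPE_RX,
			     &chan->napi);
	netif_queue_set_napi(ndev, chan->index, NETDEV_QUEUE_TYPE_TX,
			     &chan->napi);

	ret = open_channel(chan);
	if (ret) {
		/* Unwind in exact reverse order of setup. */
		netif_queue_set_napi(ndev, chan->index,
				     NETDEV_QUEUE_TYPE_TX, NULL);
		netif_queue_set_napi(ndev, chan->index,
				     NETDEV_QUEUE_TYPE_RX, NULL);
		napi_disable(&chan->napi);
	}
	return ret;
}
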
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 58c6d47fbe04..2bfe314ef881 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -481,6 +481,7 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
void vsc85xx_link_change_notify(struct phy_device *phydev);
void vsc8584_config_ts_intr(struct phy_device *phydev);
int vsc8584_ptp_init(struct phy_device *phydev);
+void vsc8584_ptp_deinit(struct phy_device *phydev);
int vsc8584_ptp_probe_once(struct phy_device *phydev);
int vsc8584_ptp_probe(struct phy_device *phydev);
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev);
@@ -495,6 +496,9 @@ static inline int vsc8584_ptp_init(struct phy_device *phydev)
{
return 0;
}
+static inline void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+}
static inline int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
return 0;
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index f1c9ce351ab4..24c75903f535 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2337,9 +2337,7 @@ static int vsc85xx_probe(struct phy_device *phydev)
static void vsc85xx_remove(struct phy_device *phydev)
{
- struct vsc8531_private *priv = phydev->priv;
-
- skb_queue_purge(&priv->rx_skbs_list);
+ vsc8584_ptp_deinit(phydev);
}
/* Microsemi VSC85xx PHYs */
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index de6c7312e8f2..72847320cb65 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1298,7 +1298,6 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
- struct vsc8531_private *vsc8531 = phydev->priv;
static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
u32 val;
@@ -1515,17 +1514,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev)
vsc85xx_ts_eth_cmp1_sig(phydev);
- vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
- vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
- vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
- vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
- phydev->mii_ts = &vsc8531->mii_ts;
-
- memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
-
- vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
- &phydev->mdio.dev);
- return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
+ return 0;
}
void vsc8584_config_ts_intr(struct phy_device *phydev)
@@ -1552,6 +1541,16 @@ int vsc8584_ptp_init(struct phy_device *phydev)
return 0;
}
+void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+
+ if (vsc8531->ptp->ptp_clock) {
+ ptp_clock_unregister(vsc8531->ptp->ptp_clock);
+ skb_queue_purge(&vsc8531->rx_skbs_list);
+ }
+}
+
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
struct vsc8531_private *priv = phydev->priv;
@@ -1612,7 +1611,16 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
vsc8531->ptp->phydev = phydev;
- return 0;
+ vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
+ vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
+ vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
+ vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
+ phydev->mii_ts = &vsc8531->mii_ts;
+
+ memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
+ vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
+ &phydev->mdio.dev);
+ return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}
int vsc8584_ptp_probe_once(struct phy_device *phydev)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e56901bb6ebc..11352d85475a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1355,6 +1355,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1034, 2)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1037, 4)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1038, 3)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d14e6d602273..975bdc5dab84 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -5758,14 +5758,15 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);
- netif_tx_lock_bh(vi->dev);
- netif_device_detach(vi->dev);
- netif_tx_unlock_bh(vi->dev);
if (netif_running(vi->dev)) {
rtnl_lock();
virtnet_close(vi->dev);
rtnl_unlock();
}
+
+ netif_tx_lock_bh(vi->dev);
+ netif_device_detach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c80426510ec2..f7e75e527667 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -17,8 +17,8 @@
/**
* of_match_device - Tell if a struct device matches an of_device_id list
- * @matches: array of of device match structures to search in
- * @dev: the of device structure to match against
+ * @matches: array of of_device_id match structures to search in
+ * @dev: the OF device structure to match against
*
* Used by a driver to check whether a platform_device present in the
* system is in its list of supported devices.
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 0aba760f7577..2eaaddcb0ec4 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -935,10 +935,15 @@ static int of_changeset_add_prop_helper(struct of_changeset *ocs,
return -ENOMEM;
ret = of_changeset_add_property(ocs, np, new_pp);
- if (ret)
+ if (ret) {
__of_prop_free(new_pp);
+ return ret;
+ }
- return ret;
+ new_pp->next = np->deadprops;
+ np->deadprops = new_pp;
+
+ return 0;
}
/**
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 77016c0cc296..2e9ea751ed2d 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,6 +25,7 @@
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
+#include <linux/dma-map-ops.h>
#include "of_private.h"
@@ -175,13 +176,17 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
- if (size &&
- early_init_dt_reserve_memory(base, size, nomap) == 0)
+ if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
+ /* Architecture specific contiguous memory fixup. */
+ if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+ of_get_flat_dt_prop(node, "reusable", NULL))
+ dma_contiguous_early_fixup(base, size);
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
- else
+ } else {
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
+ }
len -= t_len;
}
@@ -472,7 +477,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
-
+ /* Architecture specific contiguous memory fixup. */
+ if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+ of_get_flat_dt_prop(node, "reusable", NULL))
+ dma_contiguous_early_fixup(base, size);
/* Save region in the reserved_mem array */
fdt_reserved_mem_save_node(node, uname, base, size);
return 0;
@@ -771,6 +779,7 @@ int of_reserved_mem_region_to_resource(const struct device_node *np,
return -EINVAL;
resource_set_range(res, rmem->base, rmem->size);
+ res->flags = IORESOURCE_MEM;
res->name = rmem->name;
return 0;
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index ddd11668457c..be1ca8e85754 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -539,6 +539,7 @@ config PINCTRL_STMFX
tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
depends on I2C
depends on OF_GPIO
+ depends on HAS_IOMEM
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
select MFD_STMFX
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 5f1ec9e0de21..1b2f132d76f0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -2696,7 +2696,7 @@ static int airoha_pinconf_get(struct pinctrl_dev *pctrl_dev,
arg = 1;
break;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
*config = pinconf_to_config_packed(param, arg);
@@ -2788,7 +2788,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
break;
}
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
}
@@ -2805,10 +2805,10 @@ static int airoha_pinconf_group_get(struct pinctrl_dev *pctrl_dev,
if (airoha_pinconf_get(pctrl_dev,
airoha_pinctrl_groups[group].pins[i],
config))
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
if (i && cur_config != *config)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
cur_config = *config;
}
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
index e34e984c2b38..6132710aff68 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
@@ -1093,7 +1093,7 @@ static const struct of_device_id aml_pctl_of_match[] = {
{ .compatible = "amlogic,pinctrl-s6", .data = &s6_priv_data, },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, aml_pctl_dt_match);
+MODULE_DEVICE_TABLE(of, aml_pctl_of_match);
static struct platform_driver aml_pctl_driver = {
.driver = {
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 4c0aed6e626f..bdfb8a800c54 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -193,6 +193,10 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
*con_id = "privacy-led";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
+ case INT3472_GPIO_TYPE_HOTPLUG_DETECT:
+ *con_id = "hpd";
+ *gpio_flags = GPIO_ACTIVE_HIGH;
+ break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
*con_id = "avdd";
*gpio_flags = GPIO_ACTIVE_HIGH;
@@ -223,6 +227,7 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
* 0x0b Power enable
* 0x0c Clock enable
* 0x0d Privacy LED
+ * 0x13 Hotplug detect
*
* There are some known platform specific quirks where that does not quite
* hold up; for example where a pin with type 0x01 (Power down) is mapped to
@@ -292,6 +297,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
+ case INT3472_GPIO_TYPE_HOTPLUG_DETECT:
ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, con_id, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
diff --git a/drivers/soc/qcom/ubwc_config.c b/drivers/soc/qcom/ubwc_config.c
index bd0a98aad9f3..15d373bff231 100644
--- a/drivers/soc/qcom/ubwc_config.c
+++ b/drivers/soc/qcom/ubwc_config.c
@@ -12,6 +12,10 @@
#include <linux/soc/qcom/ubwc.h>
+static const struct qcom_ubwc_cfg_data no_ubwc_data = {
+ /* no UBWC, no HBB */
+};
+
static const struct qcom_ubwc_cfg_data msm8937_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
@@ -215,12 +219,20 @@ static const struct qcom_ubwc_cfg_data x1e80100_data = {
};
static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
+ { .compatible = "qcom,apq8016", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8026", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8074", .data = &no_ubwc_data },
{ .compatible = "qcom,apq8096", .data = &msm8998_data },
- { .compatible = "qcom,msm8917", .data = &msm8937_data },
+ { .compatible = "qcom,msm8226", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8916", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8917", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8937", .data = &msm8937_data },
+ { .compatible = "qcom,msm8929", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8939", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8953", .data = &msm8937_data },
- { .compatible = "qcom,msm8956", .data = &msm8937_data },
- { .compatible = "qcom,msm8976", .data = &msm8937_data },
+ { .compatible = "qcom,msm8956", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8974", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8976", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8996", .data = &msm8998_data },
{ .compatible = "qcom,msm8998", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290", .data = &qcm2290_data, },
@@ -233,7 +245,10 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,sc7280", .data = &sc7280_data, },
{ .compatible = "qcom,sc8180x", .data = &sc8180x_data, },
{ .compatible = "qcom,sc8280xp", .data = &sc8280xp_data, },
+ { .compatible = "qcom,sda660", .data = &msm8937_data },
+ { .compatible = "qcom,sdm450", .data = &msm8937_data },
{ .compatible = "qcom,sdm630", .data = &msm8937_data },
+ { .compatible = "qcom,sdm632", .data = &msm8937_data },
{ .compatible = "qcom,sdm636", .data = &msm8937_data },
{ .compatible = "qcom,sdm660", .data = &msm8937_data },
{ .compatible = "qcom,sdm670", .data = &sdm670_data, },
@@ -246,6 +261,8 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,sm6375", .data = &sm6350_data, },
{ .compatible = "qcom,sm7125", .data = &sc7180_data },
{ .compatible = "qcom,sm7150", .data = &sm7150_data, },
+ { .compatible = "qcom,sm7225", .data = &sm6350_data, },
+ { .compatible = "qcom,sm7325", .data = &sc7280_data, },
{ .compatible = "qcom,sm8150", .data = &sm8150_data, },
{ .compatible = "qcom,sm8250", .data = &sm8250_data, },
{ .compatible = "qcom,sm8350", .data = &sm8350_data, },
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 6edac0c1ba9b..c6508fe0d5c8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -99,6 +99,7 @@ struct vhost_net_ubuf_ref {
atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
+ struct rcu_head rcu;
};
#define VHOST_NET_BATCH 64
@@ -250,9 +251,13 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
- int r = atomic_sub_return(1, &ubufs->refcount);
+ int r;
+
+ rcu_read_lock();
+ r = atomic_sub_return(1, &ubufs->refcount);
if (unlikely(!r))
wake_up(&ubufs->wait);
+ rcu_read_unlock();
return r;
}
@@ -265,7 +270,7 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
vhost_net_ubuf_put_and_wait(ubufs);
- kfree(ubufs);
+ kfree_rcu(ubufs, rcu);
}
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
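
The vhost change closes a window where the waiter can free the ubufs object while a concurrent put is still touching ubufs->wait after dropping the last reference; holding an RCU read-side section across the decrement-and-wake and freeing with kfree_rcu() guarantees the memory outlives any in-flight put. A reduced kernel-style sketch of the pattern (names invented):

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct ref_obj {
	atomic_t refcount;
	wait_queue_head_t wait;
	struct rcu_head rcu;
};

static struct ref_obj *obj_alloc(void)
{
	struct ref_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj) {
		atomic_set(&obj->refcount, 1);
		init_waitqueue_head(&obj->wait);
	}
	return obj;
}

/* Dropper: the waiter may free the object the moment the count hits
 * zero, so the decrement and wake_up() sit inside one RCU read-side
 * section that kfree_rcu() is guaranteed to wait out. */
static void obj_put(struct ref_obj *obj)
{
	rcu_read_lock();
	if (atomic_dec_and_test(&obj->refcount))
		wake_up(&obj->wait);
	rcu_read_unlock();
}

/* Waiter: wait for all references to drop, then free only after a
 * grace period so no concurrent obj_put() still holds the pointer. */
static void obj_put_wait_and_free(struct ref_obj *obj)
{
	obj_put(obj);
	wait_event(obj->wait, !atomic_read(&obj->refcount));
	kfree_rcu(obj, rcu);
}
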
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index a5d63269f20b..d0728285b6ce 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -360,11 +360,15 @@ static int virtinput_freeze(struct virtio_device *vdev)
{
struct virtio_input *vi = vdev->priv;
unsigned long flags;
+ void *buf;
spin_lock_irqsave(&vi->lock, flags);
vi->ready = false;
spin_unlock_irqrestore(&vi->lock, flags);
+ virtio_reset_device(vdev);
+ while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+ kfree(buf);
vdev->config->del_vqs(vdev);
return 0;
}
diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c
index 677d1f68bc9b..bbbf89c22880 100644
--- a/drivers/virtio/virtio_pci_legacy_dev.c
+++ b/drivers/virtio/virtio_pci_legacy_dev.c
@@ -140,9 +140,9 @@ EXPORT_SYMBOL_GPL(vp_legacy_set_status);
* vp_legacy_queue_vector - set the MSIX vector for a specific virtqueue
* @ldev: the legacy virtio-pci device
* @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
*
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
*/
u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
u16 index, u16 vector)
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index d665f8f73ea8..9e503b7a58d8 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -546,9 +546,9 @@ EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);
* vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
* @mdev: the modern virtio-pci device
* @index: queue index
- * @vector: the config vector
+ * @vector: the queue vector
*
- * Returns the config vector read from the device
+ * Returns the queue vector read from the device
*/
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
u16 index, u16 vector)
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 274532facfd6..2e7088264355 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -103,7 +103,7 @@ struct drm_gpuva {
} va;
/**
- * @gem: structure containing the &drm_gem_object and it's offset
+ * @gem: structure containing the &drm_gem_object and its offset
*/
struct {
/**
@@ -843,7 +843,7 @@ struct drm_gpuva_op_map {
} va;
/**
- * @gem: structure containing the &drm_gem_object and it's offset
+ * @gem: structure containing the &drm_gem_object and its offset
*/
struct {
/**
@@ -1189,11 +1189,11 @@ struct drm_gpuvm_ops {
/**
* @sm_step_unmap: called from &drm_gpuvm_sm_map and
- * &drm_gpuvm_sm_unmap to unmap an existent mapping
+ * &drm_gpuvm_sm_unmap to unmap an existing mapping
*
- * This callback is called when existent mapping needs to be unmapped.
+	 * This callback is called when an existing mapping needs to be unmapped.
* This is the case when either a newly requested mapping encloses an
- * existent mapping or an unmap of an existent mapping is requested.
+ * existing mapping or an unmap of an existing mapping is requested.
*
* The &priv pointer matches the one the driver passed to
* &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 45f2f278b50a..70807c679f1a 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -185,6 +185,7 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
+ int (*pre_send)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index f48e5fb88bd5..332b80c42b6f 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -153,6 +153,9 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
{
__free_pages(page, get_order(size));
}
+static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+}
#endif /* CONFIG_DMA_CMA*/
#ifdef CONFIG_DMA_DECLARE_COHERENT
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b96746376e17..fcda8481de9a 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -40,8 +40,9 @@ extern unsigned long long max_possible_pfn;
* via a driver, and never indicated in the firmware-provided memory map as
* system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
* kernel resource tree.
- * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
- * not initialized (only for reserved regions).
+ * @MEMBLOCK_RSRV_NOINIT: reserved memory region for which struct pages are not
+ * fully initialized. Users of this flag are responsible for properly
+ * initializing the struct pages of this region.
* @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
* either explictitly with memblock_reserve_kern() or via memblock
* allocation APIs. All memblock allocations set this flag.
diff --git a/include/linux/platform_data/x86/int3472.h b/include/linux/platform_data/x86/int3472.h
index 78276a11c48d..1571e9157fa5 100644
--- a/include/linux/platform_data/x86/int3472.h
+++ b/include/linux/platform_data/x86/int3472.h
@@ -27,6 +27,7 @@
#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
#define INT3472_GPIO_TYPE_HANDSHAKE 0x12
+#define INT3472_GPIO_TYPE_HOTPLUG_DETECT 0x13
#define INT3472_PDEV_MAX_NAME_LEN 23
#define INT3472_MAX_SENSOR_GPIOS 3
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 14b923ddb6df..fa633657e4c0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4172,6 +4172,8 @@ int skb_copy_and_crc32c_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len, u32 *crcp);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
+int skb_copy_datagram_from_iter_full(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 918cf25cd3c6..8bf156dde554 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -328,8 +328,6 @@ static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
struct virtio_shm_region *region, u8 id)
{
- if (!region->len)
- return false;
if (!vdev->config->get_shm_region)
return false;
return vdev->config->get_shm_region(vdev, region, id);
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 5224f57f6af2..e352a4e0ef8d 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -93,7 +93,7 @@ int hci_update_class_sync(struct hci_dev *hdev);
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
-int hci_update_name_sync(struct hci_dev *hdev);
+int hci_update_name_sync(struct hci_dev *hdev, const u8 *name);
int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
diff --git a/include/net/rose.h b/include/net/rose.h
index 23267b4efcfa..2b5491bbf39a 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -8,6 +8,7 @@
#ifndef _ROSE_H
#define _ROSE_H
+#include <linux/refcount.h>
#include <linux/rose.h>
#include <net/ax25.h>
#include <net/sock.h>
@@ -96,7 +97,7 @@ struct rose_neigh {
ax25_cb *ax25;
struct net_device *dev;
unsigned short count;
- unsigned short use;
+ refcount_t use;
unsigned int number;
char restarted;
char dce_mode;
@@ -151,6 +152,21 @@ struct rose_sock {
#define rose_sk(sk) ((struct rose_sock *)(sk))
+static inline void rose_neigh_hold(struct rose_neigh *rose_neigh)
+{
+ refcount_inc(&rose_neigh->use);
+}
+
+static inline void rose_neigh_put(struct rose_neigh *rose_neigh)
+{
+ if (refcount_dec_and_test(&rose_neigh->use)) {
+ if (rose_neigh->ax25)
+ ax25_cb_put(rose_neigh->ax25);
+ kfree(rose_neigh->digipeat);
+ kfree(rose_neigh);
+ }
+}
+
/* af_rose.c */
extern ax25_address rose_callsign;
extern int sysctl_rose_restart_request_timeout;
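
The rose.h hunk above converts the ad-hoc 'use' counter to the standard refcount_t discipline: one reference set at creation, rose_neigh_hold() for every stored pointer, and the final rose_neigh_put() freeing the object. The generic form of that lifecycle, sketched with a hypothetical object type:

#include <linux/refcount.h>
#include <linux/slab.h>

struct node {
	refcount_t use;
	/* payload ... */
};

static struct node *node_create(void)
{
	struct node *n = kzalloc(sizeof(*n), GFP_KERNEL);

	if (n)
		refcount_set(&n->use, 1);	/* creator's reference */
	return n;
}

static void node_hold(struct node *n)
{
	refcount_inc(&n->use);			/* one per stored pointer */
}

static void node_put(struct node *n)
{
	if (refcount_dec_and_test(&n->use))
		kfree(n);			/* last reference frees */
}
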
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 283348b64af9..c57674a6aa0d 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -260,7 +260,7 @@
* When fork_owner is set to VHOST_FORK_OWNER_KTHREAD:
* - Vhost will create vhost workers as kernel threads.
*/
-#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x83, __u8)
+#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x84, __u8)
/**
* VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device.
@@ -268,6 +268,6 @@
*
* @return: An 8-bit value indicating the current thread mode.
*/
-#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x84, __u8)
+#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x85, __u8)
#endif
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index f2d2cc319faa..19a8bde5e1e1 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -36,15 +36,19 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
{
while (len) {
struct io_uring_buf *buf;
- u32 this_len;
+ u32 buf_len, this_len;
buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
- this_len = min_t(int, len, buf->len);
- buf->len -= this_len;
- if (buf->len) {
+ buf_len = READ_ONCE(buf->len);
+ this_len = min_t(u32, len, buf_len);
+ buf_len -= this_len;
+ /* Stop looping for invalid buffer length of 0 */
+ if (buf_len || !this_len) {
buf->addr += this_len;
+ buf->len = buf_len;
return false;
}
+ buf->len = 0;
bl->head++;
len -= this_len;
}
@@ -159,6 +163,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
__u16 tail, head = bl->head;
struct io_uring_buf *buf;
void __user *ret;
+ u32 buf_len;
tail = smp_load_acquire(&br->tail);
if (unlikely(tail == head))
@@ -168,8 +173,9 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
req->flags |= REQ_F_BL_EMPTY;
buf = io_ring_head_to_buf(br, head, bl->mask);
- if (*len == 0 || *len > buf->len)
- *len = buf->len;
+ buf_len = READ_ONCE(buf->len);
+ if (*len == 0 || *len > buf_len)
+ *len = buf_len;
req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
req->buf_list = bl;
req->buf_index = buf->bid;
@@ -265,7 +271,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
req->buf_index = buf->bid;
do {
- u32 len = buf->len;
+ u32 len = READ_ONCE(buf->len);
/* truncate end piece, if needed, for non partial buffers */
if (len > arg->max_len) {
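
The kbuf ring lives in memory shared with userspace, so buf->len can change between two kernel reads; the hunks above snapshot it once with READ_ONCE() and compute only on the local copy. The same discipline in miniature, with an assumed shared-buffer layout:

#include <linux/compiler.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* Layout assumed here; the real io_uring_buf lives in a ring mapped
 * into userspace, which is what makes the snapshot necessary. */
struct shared_buf {
	__u64 addr;
	__u32 len;	/* userspace may rewrite this at any time */
};

/* Consume up to 'want' bytes: read the shared length exactly once,
 * then test and compute on the local copy only, so a concurrent
 * userspace store cannot desynchronize check and arithmetic. */
static bool consume(struct shared_buf *buf, __u32 want)
{
	__u32 len = READ_ONCE(buf->len);
	__u32 take = min(want, len);

	if (len - take) {
		buf->addr += take;
		buf->len = len - take;
		return false;	/* buffer only partially used */
	}
	buf->len = 0;
	return true;		/* fully consumed, advance the head */
}
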
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 67af8a55185d..d9b9dcba6ff7 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -483,8 +483,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
pr_err("Reserved memory: unable to setup CMA region\n");
return err;
}
- /* Architecture specific contiguous memory fixup. */
- dma_contiguous_early_fixup(rmem->base, rmem->size);
if (default_cma)
dma_contiguous_default_area = cma;
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 7b04f7575796..ee45dee33d49 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -102,8 +102,8 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
#ifdef CONFIG_DMA_DIRECT_REMAP
addr = dma_common_contiguous_remap(page, pool_size,
- pgprot_dmacoherent(PAGE_KERNEL),
- __builtin_return_address(0));
+ pgprot_decrypted(pgprot_dmacoherent(PAGE_KERNEL)),
+ __builtin_return_address(0));
if (!addr)
goto free_page;
#else
diff --git a/mm/memblock.c b/mm/memblock.c
index 154f1d73b61f..117d963e677c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -780,9 +780,9 @@ bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_byt
}
if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
- mem_size_mb = memblock_phys_mem_size() >> 20;
+ mem_size_mb = memblock_phys_mem_size() / SZ_1M;
pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
- (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
+ (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
return false;
}
@@ -1091,13 +1091,20 @@ int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
/**
* memblock_reserved_mark_noinit - Mark a reserved memory region with flag
- * MEMBLOCK_RSRV_NOINIT which results in the struct pages not being initialized
- * for this region.
+ * MEMBLOCK_RSRV_NOINIT
+ *
* @base: the base phys addr of the region
* @size: the size of the region
*
- * struct pages will not be initialized for reserved memory regions marked with
- * %MEMBLOCK_RSRV_NOINIT.
+ * The struct pages for the reserved regions marked %MEMBLOCK_RSRV_NOINIT will
+ * not be fully initialized, allowing the caller to optimize their initialization.
+ *
+ * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, setting this flag
+ * completely bypasses the initialization of struct pages for such region.
+ *
+ * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, struct pages in this
+ * region will be initialized with default values but won't be marked as
+ * reserved.
*
* Return: 0 on success, -errno on failure.
*/
diff --git a/mm/numa_emulation.c b/mm/numa_emulation.c
index 9d55679d99ce..703c8fa05048 100644
--- a/mm/numa_emulation.c
+++ b/mm/numa_emulation.c
@@ -73,7 +73,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
}
printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
- nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
+ nid, eb->start, eb->end - 1, (eb->end - eb->start) / SZ_1M);
return 0;
}
@@ -264,7 +264,7 @@ static int __init split_nodes_size_interleave_uniform(struct numa_meminfo *ei,
min_size = ALIGN(max(min_size, FAKE_NODE_MIN_SIZE), FAKE_NODE_MIN_SIZE);
if (size < min_size) {
pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
- size >> 20, min_size >> 20);
+ size / SZ_1M, min_size / SZ_1M);
size = min_size;
}
size = ALIGN_DOWN(size, FAKE_NODE_MIN_SIZE);
diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c
index 541a99c4071a..5b009a9cd8b4 100644
--- a/mm/numa_memblks.c
+++ b/mm/numa_memblks.c
@@ -76,7 +76,7 @@ static int __init numa_alloc_distance(void)
for (j = 0; j < cnt; j++)
numa_distance[i * cnt + j] = i == j ?
LOCAL_DISTANCE : REMOTE_DISTANCE;
- printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);
+ pr_debug("NUMA: Initialized distance table, cnt=%d\n", cnt);
return 0;
}
@@ -427,9 +427,9 @@ static int __init numa_register_meminfo(struct numa_meminfo *mi)
unsigned long pfn_align = node_map_pfn_alignment();
if (pfn_align && pfn_align < PAGES_PER_SECTION) {
- unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+ unsigned long node_align_mb = PFN_PHYS(pfn_align) / SZ_1M;
- unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+ unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) / SZ_1M;
pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
node_align_mb, sect_align_mb);
diff --git a/net/atm/common.c b/net/atm/common.c
index d7f7976ea13a..881c7f259dbd 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -635,18 +635,27 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
skb->dev = NULL; /* for paths shared with net_device interfaces */
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
- atm_return_tx(vcc, skb);
- kfree_skb(skb);
error = -EFAULT;
- goto out;
+ goto free_skb;
}
if (eff != size)
memset(skb->data + size, 0, eff-size);
+
+ if (vcc->dev->ops->pre_send) {
+ error = vcc->dev->ops->pre_send(vcc, skb);
+ if (error)
+ goto free_skb;
+ }
+
error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
+free_skb:
+ atm_return_tx(vcc, skb);
+ kfree_skb(skb);
+ goto out;
}
__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
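
The new ->pre_send() hook gives a driver a veto point after the skb has been fully built and accounted but before ->send(); as the vcc_sendmsg() hunk shows, the core returns the skb to the accounting pool and frees it when the hook fails. A hypothetical driver-side implementation (the mydrv_* names are invented):

#include <linux/atmdev.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Reject frames that exceed the VCC's negotiated TX SDU before they
 * reach the hardware queue; the core frees the skb when this fails. */
static int mydrv_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	if (vcc->qos.txtp.max_sdu &&
	    skb->len > (unsigned int)vcc->qos.txtp.max_sdu)
		return -EMSGSIZE;
	return 0;
}

static int mydrv_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* Hardware submission elided in this sketch. */
	dev_kfree_skb_any(skb);
	return 0;
}

static const struct atmdev_ops mydrv_ops = {
	.pre_send	= mydrv_pre_send,
	.send		= mydrv_send,
};
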
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7a879290dd28..e524bb59bff2 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -149,8 +149,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_chan_list_flush(conn);
- hci_conn_hash_del(hdev, conn);
-
if (HCI_CONN_HANDLE_UNSET(conn->handle))
ida_free(&hdev->unset_handle_ida, conn->handle);
@@ -1152,28 +1150,54 @@ void hci_conn_del(struct hci_conn *conn)
disable_delayed_work_sync(&conn->auto_accept_work);
disable_delayed_work_sync(&conn->idle_work);
- if (conn->type == ACL_LINK) {
- /* Unacked frames */
- hdev->acl_cnt += conn->sent;
- } else if (conn->type == LE_LINK) {
- cancel_delayed_work(&conn->le_conn_timeout);
+	/* Remove the connection from the list so the unacked logic can
+	 * detect when a given buffer pool is no longer being utilized.
+	 */
+ hci_conn_hash_del(hdev, conn);
- if (hdev->le_pkts)
- hdev->le_cnt += conn->sent;
+	/* Handle unacked frames:
+	 *
+	 * - In case there are no connections, or if restoring the buffers
+	 * considered in transit would overflow, restore all buffers to the
+	 * pool.
+	 * - Otherwise restore just the buffers considered in transit for the
+	 * hci_conn.
+	 */
+ switch (conn->type) {
+ case ACL_LINK:
+ if (!hci_conn_num(hdev, ACL_LINK) ||
+ hdev->acl_cnt + conn->sent > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
else
hdev->acl_cnt += conn->sent;
- } else {
- /* Unacked ISO frames */
- if (conn->type == CIS_LINK ||
- conn->type == BIS_LINK ||
- conn->type == PA_LINK) {
- if (hdev->iso_pkts)
- hdev->iso_cnt += conn->sent;
- else if (hdev->le_pkts)
+ break;
+ case LE_LINK:
+ cancel_delayed_work(&conn->le_conn_timeout);
+
+ if (hdev->le_pkts) {
+ if (!hci_conn_num(hdev, LE_LINK) ||
+ hdev->le_cnt + conn->sent > hdev->le_pkts)
+ hdev->le_cnt = hdev->le_pkts;
+ else
hdev->le_cnt += conn->sent;
+ } else {
+ if ((!hci_conn_num(hdev, LE_LINK) &&
+ !hci_conn_num(hdev, ACL_LINK)) ||
+ hdev->acl_cnt + conn->sent > hdev->acl_pkts)
+ hdev->acl_cnt = hdev->acl_pkts;
else
hdev->acl_cnt += conn->sent;
}
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
+ case PA_LINK:
+ if (!hci_iso_count(hdev) ||
+ hdev->iso_cnt + conn->sent > hdev->iso_pkts)
+ hdev->iso_cnt = hdev->iso_pkts;
+ else
+ hdev->iso_cnt += conn->sent;
+ break;
}
skb_queue_purge(&conn->data_q);
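
All three branches of the rewritten hci_conn_del() apply one rule: return this connection's in-flight credits to its pool, unless no connection of that class remains or the addition would overflow, in which case reset the pool to full capacity. Factored into a plain-C helper, the rule reads:

#include <stdbool.h>

/* 'cnt' is the pool's free-credit counter, 'pkts' its capacity,
 * 'sent' this connection's in-flight frames, and 'last' whether no
 * connection of this class remains. */
static unsigned int restore_credits(unsigned int cnt, unsigned int sent,
				    unsigned int pkts, bool last)
{
	if (last || cnt + sent > pkts)
		return pkts;	/* nothing can still be in flight: reset */
	return cnt + sent;	/* hand this connection's credits back */
}
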
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index fe7cdd67ad2a..7a2174851857 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2703,7 +2703,7 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
if (!conn)
goto unlock;
- if (status) {
+ if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) {
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
@@ -2718,6 +2718,12 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
goto done;
}
+	/* During suspend, mark the connection as closed immediately
+	 * since we might not receive HCI_EV_DISCONN_COMPLETE.
+	 */
+ if (hdev->suspended)
+ conn->state = BT_CLOSED;
+
mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
if (conn->type == ACL_LINK) {
@@ -4398,7 +4404,17 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
if (!conn)
continue;
- conn->sent -= count;
+	/* Check if there really are enough packets outstanding before
+	 * attempting to decrease the sent counter, otherwise it could
+	 * underflow.
+	 */
+ if (conn->sent >= count) {
+ conn->sent -= count;
+ } else {
+ bt_dev_warn(hdev, "hcon %p sent %u < count %u",
+ conn, conn->sent, count);
+ conn->sent = 0;
+ }
for (i = 0; i < count; ++i)
hci_conn_tx_dequeue(conn);
@@ -7008,6 +7024,7 @@ static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
{
struct hci_evt_le_big_sync_lost *ev = data;
struct hci_conn *bis, *conn;
+ bool mgmt_conn;
bt_dev_dbg(hdev, "big handle 0x%2.2x", ev->handle);
@@ -7026,6 +7043,10 @@ static void hci_le_big_sync_lost_evt(struct hci_dev *hdev, void *data,
while ((bis = hci_conn_hash_lookup_big_state(hdev, ev->handle,
BT_CONNECTED,
HCI_ROLE_SLAVE))) {
+ mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &bis->flags);
+ mgmt_device_disconnected(hdev, &bis->dst, bis->type, bis->dst_type,
+ ev->reason, mgmt_conn);
+
clear_bit(HCI_CONN_BIG_SYNC, &bis->flags);
hci_disconn_cfm(bis, ev->reason);
hci_conn_del(bis);
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 31d72b9683ef..b6f888d8354e 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -3481,13 +3481,13 @@ int hci_update_scan_sync(struct hci_dev *hdev)
return hci_write_scan_enable_sync(hdev, scan);
}
-int hci_update_name_sync(struct hci_dev *hdev)
+int hci_update_name_sync(struct hci_dev *hdev, const u8 *name)
{
struct hci_cp_write_local_name cp;
memset(&cp, 0, sizeof(cp));
- memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+ memcpy(cp.name, name, sizeof(cp.name));
return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
sizeof(cp), &cp,
@@ -3540,7 +3540,7 @@ int hci_powered_update_sync(struct hci_dev *hdev)
hci_write_fast_connectable_sync(hdev, false);
hci_update_scan_sync(hdev);
hci_update_class_sync(hdev);
- hci_update_name_sync(hdev);
+ hci_update_name_sync(hdev, hdev->dev_name);
hci_update_eir_sync(hdev);
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3166f5fb876b..50634ef5c8b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -3892,8 +3892,11 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
static int set_name_sync(struct hci_dev *hdev, void *data)
{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_local_name *cp = cmd->param;
+
if (lmp_bredr_capable(hdev)) {
- hci_update_name_sync(hdev);
+ hci_update_name_sync(hdev, cp->name);
hci_update_eir_sync(hdev);
}
@@ -9705,7 +9708,9 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
if (!mgmt_connected)
return;
- if (link_type != ACL_LINK && link_type != LE_LINK)
+ if (link_type != ACL_LINK &&
+ link_type != LE_LINK &&
+ link_type != BIS_LINK)
return;
bacpy(&ev.addr.bdaddr, bdaddr);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 94cc4705e91d..f474b9b120f9 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -618,6 +618,20 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
+int skb_copy_datagram_from_iter_full(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len)
+{
+ struct iov_iter_state state;
+ int ret;
+
+ iov_iter_save_state(from, &state);
+ ret = skb_copy_datagram_from_iter(skb, offset, from, len);
+ if (ret)
+ iov_iter_restore(from, &state);
+ return ret;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iter_full);
+
int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
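
The _full variant wraps the copy in iov_iter_save_state()/iov_iter_restore(), so a mid-copy fault rewinds the iterator instead of leaving it partially advanced. That gives callers all-or-nothing semantics, as in this hypothetical send path:

#include <linux/skbuff.h>
#include <linux/socket.h>

/* Hypothetical transport send path. With the _full variant a fault
 * no longer leaves msg_iter partially advanced, so the caller can
 * drop the skb and surface the error without corrupting a retry. */
static int xmit_payload(struct sk_buff *skb, struct msghdr *msg, size_t len)
{
	int err;

	err = skb_copy_datagram_from_iter_full(skb, 0, &msg->msg_iter, len);
	if (err) {
		kfree_skb(skb);	/* iterator state is as it was on entry */
		return err;
	}
	/* Hand the skb to the device queue (elided). */
	return 0;
}
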
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 343a6cac21e3..ba70569bd4b0 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -287,8 +287,10 @@ static int page_pool_init(struct page_pool *pool,
}
if (pool->mp_ops) {
- if (!pool->dma_map || !pool->dma_sync)
- return -EOPNOTSUPP;
+ if (!pool->dma_map || !pool->dma_sync) {
+ err = -EOPNOTSUPP;
+ goto free_ptr_ring;
+ }
if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) {
err = -EFAULT;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f639a2ae881a..baa43e5966b1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2575,12 +2575,16 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
!netif_is_l3_master(dev_out))
return ERR_PTR(-EINVAL);
- if (ipv4_is_lbcast(fl4->daddr))
+ if (ipv4_is_lbcast(fl4->daddr)) {
type = RTN_BROADCAST;
- else if (ipv4_is_multicast(fl4->daddr))
+
+ /* reset fi to prevent gateway resolution */
+ fi = NULL;
+ } else if (ipv4_is_multicast(fl4->daddr)) {
type = RTN_MULTICAST;
- else if (ipv4_is_zeronet(fl4->daddr))
+ } else if (ipv4_is_zeronet(fl4->daddr)) {
return ERR_PTR(-EINVAL);
+ }
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index fc5c2fd8f34c..5e12e7ce17d8 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -129,22 +129,12 @@ static const struct ppp_channel_ops pppol2tp_chan_ops = {
static const struct proto_ops pppol2tp_ops;
-/* Retrieves the pppol2tp socket associated to a session.
- * A reference is held on the returned socket, so this function must be paired
- * with sock_put().
- */
+/* Retrieves the pppol2tp socket associated with a session. The caller must hold rcu_read_lock(). */
static struct sock *pppol2tp_session_get_sock(struct l2tp_session *session)
{
struct pppol2tp_session *ps = l2tp_session_priv(session);
- struct sock *sk;
-
- rcu_read_lock();
- sk = rcu_dereference(ps->sk);
- if (sk)
- sock_hold(sk);
- rcu_read_unlock();
- return sk;
+ return rcu_dereference(ps->sk);
}
/* Helpers to obtain tunnel/session contexts from sockets.
@@ -206,14 +196,13 @@ end:
static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
- struct pppol2tp_session *ps = l2tp_session_priv(session);
- struct sock *sk = NULL;
+ struct sock *sk;
/* If the socket is bound, send it in to PPP's input queue. Otherwise
* queue it on the session socket.
*/
rcu_read_lock();
- sk = rcu_dereference(ps->sk);
+ sk = pppol2tp_session_get_sock(session);
if (!sk)
goto no_sock;
@@ -510,13 +499,14 @@ static void pppol2tp_show(struct seq_file *m, void *arg)
struct l2tp_session *session = arg;
struct sock *sk;
+ rcu_read_lock();
sk = pppol2tp_session_get_sock(session);
if (sk) {
struct pppox_sock *po = pppox_sk(sk);
seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
- sock_put(sk);
}
+ rcu_read_unlock();
}
static void pppol2tp_session_init(struct l2tp_session *session)
@@ -1530,6 +1520,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
port = ntohs(inet->inet_sport);
}
+ rcu_read_lock();
sk = pppol2tp_session_get_sock(session);
if (sk) {
state = sk->sk_state;
@@ -1565,8 +1556,8 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
struct pppox_sock *po = pppox_sk(sk);
seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
- sock_put(sk);
}
+ rcu_read_unlock();
}
static int pppol2tp_seq_show(struct seq_file *m, void *v)
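
With the sock_hold()/sock_put() pair gone, pppol2tp_session_get_sock() returns a pointer whose validity is bounded by the caller's rcu_read_lock() section, which is why the callers above now bracket both the lookup and every use of the result with RCU. The general caller pattern, reduced to a sketch:

#include <linux/rcupdate.h>

struct obj {
	int value;
};

static struct obj __rcu *global_obj;

/* Generic RCU read-side lookup: the returned pointer is only valid
 * inside the rcu_read_lock() section, so all use must happen before
 * rcu_read_unlock() -- the discipline the l2tp callers now follow
 * instead of taking a per-lookup reference. */
static int read_value(void)
{
	struct obj *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(global_obj);
	if (p)
		val = p->value;	/* use strictly within the section */
	rcu_read_unlock();

	return val;
}
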
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 4e72b636a46a..543f9e8ebb69 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -170,7 +170,7 @@ void rose_kill_by_neigh(struct rose_neigh *neigh)
if (rose->neighbour == neigh) {
rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
rose->neighbour = NULL;
}
}
@@ -212,7 +212,7 @@ start:
if (rose->device == dev) {
rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
if (rose->neighbour)
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
netdev_put(rose->device, &rose->dev_tracker);
rose->device = NULL;
}
@@ -655,7 +655,7 @@ static int rose_release(struct socket *sock)
break;
case ROSE_STATE_2:
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
release_sock(sk);
rose_disconnect(sk, 0, -1, -1);
lock_sock(sk);
@@ -823,6 +823,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
rose->lci = rose_new_lci(rose->neighbour);
if (!rose->lci) {
err = -ENETUNREACH;
+ rose_neigh_put(rose->neighbour);
goto out_release;
}
@@ -834,12 +835,14 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
dev = rose_dev_first();
if (!dev) {
err = -ENETUNREACH;
+ rose_neigh_put(rose->neighbour);
goto out_release;
}
user = ax25_findbyuid(current_euid());
if (!user) {
err = -EINVAL;
+ rose_neigh_put(rose->neighbour);
dev_put(dev);
goto out_release;
}
@@ -874,8 +877,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
rose->state = ROSE_STATE_1;
- rose->neighbour->use++;
-
rose_write_internal(sk, ROSE_CALL_REQUEST);
rose_start_heartbeat(sk);
rose_start_t1timer(sk);
@@ -1077,7 +1078,7 @@ int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct ros
GFP_ATOMIC);
make_rose->facilities = facilities;
- make_rose->neighbour->use++;
+ rose_neigh_hold(make_rose->neighbour);
if (rose_sk(sk)->defer) {
make_rose->state = ROSE_STATE_5;
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 3e99181e759f..0276b393f0e5 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -56,7 +56,7 @@ static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
default:
@@ -79,12 +79,12 @@ static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
case ROSE_CLEAR_CONFIRMATION:
rose_disconnect(sk, 0, -1, -1);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
default:
@@ -121,7 +121,7 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
case ROSE_RR:
@@ -234,7 +234,7 @@ static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int framety
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
break;
default:
@@ -254,7 +254,7 @@ static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int framety
if (frametype == ROSE_CLEAR_REQUEST) {
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
- rose_sk(sk)->neighbour->use--;
+ rose_neigh_put(rose_sk(sk)->neighbour);
}
return 0;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index b72bf8a08d48..a1e9b05ef6f5 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -93,11 +93,11 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
rose_neigh->ax25 = NULL;
rose_neigh->dev = dev;
rose_neigh->count = 0;
- rose_neigh->use = 0;
rose_neigh->dce_mode = 0;
rose_neigh->loopback = 0;
rose_neigh->number = rose_neigh_no++;
rose_neigh->restarted = 0;
+ refcount_set(&rose_neigh->use, 1);
skb_queue_head_init(&rose_neigh->queue);
@@ -178,6 +178,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
}
}
rose_neigh->count++;
+ rose_neigh_hold(rose_neigh);
goto out;
}
@@ -187,6 +188,7 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
rose_node->neighbour[rose_node->count] = rose_neigh;
rose_node->count++;
rose_neigh->count++;
+ rose_neigh_hold(rose_neigh);
}
out:
@@ -234,20 +236,12 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh)
if ((s = rose_neigh_list) == rose_neigh) {
rose_neigh_list = rose_neigh->next;
- if (rose_neigh->ax25)
- ax25_cb_put(rose_neigh->ax25);
- kfree(rose_neigh->digipeat);
- kfree(rose_neigh);
return;
}
while (s != NULL && s->next != NULL) {
if (s->next == rose_neigh) {
s->next = rose_neigh->next;
- if (rose_neigh->ax25)
- ax25_cb_put(rose_neigh->ax25);
- kfree(rose_neigh->digipeat);
- kfree(rose_neigh);
return;
}
@@ -263,10 +257,10 @@ static void rose_remove_route(struct rose_route *rose_route)
struct rose_route *s;
if (rose_route->neigh1 != NULL)
- rose_route->neigh1->use--;
+ rose_neigh_put(rose_route->neigh1);
if (rose_route->neigh2 != NULL)
- rose_route->neigh2->use--;
+ rose_neigh_put(rose_route->neigh2);
if ((s = rose_route_list) == rose_route) {
rose_route_list = rose_route->next;
@@ -330,9 +324,12 @@ static int rose_del_node(struct rose_route_struct *rose_route,
for (i = 0; i < rose_node->count; i++) {
if (rose_node->neighbour[i] == rose_neigh) {
rose_neigh->count--;
+ rose_neigh_put(rose_neigh);
- if (rose_neigh->count == 0 && rose_neigh->use == 0)
+ if (rose_neigh->count == 0) {
rose_remove_neigh(rose_neigh);
+ rose_neigh_put(rose_neigh);
+ }
rose_node->count--;
@@ -381,11 +378,11 @@ void rose_add_loopback_neigh(void)
sn->ax25 = NULL;
sn->dev = NULL;
sn->count = 0;
- sn->use = 0;
sn->dce_mode = 1;
sn->loopback = 1;
sn->number = rose_neigh_no++;
sn->restarted = 1;
+ refcount_set(&sn->use, 1);
skb_queue_head_init(&sn->queue);
@@ -436,6 +433,7 @@ int rose_add_loopback_node(const rose_address *address)
rose_node_list = rose_node;
rose_loopback_neigh->count++;
+ rose_neigh_hold(rose_loopback_neigh);
out:
spin_unlock_bh(&rose_node_list_lock);
@@ -467,6 +465,7 @@ void rose_del_loopback_node(const rose_address *address)
rose_remove_node(rose_node);
rose_loopback_neigh->count--;
+ rose_neigh_put(rose_loopback_neigh);
out:
spin_unlock_bh(&rose_node_list_lock);
@@ -506,6 +505,7 @@ void rose_rt_device_down(struct net_device *dev)
memmove(&t->neighbour[i], &t->neighbour[i + 1],
sizeof(t->neighbour[0]) *
(t->count - i));
+ rose_neigh_put(s);
}
if (t->count <= 0)
@@ -513,6 +513,7 @@ void rose_rt_device_down(struct net_device *dev)
}
rose_remove_neigh(s);
+ rose_neigh_put(s);
}
spin_unlock_bh(&rose_neigh_list_lock);
spin_unlock_bh(&rose_node_list_lock);
@@ -548,6 +549,7 @@ static int rose_clear_routes(void)
{
struct rose_neigh *s, *rose_neigh;
struct rose_node *t, *rose_node;
+ int i;
spin_lock_bh(&rose_node_list_lock);
spin_lock_bh(&rose_neigh_list_lock);
@@ -558,17 +560,21 @@ static int rose_clear_routes(void)
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
- if (!t->loopback)
+
+ if (!t->loopback) {
+ for (i = 0; i < t->count; i++)
+ rose_neigh_put(t->neighbour[i]);
rose_remove_node(t);
+ }
}
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
- if (s->use == 0 && !s->loopback) {
- s->count = 0;
+ if (!s->loopback) {
rose_remove_neigh(s);
+ rose_neigh_put(s);
}
}
@@ -684,6 +690,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
for (i = 0; i < node->count; i++) {
if (node->neighbour[i]->restarted) {
res = node->neighbour[i];
+ rose_neigh_hold(node->neighbour[i]);
goto out;
}
}
@@ -695,6 +702,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
+ rose_neigh_hold(node->neighbour[i]);
goto out;
}
failed = 1;
@@ -784,13 +792,13 @@ static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
}
if (rose_route->neigh1 == rose_neigh) {
- rose_route->neigh1->use--;
+ rose_neigh_put(rose_route->neigh1);
rose_route->neigh1 = NULL;
rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
}
if (rose_route->neigh2 == rose_neigh) {
- rose_route->neigh2->use--;
+ rose_neigh_put(rose_route->neigh2);
rose_route->neigh2 = NULL;
rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
}
@@ -919,7 +927,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
rose_clear_queues(sk);
rose->cause = ROSE_NETWORK_CONGESTION;
rose->diagnostic = 0;
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
rose->neighbour = NULL;
rose->lci = 0;
rose->state = ROSE_STATE_0;
@@ -1044,12 +1052,12 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if ((new_lci = rose_new_lci(new_neigh)) == 0) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
- goto out;
+ goto put_neigh;
}
if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
- goto out;
+ goto put_neigh;
}
rose_route->lci1 = lci;
@@ -1062,8 +1070,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
rose_route->lci2 = new_lci;
rose_route->neigh2 = new_neigh;
- rose_route->neigh1->use++;
- rose_route->neigh2->use++;
+ rose_neigh_hold(rose_route->neigh1);
+ rose_neigh_hold(rose_route->neigh2);
rose_route->next = rose_route_list;
rose_route_list = rose_route;
@@ -1075,6 +1083,8 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
rose_transmit_link(skb, rose_route->neigh2);
res = 1;
+put_neigh:
+ rose_neigh_put(new_neigh);
out:
spin_unlock_bh(&rose_route_list_lock);
spin_unlock_bh(&rose_neigh_list_lock);
@@ -1190,7 +1200,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v)
(rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
rose_neigh->dev ? rose_neigh->dev->name : "???",
rose_neigh->count,
- rose_neigh->use,
+ refcount_read(&rose_neigh->use) - rose_neigh->count - 1,
(rose_neigh->dce_mode) ? "DCE" : "DTE",
(rose_neigh->restarted) ? "yes" : "no",
ax25_display_timer(&rose_neigh->t0timer) / HZ,
@@ -1295,18 +1305,22 @@ void __exit rose_rt_free(void)
struct rose_neigh *s, *rose_neigh = rose_neigh_list;
struct rose_node *t, *rose_node = rose_node_list;
struct rose_route *u, *rose_route = rose_route_list;
+ int i;
while (rose_neigh != NULL) {
s = rose_neigh;
rose_neigh = rose_neigh->next;
rose_remove_neigh(s);
+ rose_neigh_put(s);
}
while (rose_node != NULL) {
t = rose_node;
rose_node = rose_node->next;
+ for (i = 0; i < t->count; i++)
+ rose_neigh_put(t->neighbour[i]);
rose_remove_node(t);
}
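
The rose_route.c hunks above replace the driver's open-coded `use` counter with a refcount_t manipulated through rose_neigh_hold()/rose_neigh_put(): the neighbour list owns the initial reference, every node-table slot and forwarded route takes one more, and the object is freed on the final put (which is why the /proc line now prints refcount_read() minus `count` minus one, recovering the old "use" value). Below is a minimal userspace sketch of the same hold/put discipline, using C11 atomics in place of the kernel's refcount_t; the names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdlib.h>

struct neigh {
	atomic_int use;		/* stands in for refcount_t */
	int number;
};

static struct neigh *neigh_alloc(int number)
{
	struct neigh *n = malloc(sizeof(*n));

	if (n) {
		atomic_init(&n->use, 1);	/* the list owns the initial ref */
		n->number = number;
	}
	return n;
}

static void neigh_hold(struct neigh *n)
{
	atomic_fetch_add_explicit(&n->use, 1, memory_order_relaxed);
}

static void neigh_put(struct neigh *n)
{
	/* free only when the last reference goes away */
	if (atomic_fetch_sub_explicit(&n->use, 1, memory_order_acq_rel) == 1)
		free(n);
}

int main(void)
{
	struct neigh *n = neigh_alloc(42);

	neigh_hold(n);	/* a route now also points at n */
	neigh_put(n);	/* the route drops its pointer */
	neigh_put(n);	/* the list drops its pointer: freed here */
	return 0;
}

The acquire/release ordering on the final decrement stands in for the barriers refcount_t supplies in the kernel, ensuring the freeing path observes all prior writes to the object.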
diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c
index 020369c49587..bb60a1654d61 100644
--- a/net/rose/rose_timer.c
+++ b/net/rose/rose_timer.c
@@ -180,7 +180,7 @@ static void rose_timer_expiry(struct timer_list *t)
break;
case ROSE_STATE_2: /* T3 */
- rose->neighbour->use--;
+ rose_neigh_put(rose->neighbour);
rose_disconnect(sk, ETIMEDOUT, -1, -1);
break;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3336dcfb4515..568ff8797c39 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -547,7 +547,9 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
{
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = 0;
+ addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
+ addr->v6.sin6_scope_id = 0;
}
/* Initialize sk->sk_rcv_saddr from sctp_addr. */
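
The two added stores close a potential kernel-to-user information leak: `union sctp_addr` can still hold bytes from an earlier use (an IPv4 fill, for instance), so writing only family, port and address leaves sin6_flowinfo and sin6_scope_id stale if the structure is later copied out whole. A small userspace illustration of the underlying C pitfall, intended only to show why every field must be written explicitly:

#include <stdio.h>
#include <netinet/in.h>

int main(void)
{
	struct sockaddr_in6 a;		/* indeterminate until written */
	struct sockaddr_in6 b = { 0 };	/* every byte zeroed */

	a.sin6_family = AF_INET6;
	a.sin6_port = 0;
	a.sin6_addr = in6addr_loopback;
	/* a.sin6_flowinfo and a.sin6_scope_id still hold garbage here */

	b.sin6_family = AF_INET6;
	b.sin6_addr = in6addr_loopback;
	printf("b: flowinfo=%u scope_id=%u (guaranteed zero)\n",
	       b.sin6_flowinfo, b.sin6_scope_id);
	return 0;
}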
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index fe92e5fa95b4..dcc8a1d5851e 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -105,12 +105,14 @@ static int virtio_transport_fill_skb(struct sk_buff *skb,
size_t len,
bool zcopy)
{
+ struct msghdr *msg = info->msg;
+
if (zcopy)
- return __zerocopy_sg_from_iter(info->msg, NULL, skb,
- &info->msg->msg_iter, len, NULL);
+ return __zerocopy_sg_from_iter(msg, NULL, skb,
+ &msg->msg_iter, len, NULL);
virtio_vsock_skb_put(skb, len);
- return skb_copy_datagram_from_iter(skb, 0, &info->msg->msg_iter, len);
+ return skb_copy_datagram_from_iter_full(skb, 0, &msg->msg_iter, len);
}
static void virtio_transport_init_hdr(struct sk_buff *skb,
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 9a5d85cfd1fb..139d5e87dc95 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -75,11 +75,13 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A76AE 0xD0E
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
+#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
@@ -119,9 +121,11 @@
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
+#define QCOM_CPU_PART_ORYON_X1 0x001
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
@@ -129,6 +133,7 @@
#define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01
+#define HISI_CPU_PART_HIP09 0xD02
#define HISI_CPU_PART_HIP12 0xD06
#define APPLE_CPU_PART_M1_ICESTORM 0x022
@@ -159,11 +164,13 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
@@ -196,13 +203,26 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
+#define MIDR_QCOM_ORYON_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_ORYON_X1)
+
+/*
+ * NOTES:
+ * - Qualcomm Kryo 5XX Prime / Gold ID themselves as MIDR_CORTEX_A77
+ * - Qualcomm Kryo 5XX Silver IDs itself as MIDR_QCOM_KRYO_4XX_SILVER
+ * - Qualcomm Kryo 6XX Prime IDs itself as MIDR_CORTEX_X1
+ * - Qualcomm Kryo 6XX Gold IDs itself as ARM_CPU_PART_CORTEX_A78
+ * - Qualcomm Kryo 6XX Silver IDs itself as MIDR_CORTEX_A55
+ */
+
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
#define MIDR_HISI_HIP12 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP12)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
@@ -291,6 +311,14 @@ static inline u32 __attribute_const__ read_cpuid_id(void)
return read_cpuid(MIDR_EL1);
}
+struct target_impl_cpu {
+ u64 midr;
+ u64 revidr;
+ u64 aidr;
+};
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
+
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
return read_cpuid(MPIDR_EL1);
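
The new part numbers above are consumed through MIDR_CPU_MODEL(), which packs an implementer code and a part number into MIDR_EL1's field layout so the result can be compared against the live register. A hedged userspace sketch of that matching; the MIDR_EL1 bit positions (implementer [31:24], architecture [19:16], part number [15:4]), the 0x41 ARM implementer code and the sysfs path are standard arm64 facts assumed here, not taken from this diff.

#include <stdio.h>
#include <stdint.h>

#define MIDR_MODEL(imp, part) \
	(((uint32_t)(imp) << 24) | (0xfu << 16) | ((uint32_t)(part) << 4))
#define MIDR_MODEL_MASK ((0xffu << 24) | (0xfu << 16) | (0xfffu << 4))

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/regs/identification/midr_el1";
	unsigned long long midr;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("midr_el1");
		return 1;
	}
	if (fscanf(f, "%llx", &midr) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* 0xD0E is ARM_CPU_PART_CORTEX_A76AE from the hunk above */
	if (((uint32_t)midr & MIDR_MODEL_MASK) == MIDR_MODEL(0x41, 0xD0E))
		puts("Cortex-A76AE");
	else
		printf("MIDR %#llx\n", midr);
	return 0;
}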
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index eaeda001784e..077c5437f521 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -1,18 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
* Copyright IBM Corp. 2007
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index ee176236c2be..06fc0479a23f 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -218,6 +218,7 @@
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* SNP cache coherency software work around not needed */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */
@@ -456,10 +457,14 @@
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
+
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */
+#define X86_FEATURE_GP_ON_USER_CPUID (20*32+17) /* User CPUID faulting */
+
#define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
@@ -487,6 +492,9 @@
#define X86_FEATURE_PREFER_YMM (21*32+ 8) /* Avoid ZMM registers due to downclocking */
#define X86_FEATURE_APX (21*32+ 9) /* Advanced Performance Extensions */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
+#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
+#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
+#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
/*
* BUG word(s)
@@ -542,5 +550,5 @@
#define X86_BUG_OLD_MICROCODE X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
-
+#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 5cfb5d74dd5f..b65c3ba5fa14 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -419,6 +419,7 @@
#define DEBUGCTLMSR_FREEZE_PERFMON_ON_PMI (1UL << 12)
#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14
#define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
+#define DEBUGCTLMSR_RTM_DEBUG BIT(15)
#define MSR_PEBS_FRONTEND 0x000003f7
@@ -733,6 +734,11 @@
#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+/* AMD Hardware Feedback Support MSRs */
+#define MSR_AMD_WORKLOAD_CLASS_CONFIG 0xc0000500
+#define MSR_AMD_WORKLOAD_CLASS_ID 0xc0000501
+#define MSR_AMD_WORKLOAD_HRST 0xc0000502
+
/* AMD Last Branch Record MSRs */
#define MSR_AMD64_LBR_SELECT 0xc000010e
@@ -831,6 +837,7 @@
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
#define MSR_K7_HWCR_IRPERF_EN_BIT 30
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+#define MSR_K7_HWCR_CPUID_USER_DIS_BIT 35
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
#define MSR_K7_HWCR_CPB_DIS_BIT 25
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 6f3499507c5e..0f15d683817d 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -965,7 +965,13 @@ struct kvm_tdx_cmd {
struct kvm_tdx_capabilities {
__u64 supported_attrs;
__u64 supported_xfam;
- __u64 reserved[254];
+
+ __u64 kernel_tdvmcallinfo_1_r11;
+ __u64 user_tdvmcallinfo_1_r11;
+ __u64 kernel_tdvmcallinfo_1_r12;
+ __u64 user_tdvmcallinfo_1_r12;
+
+ __u64 reserved[250];
/* Configurable CPUID bits for userspace */
struct kvm_cpuid2 cpuid;
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 7ad056219115..a40cc861b3a7 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -2,10 +2,8 @@
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
-#include <linux/const.h>
#include <vdso/bits.h>
#include <uapi/linux/bits.h>
-#include <asm/bitsperlong.h>
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
@@ -50,10 +48,14 @@
(type_max(t) << (l) & \
type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
+#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l)
+
#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l)
/*
* Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
@@ -79,28 +81,9 @@
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
* disable the input check if that is the case.
*/
-#define GENMASK_INPUT_CHECK(h, l) 0
+#define GENMASK(h, l) __GENMASK(h, l)
+#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l)
#endif /* !defined(__ASSEMBLY__) */
-#define GENMASK(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
-
-#if !defined(__ASSEMBLY__)
-/*
- * Missing asm support
- *
- * __GENMASK_U128() depends on _BIT128() which would not work
- * in the asm code, as it shifts an 'unsigned __int128' data
- * type instead of direct representation of 128 bit constants
- * such as long and unsigned long. The fundamental problem is
- * that a 128 bit constant will get silently truncated by the
- * gcc compiler.
- */
-#define GENMASK_U128(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
-#endif
-
#endif /* __LINUX_BITS_H */
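
After this cleanup the C side of the tools header derives GENMASK()/GENMASK_ULL() from GENMASK_TYPE(), while assembly falls back to the bare __GENMASK variants; the values produced are unchanged. A standalone sketch of what such a mask evaluates to — the local macro mirrors the kernel definition's result for valid h >= l but deliberately omits the input-checking machinery:

#include <stdio.h>

#define MY_GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
	/* bits 7..4 set */
	printf("GENMASK(7, 4)  = %#lx\n", MY_GENMASK(7, 4));	/* 0xf0 */
	/* bits 31..0 set (on a 64-bit unsigned long) */
	printf("GENMASK(31, 0) = %#lx\n", MY_GENMASK(31, 0));	/* 0xffffffff */
	return 0;
}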
diff --git a/tools/include/linux/cfi_types.h b/tools/include/linux/cfi_types.h
index 6b8713675765..685f7181780f 100644
--- a/tools/include/linux/cfi_types.h
+++ b/tools/include/linux/cfi_types.h
@@ -41,5 +41,28 @@
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_CFI_CLANG
+#define DEFINE_CFI_TYPE(name, func) \
+ /* \
+ * Force a reference to the function so the compiler generates \
+ * __kcfi_typeid_<func>. \
+ */ \
+ __ADDRESSABLE(func); \
+ /* u32 name __ro_after_init = __kcfi_typeid_<func> */ \
+ extern u32 name; \
+ asm ( \
+ " .pushsection .data..ro_after_init,\"aw\",\%progbits \n" \
+ " .type " #name ",\%object \n" \
+ " .globl " #name " \n" \
+ " .p2align 2, 0x0 \n" \
+ #name ": \n" \
+ " .4byte __kcfi_typeid_" #func " \n" \
+ " .size " #name ", 4 \n" \
+ " .popsection \n" \
+ );
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_CFI_TYPES_H */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 2892a45023af..04e0077fb4c9 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -852,8 +852,14 @@ __SYSCALL(__NR_removexattrat, sys_removexattrat)
#define __NR_open_tree_attr 467
__SYSCALL(__NR_open_tree_attr, sys_open_tree_attr)
+/* fs/inode.c */
+#define __NR_file_getattr 468
+__SYSCALL(__NR_file_getattr, sys_file_getattr)
+#define __NR_file_setattr 469
+__SYSCALL(__NR_file_setattr, sys_file_setattr)
+
#undef __NR_syscalls
-#define __NR_syscalls 468
+#define __NR_syscalls 470
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 7415a3863891..f0f0d49d2544 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -178,6 +178,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_NOTIFY 37
#define KVM_EXIT_LOONGARCH_IOCSR 38
#define KVM_EXIT_MEMORY_FAULT 39
+#define KVM_EXIT_TDX 40
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -447,6 +448,31 @@ struct kvm_run {
__u64 gpa;
__u64 size;
} memory_fault;
+ /* KVM_EXIT_TDX */
+ struct {
+ __u64 flags;
+ __u64 nr;
+ union {
+ struct {
+ __u64 ret;
+ __u64 data[5];
+ } unknown;
+ struct {
+ __u64 ret;
+ __u64 gpa;
+ __u64 size;
+ } get_quote;
+ struct {
+ __u64 ret;
+ __u64 leaf;
+ __u64 r11, r12, r13, r14;
+ } get_tdvmcall_info;
+ struct {
+ __u64 ret;
+ __u64 vector;
+ } setup_event_notify;
+ };
+ } tdx;
/* Fix the size of the union. */
char padding[256];
};
@@ -935,6 +961,7 @@ struct kvm_enable_cap {
#define KVM_CAP_ARM_EL2 240
#define KVM_CAP_ARM_EL2_E2H0 241
#define KVM_CAP_RISCV_MP_STATE_RESET 242
+#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
struct kvm_irq_routing_irqchip {
__u32 irqchip;
diff --git a/tools/perf/arch/arm/entry/syscalls/syscall.tbl b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
index 27c1d5ebcd91..b07e699aaa3c 100644
--- a/tools/perf/arch/arm/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
@@ -482,3 +482,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 1e8c44c7b614..7a7049c2c307 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -382,3 +382,5 @@
465 n64 listxattrat sys_listxattrat
466 n64 removexattrat sys_removexattrat
467 n64 open_tree_attr sys_open_tree_attr
+468 n64 file_getattr sys_file_getattr
+469 n64 file_setattr sys_file_setattr
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 9a084bdb8926..b453e80dfc00 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -558,3 +558,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index a4569b96ef06..8a6744d658db 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -470,3 +470,5 @@
465 common listxattrat sys_listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr sys_file_setattr
diff --git a/tools/perf/arch/sh/entry/syscalls/syscall.tbl b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
index 52a7652fcff6..5e9c9eff5539 100644
--- a/tools/perf/arch/sh/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
@@ -471,3 +471,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
index 83e45eb6c095..ebb7d06d1044 100644
--- a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
@@ -513,3 +513,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
index ac007ea00979..4877e16da69a 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
@@ -473,3 +473,5 @@
465 i386 listxattrat sys_listxattrat
466 i386 removexattrat sys_removexattrat
467 i386 open_tree_attr sys_open_tree_attr
+468 i386 file_getattr sys_file_getattr
+469 i386 file_setattr sys_file_setattr
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index cfb5ca41e30d..92cf0fe2291e 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -391,6 +391,8 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/arch/x86/tests/topdown.c b/tools/perf/arch/x86/tests/topdown.c
index 8d0ea7a4bbc1..1eba3b4594ef 100644
--- a/tools/perf/arch/x86/tests/topdown.c
+++ b/tools/perf/arch/x86/tests/topdown.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "arch-tests.h"
#include "../util/topdown.h"
+#include "debug.h"
#include "evlist.h"
#include "parse-events.h"
#include "pmu.h"
diff --git a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
index f657a77314f8..374e4cb788d8 100644
--- a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
@@ -438,3 +438,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
index aad572a78d7f..12387ea88b9a 100644
--- a/tools/perf/bench/inject-buildid.c
+++ b/tools/perf/bench/inject-buildid.c
@@ -85,7 +85,7 @@ static int add_dso(const char *fpath, const struct stat *sb __maybe_unused,
if (typeflag == FTW_D || typeflag == FTW_SL)
return 0;
- if (filename__read_build_id(fpath, &bid) < 0)
+ if (filename__read_build_id(fpath, &bid, /*block=*/true) < 0)
return 0;
dso->name = realpath(fpath, NULL);
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index c98104481c8a..2e0f2004696a 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -180,7 +180,7 @@ static int build_id_cache__add_file(const char *filename, struct nsinfo *nsi)
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
- err = filename__read_build_id(filename, &bid);
+ err = filename__read_build_id(filename, &bid, /*block=*/true);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
@@ -204,7 +204,7 @@ static int build_id_cache__remove_file(const char *filename, struct nsinfo *nsi)
int err;
nsinfo__mountns_enter(nsi, &nsc);
- err = filename__read_build_id(filename, &bid);
+ err = filename__read_build_id(filename, &bid, /*block=*/true);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
@@ -280,7 +280,7 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
if (!dso__build_id_filename(dso, filename, sizeof(filename), false))
return true;
- if (filename__read_build_id(filename, &bid) == -1) {
+ if (filename__read_build_id(filename, &bid, /*block=*/true) == -1) {
if (errno == ENOENT)
return false;
@@ -309,7 +309,7 @@ static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
int err;
nsinfo__mountns_enter(nsi, &nsc);
- err = filename__read_build_id(filename, &bid);
+ err = filename__read_build_id(filename, &bid, /*block=*/true);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 40ba6a94f719..a114b3fa1bea 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -680,12 +680,12 @@ static int dso__read_build_id(struct dso *dso)
mutex_lock(dso__lock(dso));
nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
- if (filename__read_build_id(dso__long_name(dso), &bid) > 0)
+ if (filename__read_build_id(dso__long_name(dso), &bid, /*block=*/true) > 0)
dso__set_build_id(dso, &bid);
else if (dso__nsinfo(dso)) {
char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));
- if (new_name && filename__read_build_id(new_name, &bid) > 0)
+ if (new_name && filename__read_build_id(new_name, &bid, /*block=*/true) > 0)
dso__set_build_id(dso, &bid);
free(new_name);
}
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
index 93baee2eae42..6132f1af3e22 100644
--- a/tools/perf/tests/sdt.c
+++ b/tools/perf/tests/sdt.c
@@ -31,7 +31,7 @@ static int build_id_cache__add_file(const char *filename)
struct build_id bid = { .size = 0, };
int err;
- err = filename__read_build_id(filename, &bid);
+ err = filename__read_build_id(filename, &bid, /*block=*/true);
if (err < 0) {
pr_debug("Failed to read build id of %s\n", filename);
return err;
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
index a15ac2fa4b20..f291ab4f94eb 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h
@@ -90,10 +90,28 @@
#define DN_ATTRIB 0x00000020 /* File changed attributes */
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+/* Reserved kernel ranges [-100], [-10000, -40000]. */
#define AT_FDCWD -100 /* Special value for dirfd used to
indicate openat should use the
current working directory. */
+/*
+ * The concept of process and threads in userland and the kernel is a confusing
+ * one - within the kernel every thread is a 'task' with its own individual PID,
+ * however from userland's point of view threads are grouped by a single PID,
+ * which is that of the 'thread group leader', typically the first thread
+ * spawned.
+ *
+ * To cut the Gordian knot, for internal kernel usage, we use
+ * PIDFD_SELF_THREAD to refer to the current thread (or task from a kernel
+ * perspective), and PIDFD_SELF_THREAD_GROUP to refer to the current thread
+ * group leader...
+ */
+#define PIDFD_SELF_THREAD -10000 /* Current thread. */
+#define PIDFD_SELF_THREAD_GROUP -10001 /* Current thread group leader. */
+
+#define FD_PIDFS_ROOT -10002 /* Root of the pidfs filesystem */
+#define FD_INVALID -10009 /* Invalid file descriptor: -10000 - EBADF = -10009 */
/* Generic flags for the *at(2) family of syscalls. */
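
Because PIDFD_SELF_THREAD and PIDFD_SELF_THREAD_GROUP are sentinel descriptor values, a task can act on itself without ever opening a pidfd. A hedged sketch follows; that pidfd-taking syscalls such as pidfd_send_signal(2) accept the sentinels is an assumption of this example (it belongs to the wider pidfd work, not to this header alone), and older kernels will simply fail with EBADF.

#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/syscall.h>

#define PIDFD_SELF_THREAD	-10000	/* value from the header above */

int main(void)
{
	/*
	 * A null signal (sig == 0) performs the checks without delivering
	 * anything; with the sentinel there is no pidfd lifetime to manage.
	 */
	if (syscall(SYS_pidfd_send_signal, PIDFD_SELF_THREAD, 0, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		return 1;
	}
	puts("sentinel accepted");
	return 0;
}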
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fs.h b/tools/perf/trace/beauty/include/uapi/linux/fs.h
index 0098b0ce8ccb..0bd678a4a10e 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fs.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fs.h
@@ -60,6 +60,17 @@
#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */
#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */
+/*
+ * The root inode of procfs is guaranteed to always have the same inode number.
+ * For programs that make heavy use of procfs, verifying that the root is a
+ * real procfs root and using openat2(RESOLVE_{NO_{XDEV,MAGICLINKS},BENEATH})
+ * will allow you to make sure you are never tricked into operating on the
+ * wrong procfs file.
+ */
+enum procfs_ino {
+ PROCFS_ROOT_INO = 1,
+};
+
struct file_clone_range {
__s64 src_fd;
__u64 src_offset;
@@ -91,6 +102,63 @@ struct fs_sysfs_path {
__u8 name[128];
};
+/* Protection info capability flags */
+#define LBMD_PI_CAP_INTEGRITY (1 << 0)
+#define LBMD_PI_CAP_REFTAG (1 << 1)
+
+/* Checksum types for Protection Information */
+#define LBMD_PI_CSUM_NONE 0
+#define LBMD_PI_CSUM_IP 1
+#define LBMD_PI_CSUM_CRC16_T10DIF 2
+#define LBMD_PI_CSUM_CRC64_NVME 4
+
+/* sizeof first published struct */
+#define LBMD_SIZE_VER0 16
+
+/*
+ * Logical block metadata capability descriptor
+ * If the device does not support metadata, all the fields will be zero.
+ * Applications must check lbmd_flags to determine whether metadata is
+ * supported or not.
+ */
+struct logical_block_metadata_cap {
+ /* Bitmask of logical block metadata capability flags */
+ __u32 lbmd_flags;
+ /*
+ * The amount of data described by each unit of logical block
+ * metadata
+ */
+ __u16 lbmd_interval;
+ /*
+ * Size in bytes of the logical block metadata associated with each
+ * interval
+ */
+ __u8 lbmd_size;
+ /*
+ * Size in bytes of the opaque block tag associated with each
+ * interval
+ */
+ __u8 lbmd_opaque_size;
+ /*
+ * Offset in bytes of the opaque block tag within the logical block
+ * metadata
+ */
+ __u8 lbmd_opaque_offset;
+ /* Size in bytes of the T10 PI tuple associated with each interval */
+ __u8 lbmd_pi_size;
+ /* Offset in bytes of T10 PI tuple within the logical block metadata */
+ __u8 lbmd_pi_offset;
+ /* T10 PI guard tag type */
+ __u8 lbmd_guard_tag_type;
+ /* Size in bytes of the T10 PI application tag */
+ __u8 lbmd_app_tag_size;
+ /* Size in bytes of the T10 PI reference tag */
+ __u8 lbmd_ref_tag_size;
+ /* Size in bytes of the T10 PI storage tag */
+ __u8 lbmd_storage_tag_size;
+ __u8 pad;
+};
+
/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
#define FILE_DEDUPE_RANGE_SAME 0
#define FILE_DEDUPE_RANGE_DIFFERS 1
@@ -149,6 +217,24 @@ struct fsxattr {
};
/*
+ * Variable size structure for file_[sg]et_attr().
+ *
+ * Note. This is alternative to the structure 'struct file_kattr'/'struct fsxattr'.
+ * As this structure is passed to/from userspace with its size, this can
+ * be versioned based on the size.
+ */
+struct file_attr {
+ __u64 fa_xflags; /* xflags field value (get/set) */
+ __u32 fa_extsize; /* extsize field value (get/set)*/
+ __u32 fa_nextents; /* nextents field value (get) */
+ __u32 fa_projid; /* project identifier (get/set) */
+ __u32 fa_cowextsize; /* CoW extsize field value (get/set) */
+};
+
+#define FILE_ATTR_SIZE_VER0 24
+#define FILE_ATTR_SIZE_LATEST FILE_ATTR_SIZE_VER0
+
+/*
* Flags for the fsx_xflags field
*/
#define FS_XFLAG_REALTIME 0x00000001 /* data in realtime volume */
@@ -247,6 +333,8 @@ struct fsxattr {
* also /sys/kernel/debug/ for filesystems with debugfs exports
*/
#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path)
+/* Get logical block metadata capability details */
+#define FS_IOC_GETLBMD_CAP _IOWR(0x15, 2, struct logical_block_metadata_cap)
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
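
FS_IOC_GETLBMD_CAP fills the logical_block_metadata_cap descriptor defined above; per the struct comment, a device without metadata support returns all-zero fields, so callers must key off lbmd_flags. A minimal hedged sketch of querying it — this is new uapi, so <linux/fs.h> only provides these names on kernels that carry this header:

#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FS_IOC_GETLBMD_CAP, struct logical_block_metadata_cap */

int main(int argc, char **argv)
{
	struct logical_block_metadata_cap cap;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <block-device>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, FS_IOC_GETLBMD_CAP, &cap) < 0) {
		perror("FS_IOC_GETLBMD_CAP");	/* ENOTTY on older kernels */
		close(fd);
		return 1;
	}
	if (cap.lbmd_flags & LBMD_PI_CAP_INTEGRITY)
		printf("PI: interval=%u pi_size=%u guard=%u\n",
		       cap.lbmd_interval, cap.lbmd_pi_size,
		       cap.lbmd_guard_tag_type);
	else
		puts("no logical block metadata");
	close(fd);
	return 0;
}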
diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
index 3b93fb906e3c..ed3aed264aeb 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/prctl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
@@ -244,6 +244,8 @@ struct prctl_mm_map {
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
/* Unused; kept only for source compatibility */
# define PR_MTE_TCF_SHIFT 1
+/* MTE tag check store only */
+# define PR_MTE_STORE_ONLY (1UL << 19)
/* RISC-V pointer masking tag length */
# define PR_PMLEN_SHIFT 24
# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT)
@@ -255,7 +257,12 @@ struct prctl_mm_map {
/* Dispatch syscalls to a userspace handler */
#define PR_SET_SYSCALL_USER_DISPATCH 59
# define PR_SYS_DISPATCH_OFF 0
-# define PR_SYS_DISPATCH_ON 1
+/* Enable dispatch except for the specified range */
+# define PR_SYS_DISPATCH_EXCLUSIVE_ON 1
+/* Enable dispatch for the specified range */
+# define PR_SYS_DISPATCH_INCLUSIVE_ON 2
+/* Legacy name for backwards compatibility */
+# define PR_SYS_DISPATCH_ON PR_SYS_DISPATCH_EXCLUSIVE_ON
/* The control values for the user space selector when dispatch is enabled */
# define SYSCALL_DISPATCH_FILTER_ALLOW 0
# define SYSCALL_DISPATCH_FILTER_BLOCK 1
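
The split keeps the legacy behaviour under a clearer name: PR_SYS_DISPATCH_EXCLUSIVE_ON traps every syscall issued outside the given [start, start+len) range (so a libc region can be exempted), while the new PR_SYS_DISPATCH_INCLUSIVE_ON traps only syscalls issued from inside it. A hedged sketch of the legacy mode with no exempt range; trapped syscalls are delivered as SIGSYS and the selector byte toggles filtering, with fallback defines in case the installed headers predate these names:

#define _GNU_SOURCE
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

#ifndef PR_SET_SYSCALL_USER_DISPATCH
#define PR_SET_SYSCALL_USER_DISPATCH	59
#endif
#ifndef PR_SYS_DISPATCH_EXCLUSIVE_ON
#define PR_SYS_DISPATCH_EXCLUSIVE_ON	1
#endif

static volatile char selector;	/* SYSCALL_DISPATCH_FILTER_ALLOW == 0 */

static void sigsys_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig; (void)si; (void)ctx;
	selector = 0;	/* re-ALLOW before returning so sigreturn passes */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = sigsys_handler,
				.sa_flags = SA_SIGINFO };

	sigaction(SIGSYS, &sa, NULL);
	/* start = len = 0: no exempt region once the selector blocks */
	if (prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_EXCLUSIVE_ON,
		  0, 0, &selector)) {
		perror("prctl");
		return 1;
	}
	selector = 1;		/* SYSCALL_DISPATCH_FILTER_BLOCK */
	syscall(SYS_getpid);	/* trapped: arrives as SIGSYS instead */
	write(1, "back after SIGSYS\n", 18);
	return 0;
}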
diff --git a/tools/perf/trace/beauty/include/uapi/linux/vhost.h b/tools/perf/trace/beauty/include/uapi/linux/vhost.h
index d4b3e2ae1314..c57674a6aa0d 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/vhost.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/vhost.h
@@ -235,4 +235,39 @@
*/
#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \
struct vhost_vring_state)
+
+/* Extended features manipulation */
+#define VHOST_GET_FEATURES_ARRAY _IOR(VHOST_VIRTIO, 0x83, \
+ struct vhost_features_array)
+#define VHOST_SET_FEATURES_ARRAY _IOW(VHOST_VIRTIO, 0x83, \
+ struct vhost_features_array)
+
+/* fork_owner values for vhost */
+#define VHOST_FORK_OWNER_KTHREAD 0
+#define VHOST_FORK_OWNER_TASK 1
+
+/**
+ * VHOST_SET_FORK_FROM_OWNER - Set the fork_owner flag for the vhost device.
+ * This ioctl must be called before VHOST_SET_OWNER.
+ * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y
+ *
+ * @param fork_owner: An 8-bit value that determines the vhost thread mode
+ *
+ * When fork_owner is set to VHOST_FORK_OWNER_TASK (default value):
+ * - Vhost will create vhost workers as tasks forked from the owner,
+ * inheriting all of the owner's attributes.
+ *
+ * When fork_owner is set to VHOST_FORK_OWNER_KTHREAD:
+ * - Vhost will create vhost workers as kernel threads.
+ */
+#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x84, __u8)
+
+/**
+ * VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device.
+ * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y
+ *
+ * @return: An 8-bit value indicating the current thread mode.
+ */
+#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x85, __u8)
+
#endif
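
VHOST_SET_FORK_FROM_OWNER carries a plain __u8 and, per the comment above, must precede VHOST_SET_OWNER and exists only with CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y. A hedged sketch against /dev/vhost-net (any vhost character device would do); on kernels without this uapi the first ioctl simply fails:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>	/* VHOST_SET_FORK_FROM_OWNER, VHOST_SET_OWNER */

int main(void)
{
	__u8 mode = VHOST_FORK_OWNER_KTHREAD;	/* kernel-thread workers */
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* must come before VHOST_SET_OWNER, as the header requires */
	if (ioctl(fd, VHOST_SET_FORK_FROM_OWNER, &mode) < 0)
		perror("VHOST_SET_FORK_FROM_OWNER");
	else if (ioctl(fd, VHOST_SET_OWNER) < 0)
		perror("VHOST_SET_OWNER");
	close(fd);
	return 0;
}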
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index a7018a3b0437..bf7f3268b9a2 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -115,7 +115,7 @@ int filename__snprintf_build_id(const char *pathname, char *sbuild_id, size_t sb
struct build_id bid = { .size = 0, };
int ret;
- ret = filename__read_build_id(pathname, &bid);
+ ret = filename__read_build_id(pathname, &bid, /*block=*/true);
if (ret < 0)
return ret;
@@ -841,7 +841,7 @@ static int filename__read_build_id_ns(const char *filename,
int ret;
nsinfo__mountns_enter(nsi, &nsc);
- ret = filename__read_build_id(filename, bid);
+ ret = filename__read_build_id(filename, bid, /*block=*/true);
nsinfo__mountns_exit(&nsc);
return ret;
diff --git a/tools/perf/util/debuginfo.c b/tools/perf/util/debuginfo.c
index a44c70f93156..bb9ebd84ec2d 100644
--- a/tools/perf/util/debuginfo.c
+++ b/tools/perf/util/debuginfo.c
@@ -110,8 +110,12 @@ struct debuginfo *debuginfo__new(const char *path)
if (!dso)
goto out;
- /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */
- if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0)
+ /*
+ * Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO. Don't block
+ * in case the path isn't for a regular file.
+ */
+ assert(!dso__has_build_id(dso));
+ if (filename__read_build_id(path, &bid, /*block=*/false) > 0)
dso__set_build_id(dso, &bid);
for (type = distro_dwarf_types;
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index 0a7645c7fae7..64c1d65b0149 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -81,13 +81,13 @@ static int dsos__read_build_ids_cb(struct dso *dso, void *data)
return 0;
}
nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
- if (filename__read_build_id(dso__long_name(dso), &bid) > 0) {
+ if (filename__read_build_id(dso__long_name(dso), &bid, /*block=*/true) > 0) {
dso__set_build_id(dso, &bid);
args->have_build_id = true;
} else if (errno == ENOENT && dso__nsinfo(dso)) {
char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));
- if (new_name && filename__read_build_id(new_name, &bid) > 0) {
+ if (new_name && filename__read_build_id(new_name, &bid, /*block=*/true) > 0) {
dso__set_build_id(dso, &bid);
args->have_build_id = true;
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 6d2c280a1730..033c79231a54 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -902,7 +902,7 @@ out_close:
#else // HAVE_LIBBFD_BUILDID_SUPPORT
-static int read_build_id(const char *filename, struct build_id *bid)
+static int read_build_id(const char *filename, struct build_id *bid, bool block)
{
size_t size = sizeof(bid->data);
int fd, err = -1;
@@ -911,7 +911,7 @@ static int read_build_id(const char *filename, struct build_id *bid)
if (size < BUILD_ID_SIZE)
goto out;
- fd = open(filename, O_RDONLY);
+ fd = open(filename, block ? O_RDONLY : (O_RDONLY | O_NONBLOCK));
if (fd < 0)
goto out;
@@ -934,7 +934,7 @@ out:
#endif // HAVE_LIBBFD_BUILDID_SUPPORT
-int filename__read_build_id(const char *filename, struct build_id *bid)
+int filename__read_build_id(const char *filename, struct build_id *bid, bool block)
{
struct kmod_path m = { .name = NULL, };
char path[PATH_MAX];
@@ -958,9 +958,10 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
}
close(fd);
filename = path;
+ block = true;
}
- err = read_build_id(filename, bid);
+ err = read_build_id(filename, bid, block);
if (m.comp)
unlink(filename);
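
The new `block` parameter maps directly onto O_NONBLOCK at open time, which is what makes the non-blocking call sites above safe on arbitrary paths: O_NONBLOCK is a no-op for regular files, but it stops open(2) from hanging on a writer-less FIFO — the situation the debuginfo.c comment guards against. A tiny standalone illustration of that property:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	char buf[4];
	int fd;

	mkfifo("demo.fifo", 0600);
	/* O_RDONLY alone would block until a writer opens the FIFO;
	 * O_NONBLOCK returns at once and the read simply sees EOF. */
	fd = open("demo.fifo", O_RDONLY | O_NONBLOCK);
	if (fd >= 0) {
		printf("read returned %zd\n", read(fd, buf, sizeof(buf)));
		close(fd);
	}
	unlink("demo.fifo");
	return 0;
}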
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 7201494c5c20..41e4ebe5eac5 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -4,7 +4,6 @@
#include <errno.h>
#include <unistd.h>
-#include <stdio.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
@@ -86,13 +85,10 @@ int filename__read_debuglink(const char *filename __maybe_unused,
/*
* Just try PT_NOTE header otherwise fails
*/
-int filename__read_build_id(const char *filename, struct build_id *bid)
+int filename__read_build_id(const char *filename, struct build_id *bid, bool block)
{
- FILE *fp;
- int ret = -1;
+ int fd, ret = -1;
bool need_swap = false, elf32;
- u8 e_ident[EI_NIDENT];
- int i;
union {
struct {
Elf32_Ehdr ehdr32;
@@ -103,28 +99,27 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
Elf64_Phdr *phdr64;
};
} hdrs;
- void *phdr;
- size_t phdr_size;
- void *buf = NULL;
- size_t buf_size = 0;
+ void *phdr, *buf = NULL;
+ ssize_t phdr_size, ehdr_size, buf_size = 0;
- fp = fopen(filename, "r");
- if (fp == NULL)
+ fd = open(filename, block ? O_RDONLY : (O_RDONLY | O_NONBLOCK));
+ if (fd < 0)
return -1;
- if (fread(e_ident, sizeof(e_ident), 1, fp) != 1)
+ if (read(fd, hdrs.ehdr32.e_ident, EI_NIDENT) != EI_NIDENT)
goto out;
- if (memcmp(e_ident, ELFMAG, SELFMAG) ||
- e_ident[EI_VERSION] != EV_CURRENT)
+ if (memcmp(hdrs.ehdr32.e_ident, ELFMAG, SELFMAG) ||
+ hdrs.ehdr32.e_ident[EI_VERSION] != EV_CURRENT)
goto out;
- need_swap = check_need_swap(e_ident[EI_DATA]);
- elf32 = e_ident[EI_CLASS] == ELFCLASS32;
+ need_swap = check_need_swap(hdrs.ehdr32.e_ident[EI_DATA]);
+ elf32 = hdrs.ehdr32.e_ident[EI_CLASS] == ELFCLASS32;
+ ehdr_size = (elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64)) - EI_NIDENT;
- if (fread(elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64,
- elf32 ? sizeof(hdrs.ehdr32) : sizeof(hdrs.ehdr64),
- 1, fp) != 1)
+ if (read(fd,
+ (elf32 ? (void *)&hdrs.ehdr32 : (void *)&hdrs.ehdr64) + EI_NIDENT,
+ ehdr_size) != ehdr_size)
goto out;
if (need_swap) {
@@ -138,14 +133,18 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
hdrs.ehdr64.e_phnum = bswap_16(hdrs.ehdr64.e_phnum);
}
}
- phdr_size = elf32 ? hdrs.ehdr32.e_phentsize * hdrs.ehdr32.e_phnum
- : hdrs.ehdr64.e_phentsize * hdrs.ehdr64.e_phnum;
+ if ((elf32 && hdrs.ehdr32.e_phentsize != sizeof(Elf32_Phdr)) ||
+ (!elf32 && hdrs.ehdr64.e_phentsize != sizeof(Elf64_Phdr)))
+ goto out;
+
+ phdr_size = elf32 ? sizeof(Elf32_Phdr) * hdrs.ehdr32.e_phnum
+ : sizeof(Elf64_Phdr) * hdrs.ehdr64.e_phnum;
phdr = malloc(phdr_size);
if (phdr == NULL)
goto out;
- fseek(fp, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET);
- if (fread(phdr, phdr_size, 1, fp) != 1)
+ lseek(fd, elf32 ? hdrs.ehdr32.e_phoff : hdrs.ehdr64.e_phoff, SEEK_SET);
+ if (read(fd, phdr, phdr_size) != phdr_size)
goto out_free;
if (elf32)
@@ -153,8 +152,8 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
else
hdrs.phdr64 = phdr;
- for (i = 0; i < elf32 ? hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum; i++) {
- size_t p_filesz;
+ for (int i = 0; i < (elf32 ? hdrs.ehdr32.e_phnum : hdrs.ehdr64.e_phnum); i++) {
+ ssize_t p_filesz;
if (need_swap) {
if (elf32) {
@@ -180,8 +179,8 @@ int filename__read_build_id(const char *filename, struct build_id *bid)
goto out_free;
buf = tmp;
}
- fseek(fp, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET);
- if (fread(buf, p_filesz, 1, fp) != 1)
+ lseek(fd, elf32 ? hdrs.phdr32[i].p_offset : hdrs.phdr64[i].p_offset, SEEK_SET);
+ if (read(fd, buf, p_filesz) != p_filesz)
goto out_free;
ret = read_build_id(buf, p_filesz, bid, need_swap);
@@ -194,7 +193,7 @@ out_free:
free(buf);
free(phdr);
out:
- fclose(fp);
+ close(fd);
return ret;
}
@@ -324,7 +323,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
if (ret >= 0)
RC_CHK_ACCESS(dso)->is_64_bit = ret;
- if (filename__read_build_id(ss->name, &bid) > 0)
+ if (filename__read_build_id(ss->name, &bid, /*block=*/true) > 0)
dso__set_build_id(dso, &bid);
return 0;
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index e816e4220d33..3fed54de5401 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1869,14 +1869,14 @@ int dso__load(struct dso *dso, struct map *map)
/*
* Read the build id if possible. This is required for
- * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
+ * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work. Don't block in case the path
+ * isn't for a regular file.
*/
- if (!dso__has_build_id(dso) &&
- is_regular_file(dso__long_name(dso))) {
+ if (!dso__has_build_id(dso)) {
struct build_id bid = { .size = 0, };
__symbol__join_symfs(name, PATH_MAX, dso__long_name(dso));
- if (filename__read_build_id(name, &bid) > 0)
+ if (filename__read_build_id(name, &bid, /*block=*/false) > 0)
dso__set_build_id(dso, &bid);
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 3fb5d146d9b1..347106218799 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -140,7 +140,7 @@ struct symbol *dso__next_symbol(struct symbol *sym);
enum dso_type dso__type_fd(int fd);
-int filename__read_build_id(const char *filename, struct build_id *id);
+int filename__read_build_id(const char *filename, struct build_id *id, bool block);
int sysfs__read_build_id(const char *filename, struct build_id *bid);
int modules__parse(const char *filename, void *arg,
int (*process_module)(void *arg, const char *name,
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index cb2c1ace304a..fcd1fd13c30e 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -401,7 +401,7 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
nsi = nsinfo__new(event->pid);
nsinfo__mountns_enter(nsi, &nc);
- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+ rc = filename__read_build_id(event->filename, &bid, /*block=*/false) > 0 ? 0 : -1;
nsinfo__mountns_exit(&nc);
nsinfo__put(nsi);
diff --git a/tools/scripts/syscall.tbl b/tools/scripts/syscall.tbl
index 580b4e246aec..d1ae5e92c615 100644
--- a/tools/scripts/syscall.tbl
+++ b/tools/scripts/syscall.tbl
@@ -408,3 +408,5 @@
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
diff --git a/tools/testing/selftests/ublk/file_backed.c b/tools/testing/selftests/ublk/file_backed.c
index 2d93ac860bd5..cd9fe69ecce2 100644
--- a/tools/testing/selftests/ublk/file_backed.c
+++ b/tools/testing/selftests/ublk/file_backed.c
@@ -20,7 +20,7 @@ static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
struct io_uring_sqe *sqe[1];
ublk_io_alloc_sqes(t, sqe, 1);
- io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
+ io_uring_prep_fsync(sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/, IORING_FSYNC_DATASYNC);
io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
/* bit63 marks us as tgt io */
sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
@@ -42,7 +42,7 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
if (!sqe[0])
return -ENOMEM;
- io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/,
+ io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/,
addr,
iod->nr_sectors << 9,
iod->start_sector << 9);
@@ -56,19 +56,19 @@ static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
ublk_io_alloc_sqes(t, sqe, 3);
- io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+ io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
sqe[0]->user_data = build_user_data(tag,
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
- io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
+ io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
iod->nr_sectors << 9,
iod->start_sector << 9);
sqe[1]->buf_index = tag;
sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
- io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+ io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
return 2;
diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index 6512dfbdbce3..b71faba86c3b 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -432,7 +432,7 @@ static void ublk_thread_deinit(struct ublk_thread *t)
}
}
-static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
+static int ublk_queue_init(struct ublk_queue *q, unsigned long long extra_flags)
{
struct ublk_dev *dev = q->dev;
int depth = dev->dev_info.queue_depth;
@@ -446,6 +446,9 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
q->flags = dev->dev_info.flags;
q->flags |= extra_flags;
+ /* Cache fd in queue for fast path access */
+ q->ublk_fd = dev->fds[0];
+
cmd_buf_size = ublk_queue_cmd_buf_sz(q);
off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz();
q->io_cmd_buf = mmap(0, cmd_buf_size, PROT_READ,
@@ -481,9 +484,10 @@ static int ublk_queue_init(struct ublk_queue *q, unsigned extra_flags)
return -ENOMEM;
}
-static int ublk_thread_init(struct ublk_thread *t)
+static int ublk_thread_init(struct ublk_thread *t, unsigned long long extra_flags)
{
struct ublk_dev *dev = t->dev;
+ unsigned long long flags = dev->dev_info.flags | extra_flags;
int ring_depth = dev->tgt.sq_depth, cq_depth = dev->tgt.cq_depth;
int ret;
@@ -512,7 +516,17 @@ static int ublk_thread_init(struct ublk_thread *t)
io_uring_register_ring_fd(&t->ring);
- ret = io_uring_register_files(&t->ring, dev->fds, dev->nr_fds);
+ if (flags & UBLKS_Q_NO_UBLK_FIXED_FD) {
+ /* Register only backing files starting from index 1, excluding the ublk control device */
+ if (dev->nr_fds > 1) {
+ ret = io_uring_register_files(&t->ring, &dev->fds[1], dev->nr_fds - 1);
+ } else {
+ /* No backing files to register, skip file registration */
+ ret = 0;
+ }
+ } else {
+ ret = io_uring_register_files(&t->ring, dev->fds, dev->nr_fds);
+ }
if (ret) {
ublk_err("ublk dev %d thread %d register files failed %d\n",
t->dev->dev_info.dev_id, t->idx, ret);
@@ -626,9 +640,12 @@ int ublk_queue_io_cmd(struct ublk_thread *t, struct ublk_io *io)
/* These fields should be written once, never change */
ublk_set_sqe_cmd_op(sqe[0], cmd_op);
- sqe[0]->fd = 0; /* dev->fds[0] */
+ sqe[0]->fd = ublk_get_registered_fd(q, 0); /* dev->fds[0] */
sqe[0]->opcode = IORING_OP_URING_CMD;
- sqe[0]->flags = IOSQE_FIXED_FILE;
+ if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD)
+ sqe[0]->flags = 0; /* Use raw FD, not fixed file */
+ else
+ sqe[0]->flags = IOSQE_FIXED_FILE;
sqe[0]->rw_flags = 0;
cmd->tag = io->tag;
cmd->q_id = q->q_id;
@@ -832,6 +849,7 @@ struct ublk_thread_info {
unsigned idx;
sem_t *ready;
cpu_set_t *affinity;
+ unsigned long long extra_flags;
};
static void *ublk_io_handler_fn(void *data)
@@ -844,7 +862,7 @@ static void *ublk_io_handler_fn(void *data)
t->dev = info->dev;
t->idx = info->idx;
- ret = ublk_thread_init(t);
+ ret = ublk_thread_init(t, info->extra_flags);
if (ret) {
ublk_err("ublk dev %d thread %u init failed\n",
dev_id, t->idx);
@@ -934,6 +952,8 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
if (ctx->auto_zc_fallback)
extra_flags = UBLKS_Q_AUTO_BUF_REG_FALLBACK;
+ if (ctx->no_ublk_fixed_fd)
+ extra_flags |= UBLKS_Q_NO_UBLK_FIXED_FD;
for (i = 0; i < dinfo->nr_hw_queues; i++) {
dev->q[i].dev = dev;
@@ -951,6 +971,7 @@ static int ublk_start_daemon(const struct dev_ctx *ctx, struct ublk_dev *dev)
tinfo[i].dev = dev;
tinfo[i].idx = i;
tinfo[i].ready = &ready;
+ tinfo[i].extra_flags = extra_flags;
/*
* If threads are not tied 1:1 to queues, setting thread
@@ -1471,7 +1492,7 @@ static void __cmd_create_help(char *exe, bool recovery)
printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n",
exe, recovery ? "recover" : "add");
printf("\t[--foreground] [--quiet] [-z] [--auto_zc] [--auto_zc_fallback] [--debug_mask mask] [-r 0|1 ] [-g]\n");
- printf("\t[-e 0|1 ] [-i 0|1]\n");
+ printf("\t[-e 0|1 ] [-i 0|1] [--no_ublk_fixed_fd]\n");
printf("\t[--nthreads threads] [--per_io_tasks]\n");
printf("\t[target options] [backfile1] [backfile2] ...\n");
printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n");
@@ -1534,6 +1555,7 @@ int main(int argc, char *argv[])
{ "size", 1, NULL, 's'},
{ "nthreads", 1, NULL, 0 },
{ "per_io_tasks", 0, NULL, 0 },
+ { "no_ublk_fixed_fd", 0, NULL, 0 },
{ 0, 0, 0, 0 }
};
const struct ublk_tgt_ops *ops = NULL;
@@ -1613,6 +1635,8 @@ int main(int argc, char *argv[])
ctx.nthreads = strtol(optarg, NULL, 10);
if (!strcmp(longopts[option_idx].name, "per_io_tasks"))
ctx.per_io_tasks = 1;
+ if (!strcmp(longopts[option_idx].name, "no_ublk_fixed_fd"))
+ ctx.no_ublk_fixed_fd = 1;
break;
case '?':
/*
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
index 219233f8a053..5e55484fb0aa 100644
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -77,6 +77,7 @@ struct dev_ctx {
unsigned int recovery:1;
unsigned int auto_zc_fallback:1;
unsigned int per_io_tasks:1;
+ unsigned int no_ublk_fixed_fd:1;
int _evtfd;
int _shmid;
@@ -166,7 +167,9 @@ struct ublk_queue {
/* borrow one bit of ublk uapi flags, which may never be used */
#define UBLKS_Q_AUTO_BUF_REG_FALLBACK (1ULL << 63)
+#define UBLKS_Q_NO_UBLK_FIXED_FD (1ULL << 62)
__u64 flags;
+ int ublk_fd; /* cached ublk char device fd */
struct ublk_io ios[UBLK_QUEUE_DEPTH];
};
@@ -273,34 +276,48 @@ static inline int ublk_io_alloc_sqes(struct ublk_thread *t,
return nr_sqes;
}
-static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
- int dev_fd, int tag, int q_id, __u64 index)
+static inline int ublk_get_registered_fd(struct ublk_queue *q, int fd_index)
+{
+ if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD) {
+ if (fd_index == 0)
+ /* Return the raw ublk FD for index 0 */
+ return q->ublk_fd;
+ /* Adjust index for backing files (index 1 becomes 0, etc.) */
+ return fd_index - 1;
+ }
+ return fd_index;
+}
+
+static inline void __io_uring_prep_buf_reg_unreg(struct io_uring_sqe *sqe,
+ struct ublk_queue *q, int tag, int q_id, __u64 index)
{
struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;
+ int dev_fd = ublk_get_registered_fd(q, 0);
io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
sqe->opcode = IORING_OP_URING_CMD;
- sqe->flags |= IOSQE_FIXED_FILE;
- sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;
+ if (q->flags & UBLKS_Q_NO_UBLK_FIXED_FD)
+ sqe->flags &= ~IOSQE_FIXED_FILE;
+ else
+ sqe->flags |= IOSQE_FIXED_FILE;
cmd->tag = tag;
cmd->addr = index;
cmd->q_id = q_id;
}
-static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
- int dev_fd, int tag, int q_id, __u64 index)
+static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
+ struct ublk_queue *q, int tag, int q_id, __u64 index)
{
- struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;
+ __io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
+ sqe->cmd_op = UBLK_U_IO_REGISTER_IO_BUF;
+}
- io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
- sqe->opcode = IORING_OP_URING_CMD;
- sqe->flags |= IOSQE_FIXED_FILE;
+static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
+ struct ublk_queue *q, int tag, int q_id, __u64 index)
+{
+ __io_uring_prep_buf_reg_unreg(sqe, q, tag, q_id, index);
sqe->cmd_op = UBLK_U_IO_UNREGISTER_IO_BUF;
-
- cmd->tag = tag;
- cmd->addr = index;
- cmd->q_id = q_id;
}
static inline void *ublk_get_sqe_cmd(const struct io_uring_sqe *sqe)
diff --git a/tools/testing/selftests/ublk/null.c b/tools/testing/selftests/ublk/null.c
index f0e0003a4860..280043f6b689 100644
--- a/tools/testing/selftests/ublk/null.c
+++ b/tools/testing/selftests/ublk/null.c
@@ -63,7 +63,7 @@ static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
ublk_io_alloc_sqes(t, sqe, 3);
- io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+ io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
sqe[0]->user_data = build_user_data(tag,
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
@@ -71,7 +71,7 @@ static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
__setup_nop_io(tag, iod, sqe[1], q->q_id);
sqe[1]->flags |= IOSQE_IO_HARDLINK;
- io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
+ io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);
// buf register is marked as IOSQE_CQE_SKIP_SUCCESS
diff --git a/tools/testing/selftests/ublk/stripe.c b/tools/testing/selftests/ublk/stripe.c
index 1fb9b7cc281b..791fa8dc1651 100644
--- a/tools/testing/selftests/ublk/stripe.c
+++ b/tools/testing/selftests/ublk/stripe.c
@@ -142,7 +142,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
ublk_io_alloc_sqes(t, sqe, s->nr + extra);
if (zc) {
- io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, io->buf_index);
+ io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index);
sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
sqe[0]->user_data = build_user_data(tag,
ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
@@ -168,7 +168,7 @@ static int stripe_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
if (zc) {
struct io_uring_sqe *unreg = sqe[s->nr + 1];
- io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, io->buf_index);
+ io_uring_prep_buf_unregister(unreg, q, tag, q->q_id, io->buf_index);
unreg->user_data = build_user_data(
tag, ublk_cmd_op_nr(unreg->cmd_op), 0, q->q_id, 1);
}
diff --git a/tools/testing/selftests/ublk/test_stress_04.sh b/tools/testing/selftests/ublk/test_stress_04.sh
index 40d1437ca298..3f901db4d09d 100755
--- a/tools/testing/selftests/ublk/test_stress_04.sh
+++ b/tools/testing/selftests/ublk/test_stress_04.sh
@@ -28,14 +28,14 @@ _create_backfile 0 256M
_create_backfile 1 128M
_create_backfile 2 128M
-ublk_io_and_kill_daemon 8G -t null -q 4 -z &
-ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${UBLK_BACKFILES[0]}" &
+ublk_io_and_kill_daemon 8G -t null -q 4 -z --no_ublk_fixed_fd &
+ublk_io_and_kill_daemon 256M -t loop -q 4 -z --no_ublk_fixed_fd "${UBLK_BACKFILES[0]}" &
ublk_io_and_kill_daemon 256M -t stripe -q 4 -z "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
if _have_feature "AUTO_BUF_REG"; then
ublk_io_and_kill_daemon 8G -t null -q 4 --auto_zc &
ublk_io_and_kill_daemon 256M -t loop -q 4 --auto_zc "${UBLK_BACKFILES[0]}" &
- ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
+ ublk_io_and_kill_daemon 256M -t stripe -q 4 --auto_zc --no_ublk_fixed_fd "${UBLK_BACKFILES[1]}" "${UBLK_BACKFILES[2]}" &
ublk_io_and_kill_daemon 8G -t null -q 4 -z --auto_zc --auto_zc_fallback &
fi