-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml18
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/wireless/ralink,rt2880.yaml49
-rw-r--r--Documentation/hwmon/ina238.rst2
-rw-r--r--Documentation/netlink/specs/devlink.yaml26
-rw-r--r--Documentation/netlink/specs/ethtool.yaml41
-rw-r--r--Documentation/netlink/specs/tc.yaml151
-rw-r--r--Documentation/networking/ethtool-netlink.rst41
-rw-r--r--Documentation/virt/kvm/api.rst11
-rw-r--r--Documentation/virt/kvm/review-checklist.rst95
-rw-r--r--MAINTAINERS10
-rw-r--r--Makefile2
-rw-r--r--arch/arm64/kvm/sys_regs.c2
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a.dtsi10
-rw-r--r--arch/powerpc/include/asm/hvcall.h1
-rw-r--r--arch/riscv/Kconfig3
-rw-r--r--arch/riscv/include/asm/kvm_aia.h4
-rw-r--r--arch/riscv/include/asm/kvm_host.h3
-rw-r--r--arch/riscv/include/asm/uaccess.h4
-rw-r--r--arch/riscv/kernel/ftrace.c18
-rw-r--r--arch/riscv/kernel/traps.c10
-rw-r--r--arch/riscv/kernel/traps_misaligned.c2
-rw-r--r--arch/riscv/kvm/aia.c51
-rw-r--r--arch/riscv/kvm/aia_imsic.c45
-rw-r--r--arch/riscv/kvm/vcpu.c10
-rw-r--r--arch/riscv/kvm/vcpu_timer.c16
-rwxr-xr-xarch/riscv/tools/relocs_check.sh4
-rw-r--r--arch/s390/net/bpf_jit_comp.c10
-rw-r--r--arch/x86/coco/sev/Makefile3
-rw-r--r--arch/x86/hyperv/hv_init.c1
-rw-r--r--arch/x86/hyperv/irqdomain.c69
-rw-r--r--arch/x86/hyperv/ivm.c1
-rw-r--r--arch/x86/hyperv/nested.c1
-rw-r--r--arch/x86/include/asm/mshyperv.h22
-rw-r--r--arch/x86/kvm/vmx/tdx.c9
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/kvm/xen.c2
-rw-r--r--block/blk-sysfs.c1
-rw-r--r--drivers/acpi/riscv/cppc.c2
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c19
-rw-r--r--drivers/clocksource/hyperv_timer.c1
-rw-r--r--drivers/comedi/comedi_fops.c30
-rw-r--r--drivers/comedi/drivers.c17
-rw-r--r--drivers/comedi/drivers/aio_iiro_16.c3
-rw-r--r--drivers/comedi/drivers/comedi_test.c2
-rw-r--r--drivers/comedi/drivers/das16m1.c3
-rw-r--r--drivers/comedi/drivers/das6402.c3
-rw-r--r--drivers/comedi/drivers/pcl812.c3
-rw-r--r--drivers/cpuidle/cpuidle-psci.c23
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c10
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs.c14
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c60
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c4
-rw-r--r--drivers/dma/nbpfaxi.c11
-rw-r--r--drivers/gpio/gpiolib-acpi-quirks.c13
-rw-r--r--drivers/gpio/gpiolib-devres.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_buddy.c43
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c4
-rw-r--r--drivers/gpu/drm/drm_prime.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c36
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c23
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c27
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c22
-rw-r--r--drivers/hv/Kconfig2
-rw-r--r--drivers/hv/channel.c1
-rw-r--r--drivers/hv/channel_mgmt.c1
-rw-r--r--drivers/hv/connection.c5
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hv_proc.c1
-rw-r--r--drivers/hv/mshv_common.c1
-rw-r--r--drivers/hv/mshv_root_hv_call.c1
-rw-r--r--drivers/hv/ring_buffer.c1
-rw-r--r--drivers/hv/vmbus_drv.c9
-rw-r--r--drivers/hwmon/corsair-cpro.c5
-rw-r--r--drivers/hwmon/ina238.c8
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c8
-rw-r--r--drivers/i2c/busses/i2c-omap.c7
-rw-r--r--drivers/i2c/busses/i2c-stm32.c8
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c24
-rw-r--r--drivers/iio/accel/fxls8962af-core.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c10
-rw-r--r--drivers/iio/adc/ad7380.c5
-rw-r--r--drivers/iio/adc/ad7949.c7
-rw-r--r--drivers/iio/adc/adi-axi-adc.c6
-rw-r--r--drivers/iio/adc/axp20x_adc.c1
-rw-r--r--drivers/iio/adc/max1363.c43
-rw-r--r--drivers/iio/adc/stm32-adc-core.c7
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c36
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c20
-rw-r--r--drivers/iio/dac/ad3530r.c4
-rw-r--r--drivers/iio/industrialio-backend.c5
-rw-r--r--drivers/iio/industrialio-core.c5
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/interconnect/core.c34
-rw-r--r--drivers/interconnect/icc-clk.c2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c7
-rw-r--r--drivers/interconnect/qcom/osm-l3.c7
-rw-r--r--drivers/interconnect/qcom/sc7280.c1
-rw-r--r--drivers/interconnect/samsung/exynos.c5
-rw-r--r--drivers/iommu/hyperv-iommu.c33
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/misc/amd-sbi/rmi-core.c24
-rw-r--r--drivers/mmc/host/bcm2835.c3
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c3
-rw-r--r--drivers/mmc/host/sdhci_am654.c9
-rw-r--r--drivers/mux/Kconfig1
-rw-r--r--drivers/net/can/dev/dev.c12
-rw-r--r--drivers/net/can/dev/netlink.c12
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h10
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c126
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c204
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c401
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c75
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h47
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c15
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c10
-rw-r--r--drivers/net/ethernet/google/gve/gve.h24
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c24
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c300
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c94
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c148
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c220
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/port.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c78
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c42
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c23
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h52
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c334
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c23
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c136
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.h17
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c21
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c124
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c55
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h243
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h25
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h15
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c23
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c162
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c13
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c1
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c158
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h80
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c20
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h3
-rw-r--r--drivers/net/netdevsim/dev.c9
-rw-r--r--drivers/net/netdevsim/netdev.c27
-rw-r--r--drivers/net/netdevsim/netdevsim.h1
-rw-r--r--drivers/net/phy/qcom/qca807x.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c20
-rw-r--r--drivers/net/usb/smsc95xx.c72
-rw-r--r--drivers/net/vxlan/vxlan_core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c50
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c9
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c38
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c45
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c15
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c39
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c28
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c12
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c80
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h44
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c8
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c564
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h207
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c138
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h43
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c29
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c72
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c155
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c40
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c55
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h32
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1649
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h28
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c31
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c359
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h165
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h6
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c60
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c46
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c21
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c443
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c389
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c156
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c28
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c5
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c11
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c29
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Kconfig7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c110
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.c151
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00soc.h29
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c10
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig26
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile9
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c95
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h33
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c234
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c140
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h73
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c310
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h61
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c112
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h33
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c225
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c28
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h34
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c149
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c169
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c156
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c501
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c39
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c98
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c55
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c1042
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h65
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h14
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h6
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h6
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/nvme/host/core.c27
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c5
-rw-r--r--drivers/nvmem/imx-ocotp.c5
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c6
-rw-r--r--drivers/pci/controller/pci-hyperv-intf.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c21
-rw-r--r--drivers/phy/phy-core.c5
-rw-r--r--drivers/phy/phy-snps-eusb2.c6
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c75
-rw-r--r--drivers/phy/tegra/xusb.h1
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c25
-rw-r--r--drivers/platform/x86/Makefile3
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c17
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c1
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/lenovo-wmi-hotkey-utilities.c30
-rw-r--r--drivers/pmdomain/governor.c18
-rw-r--r--drivers/power/supply/power_supply_core.c82
-rw-r--r--drivers/power/supply/test_power.c4
-rw-r--r--drivers/s390/net/ism_drv.c3
-rw-r--r--drivers/s390/net/qeth_core_sys.c22
-rw-r--r--drivers/soundwire/amd_manager.c4
-rw-r--r--drivers/soundwire/qcom.c26
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/ssb/driver_gpio.c8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c98
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h2
-rw-r--r--drivers/thunderbolt/switch.c10
-rw-r--r--drivers/thunderbolt/tb.h2
-rw-r--r--drivers/thunderbolt/usb4.c12
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/serial_base_bus.c3
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/gadget.c38
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c7
-rw-r--r--drivers/usb/gadget/configfs.c4
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h3
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--fs/bcachefs/alloc_foreground.c3
-rw-r--r--fs/bcachefs/btree_io.c6
-rw-r--r--fs/bcachefs/dirent.c4
-rw-r--r--fs/bcachefs/dirent.h8
-rw-r--r--fs/bcachefs/io_read.c5
-rw-r--r--fs/bcachefs/journal_io.c1
-rw-r--r--fs/bcachefs/movinggc.c2
-rw-r--r--fs/cachefiles/io.c2
-rw-r--r--fs/cachefiles/ondemand.c4
-rw-r--r--fs/efivarfs/super.c6
-rw-r--r--fs/iomap/buffered-io.c3
-rw-r--r--fs/isofs/inode.c9
-rw-r--r--fs/netfs/read_pgpriv2.c5
-rw-r--r--fs/notify/dnotify/dnotify.c8
-rw-r--r--fs/pidfs.c2
-rw-r--r--fs/smb/client/dir.c6
-rw-r--r--fs/smb/client/file.c10
-rw-r--r--fs/smb/client/smb2inode.c3
-rw-r--r--fs/smb/client/smb2ops.c10
-rw-r--r--fs/ufs/super.c2
-rw-r--r--fs/xfs/libxfs/xfs_group.c14
-rw-r--r--fs/xfs/xfs_buf.c15
-rw-r--r--fs/xfs/xfs_buf.h8
-rw-r--r--fs/xfs/xfs_discard.c29
-rw-r--r--fs/xfs/xfs_extent_busy.h8
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_inode.h2
-rw-r--r--fs/xfs/xfs_iomap.c2
-rw-r--r--fs/xfs/xfs_iops.c2
-rw-r--r--fs/xfs/xfs_mount.c97
-rw-r--r--fs/xfs/xfs_mount.h17
-rw-r--r--fs/xfs/xfs_notify_failure.c3
-rw-r--r--fs/xfs/xfs_trace.h31
-rw-r--r--fs/xfs/xfs_xattr.c2
-rw-r--r--include/drm/drm_buddy.h2
-rw-r--r--include/linux/interconnect-provider.h7
-rw-r--r--include/linux/ism.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h58
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/power_supply.h8
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/net/cfg80211.h33
-rw-r--r--include/net/dropreason-core.h6
-rw-r--r--include/net/libeth/xdp.h2
-rw-r--r--include/net/mac80211.h27
-rw-r--r--include/net/netmem.h153
-rw-r--r--include/net/xfrm.h15
-rw-r--r--include/trace/events/netfs.h30
-rw-r--r--include/trace/events/tcp.h27
-rw-r--r--include/uapi/linux/devlink.h11
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h5
-rw-r--r--include/uapi/linux/nl80211.h39
-rw-r--r--include/uapi/linux/pkt_sched.h68
-rw-r--r--io_uring/net.c12
-rw-r--r--io_uring/poll.c2
-rw-r--r--io_uring/zcrx.c4
-rw-r--r--kernel/bpf/helpers.c11
-rw-r--r--kernel/bpf/sysfs_btf.c2
-rw-r--r--kernel/cgroup/legacy_freezer.c8
-rw-r--r--kernel/freezer.c15
-rw-r--r--kernel/sched/ext.c12
-rw-r--r--kernel/sched/ext_idle.c2
-rw-r--r--kernel/sched/loadavg.c2
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/trace/trace_events.c5
-rw-r--r--kernel/trace/trace_osnoise.c2
-rw-r--r--mm/secretmem.c13
-rw-r--r--net/Kconfig6
-rw-r--r--net/appletalk/aarp.c24
-rw-r--r--net/devlink/netlink_gen.c6
-rw-r--r--net/devlink/netlink_gen.h2
-rw-r--r--net/devlink/rate.c20
-rw-r--r--net/ethtool/common.c39
-rw-r--r--net/ethtool/common.h8
-rw-r--r--net/ethtool/ioctl.c48
-rw-r--r--net/ethtool/netlink.c22
-rw-r--r--net/ethtool/netlink.h5
-rw-r--r--net/ethtool/rss.c393
-rw-r--r--net/ipv4/ipcomp.c2
-rw-r--r--net/ipv4/tcp.c23
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv4/tcp_output.c46
-rw-r--r--net/ipv4/xfrm4_input.c3
-rw-r--r--net/ipv6/ip6_gre.c100
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/xfrm6_input.c3
-rw-r--r--net/ipv6/xfrm6_tunnel.c2
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/mac80211/cfg.c76
-rw-r--r--net/mac80211/debugfs_netdev.c2
-rw-r--r--net/mac80211/ieee80211_i.h32
-rw-r--r--net/mac80211/key.c63
-rw-r--r--net/mac80211/main.c14
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mlme.c116
-rw-r--r--net/mac80211/rx.c28
-rw-r--r--net/mac80211/tdls.c2
-rw-r--r--net/mac80211/tx.c109
-rw-r--r--net/mac80211/util.c31
-rw-r--r--net/mptcp/protocol.c2
-rw-r--r--net/mptcp/protocol.h1
-rw-r--r--net/mptcp/sockopt.c33
-rw-r--r--net/sched/Kconfig12
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/sch_dualpi2.c1175
-rw-r--r--net/sched/sch_qfq.c7
-rw-r--r--net/wireless/mlme.c3
-rw-r--r--net/wireless/nl80211.c96
-rw-r--r--net/wireless/reg.c2
-rw-r--r--net/wireless/sme.c1
-rw-r--r--net/wireless/trace.h17
-rw-r--r--net/xfrm/xfrm_device.c1
-rw-r--r--net/xfrm/xfrm_input.c17
-rw-r--r--net/xfrm/xfrm_interface_core.c7
-rw-r--r--net/xfrm/xfrm_ipcomp.c3
-rw-r--r--net/xfrm/xfrm_state.c148
-rw-r--r--net/xfrm/xfrm_user.c3
-rw-r--r--rust/Makefile1
-rw-r--r--rust/kernel/firmware.rs2
-rw-r--r--rust/kernel/init.rs8
-rw-r--r--rust/kernel/kunit.rs2
-rw-r--r--rust/kernel/lib.rs3
-rw-r--r--rust/macros/module.rs10
-rw-r--r--scripts/Makefile.build3
-rw-r--r--sound/core/compress_offload.c48
-rw-r--r--sound/pci/hda/cs35l56_hda.c110
-rw-r--r--sound/pci/hda/patch_realtek.c5
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c21
-rw-r--r--sound/soc/codecs/rt5660.c7
-rw-r--r--sound/soc/intel/avs/pcm.c4
-rw-r--r--sound/soc/intel/boards/Kconfig2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-arl-match.c29
-rw-r--r--tools/hv/hv_fcopy_uio_daemon.c128
-rw-r--r--tools/lib/bpf/libbpf.c20
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_c.py49
-rw-r--r--tools/objtool/check.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/recursive_attach.c67
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf.c2
-rw-r--r--tools/testing/selftests/drivers/net/Makefile1
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py16
-rw-r--r--tools/testing/selftests/drivers/net/hw/ncdevmem.c8
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_api.py73
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/load.py23
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/xdp.py656
-rw-r--r--tools/testing/selftests/futex/include/futex2test.h8
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh14
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower.sh52
-rw-r--r--tools/testing/selftests/net/lib/xdp_native.bpf.c621
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile3
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh5
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh5
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh5
-rwxr-xr-xtools/testing/selftests/net/netfilter/conntrack_clash.sh45
-rw-r--r--tools/testing/selftests/sched_ext/exit.c8
-rw-r--r--tools/testing/selftests/tc-testing/config1
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json254
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh6
653 files changed, 17526 insertions, 5492 deletions
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index 29f12d650442..1a5209139e13 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -223,12 +223,6 @@ allOf:
- required:
- pwms
- - oneOf:
- - required:
- - interrupts
- - required:
- - io-backends
-
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
index d1a6103fc37a..f3242dc0e7e6 100644
--- a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
@@ -21,7 +21,7 @@ properties:
vlogic-supply: true
interrupts:
- minItems: 1
+ maxItems: 1
description:
Interrupt mapping for the trigger interrupt from the internal oscillator.
diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
index 0e5412cff2bc..d16ca8e0a25d 100644
--- a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
@@ -12,7 +12,7 @@ maintainers:
description: |
This node provides properties for configuring the ath9k wireless device.
The node is expected to be specified as a child node of the PCI controller
- to which the wireless chip is connected.
+ or AHB bus to which the wireless chip is connected.
allOf:
- $ref: ieee80211.yaml#
@@ -35,6 +35,12 @@ properties:
- pci168c,0034 # AR9462
- pci168c,0036 # AR9565
- pci168c,0037 # AR1111 and AR9485
+ - qca,ar9130-wifi
+ - qca,ar9330-wifi
+ - qca,ar9340-wifi
+ - qca,qca9530-wifi
+ - qca,qca9550-wifi
+ - qca,qca9560-wifi
reg:
maxItems: 1
@@ -88,3 +94,13 @@ examples:
nvmem-cell-names = "mac-address", "calibration";
};
};
+ - |
+ ahb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ wifi@180c0000 {
+ compatible = "qca,ar9130-wifi";
+ reg = <0x180c0000 0x230000>;
+ interrupts = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
index 653b319fee88..e34d42a30192 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
@@ -35,6 +35,12 @@ properties:
string to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
+ firmware-name:
+ maxItems: 1
+ description:
+ If present, a board or platform specific string used to lookup
+ usecase-specific firmware files for the device.
+
vddrfacmn-supply:
description: VDD_RFA_CMN supply regulator handle
diff --git a/Documentation/devicetree/bindings/net/wireless/ralink,rt2880.yaml b/Documentation/devicetree/bindings/net/wireless/ralink,rt2880.yaml
new file mode 100644
index 000000000000..04dc5bb2edcc
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/ralink,rt2880.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/wireless/ralink,rt2880.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ralink RT2880 wireless device
+
+maintainers:
+ - Stanislaw Gruszka <stf_xl@wp.pl>
+
+description: |
+ This node provides properties for configuring RT2880 SOC wifi devices.
+ The node is expected to be specified as a root node of the device.
+
+allOf:
+ - $ref: ieee80211.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ralink,rt2880-wifi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ wifi@10180000 {
+ compatible = "ralink,rt2880-wifi";
+ reg = <0x10180000 0x40000>;
+ clocks = <&sysc 16>;
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+ };
diff --git a/Documentation/hwmon/ina238.rst b/Documentation/hwmon/ina238.rst
index d1b93cf8627f..9a24da4786a4 100644
--- a/Documentation/hwmon/ina238.rst
+++ b/Documentation/hwmon/ina238.rst
@@ -65,7 +65,7 @@ Additional sysfs entries for sq52206
------------------------------------
======================= =======================================================
-energy1_input Energy measurement (mJ)
+energy1_input Energy measurement (uJ)
power1_input_highest Peak Power (uW)
======================= =======================================================
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index 1c4bb0cbe5f0..bb87111d5e16 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -853,18 +853,6 @@ attribute-sets:
type: nest
multi-attr: true
nested-attributes: dl-rate-tc-bws
- -
- name: rate-tc-index
- type: u8
- checks:
- max: rate-tc-index-max
- -
- name: rate-tc-bw
- type: u32
- doc: |
- Specifies the bandwidth share assigned to the Traffic Class.
- The bandwidth for the traffic class is determined
- in proportion to the sum of the shares of all configured classes.
-
name: dl-dev-stats
subset-of: devlink
@@ -1271,12 +1259,20 @@ attribute-sets:
type: flag
-
name: dl-rate-tc-bws
- subset-of: devlink
+ name-prefix: devlink-rate-tc-attr-
attributes:
-
- name: rate-tc-index
+ name: index
+ type: u8
+ checks:
+ max: rate-tc-index-max
-
- name: rate-tc-bw
+ name: bw
+ type: u32
+ doc: |
+ Specifies the bandwidth share assigned to the Traffic Class.
+ The bandwidth for the traffic class is determined
+ in proportion to the sum of the shares of all configured classes.
operations:
enum-model: directional
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 069269edde01..1063d5d32fea 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -2684,9 +2684,46 @@ operations:
name: rss-ntf
doc: |
Notification for change in RSS configuration.
- For additional contexts only modifications are modified, not creation
- or removal of the contexts.
+ For additional contexts, only modifications use this notification;
+ creation and deletion have dedicated messages.
notify: rss-get
+ -
+ name: rss-create-act
+ doc: Create an RSS context.
+ attribute-set: rss
+ do:
+ request: &rss-create-attrs
+ attributes:
+ - header
+ - context
+ - hfunc
+ - indir
+ - hkey
+ - input-xfrm
+ reply: *rss-create-attrs
+ -
+ name: rss-create-ntf
+ doc: |
+ Notification for creation of an additional RSS context.
+ notify: rss-create-act
+ -
+ name: rss-delete-act
+ doc: Delete an RSS context.
+ attribute-set: rss
+ do:
+ request:
+ attributes:
+ - header
+ - context
+ -
+ name: rss-delete-ntf
+ doc: |
+ Notification for deletion of an additional RSS context.
+ attribute-set: rss
+ event:
+ attributes:
+ - header
+ - context
mcast-groups:
list:
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index e983c0c82eb9..b1afc7ab3539 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -57,6 +57,23 @@ definitions:
- tunoam
- tuncrit
-
+ name: dualpi2-drop-overload
+ type: enum
+ entries: [overflow, drop]
+ -
+ name: dualpi2-drop-early
+ type: enum
+ entries: [drop-dequeue, drop-enqueue]
+ -
+ name: dualpi2-ecn-mask
+ type: enum
+ value-start: 1
+ entries: [l4s-ect, cla-ect, any-ect]
+ -
+ name: dualpi2-split-gso
+ type: enum
+ entries: [no-split-gso, split-gso]
+ -
name: tc-stats
type: struct
members:
@@ -826,6 +843,58 @@ definitions:
name: drop-overmemory
type: u32
-
+ name: tc-dualpi2-xstats
+ type: struct
+ members:
+ -
+ name: prob
+ type: u32
+ doc: Current base PI probability
+ -
+ name: delay-c
+ type: u32
+ doc: Current C-queue delay in microseconds
+ -
+ name: delay-l
+ type: u32
+ doc: Current L-queue delay in microseconds
+ -
+ name: pkts-in-c
+ type: u32
+ doc: Number of packets enqueued in the C-queue
+ -
+ name: pkts-in-l
+ type: u32
+ doc: Number of packets enqueued in the L-queue
+ -
+ name: maxq
+ type: u32
+ doc: Maximum number of packets seen by the DualPI2
+ -
+ name: ecn-mark
+ type: u32
+ doc: All packets marked with ECN
+ -
+ name: step-mark
+ type: u32
+ doc: Only packets marked with ECN due to L-queue step AQM
+ -
+ name: credit
+ type: s32
+ doc: Current credit value for WRR
+ -
+ name: memory-used
+ type: u32
+ doc: Memory used in bytes by the DualPI2
+ -
+ name: max-memory-used
+ type: u32
+ doc: Maximum memory used in bytes by the DualPI2
+ -
+ name: memory-limit
+ type: u32
+ doc: Memory limit in bytes
+ -
name: tc-fq-pie-xstats
type: struct
members:
@@ -848,7 +917,7 @@ definitions:
-
name: ecn-mark
type: u32
- doc: Packets marked with ecn
+ doc: Packets marked with ECN
-
name: new-flow-count
type: u32
@@ -991,7 +1060,7 @@ definitions:
-
name: ecn-mark
type: u32
- doc: Packets marked with ecn
+ doc: Packets marked with ECN
-
name: tc-red-xstats
type: struct
@@ -2285,6 +2354,78 @@ attribute-sets:
name: quantum
type: u32
-
+ name: dualpi2-attrs
+ name-prefix: tca-dualpi2-
+ attributes:
+ -
+ name: limit
+ type: u32
+ doc: Limit of total number of packets in queue
+ -
+ name: memory-limit
+ type: u32
+ doc: Memory limit of total number of packets in queue
+ -
+ name: target
+ type: u32
+ doc: Classic target delay in microseconds
+ -
+ name: tupdate
+ type: u32
+ doc: Drop probability update interval time in microseconds
+ -
+ name: alpha
+ type: u32
+ doc: Integral gain factor in Hz for PI controller
+ -
+ name: beta
+ type: u32
+ doc: Proportional gain factor in Hz for PI controller
+ -
+ name: step-thresh-pkts
+ type: u32
+ doc: L4S step marking threshold in packets
+ -
+ name: step-thresh-us
+ type: u32
+ doc: L4S Step marking threshold in microseconds
+ -
+ name: min-qlen-step
+ type: u32
+ doc: Packets enqueued to the L-queue can apply the step threshold
+ when the queue length of L-queue is larger than this value.
+ (0 is recommended)
+ -
+ name: coupling
+ type: u8
+ doc: Probability coupling factor between Classic and L4S
+ (2 is recommended)
+ -
+ name: drop-overload
+ type: u8
+ doc: Control the overload strategy (drop to preserve latency or
+ let the queue overflow)
+ enum: dualpi2-drop-overload
+ -
+ name: drop-early
+ type: u8
+ doc: Decide where the Classic packets are PI-based dropped or marked
+ enum: dualpi2-drop-early
+ -
+ name: c-protection
+ type: u8
+ doc: Classic WRR weight in percentage (from 0 to 100)
+ -
+ name: ecn-mask
+ type: u8
+ doc: Configure the L-queue ECN classifier
+ enum: dualpi2-ecn-mask
+ -
+ name: split-gso
+ type: u8
+ doc: Split aggregated skb or not
+ enum: dualpi2-split-gso
+ -
name: ematch-attrs
name-prefix: tca-ematch-
attr-max-name: tca-ematch-tree-max
@@ -3709,6 +3850,9 @@ sub-messages:
value: drr
attribute-set: drr-attrs
-
+ value: dualpi2
+ attribute-set: dualpi2-attrs
+ -
value: etf
attribute-set: etf-attrs
-
@@ -3876,6 +4020,9 @@ sub-messages:
value: codel
fixed-header: tc-codel-xstats
-
+ value: dualpi2
+ fixed-header: tc-dualpi2-xstats
+ -
value: fq
fixed-header: tc-fq-qd-stats
-
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 056832c77ffd..ab20c644af24 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -240,6 +240,8 @@ Userspace to kernel:
``ETHTOOL_MSG_TSCONFIG_GET`` get hw timestamping configuration
``ETHTOOL_MSG_TSCONFIG_SET`` set hw timestamping configuration
``ETHTOOL_MSG_RSS_SET`` set RSS settings
+ ``ETHTOOL_MSG_RSS_CREATE_ACT`` create an additional RSS context
+ ``ETHTOOL_MSG_RSS_DELETE_ACT`` delete an additional RSS context
===================================== =================================
Kernel to userspace:
@@ -294,6 +296,9 @@ Kernel to userspace:
``ETHTOOL_MSG_TSCONFIG_SET_REPLY`` new hw timestamping configuration
``ETHTOOL_MSG_PSE_NTF`` PSE events notification
``ETHTOOL_MSG_RSS_NTF`` RSS settings notification
+ ``ETHTOOL_MSG_RSS_CREATE_ACT_REPLY`` create an additional RSS context
+ ``ETHTOOL_MSG_RSS_CREATE_NTF`` additional RSS context created
+ ``ETHTOOL_MSG_RSS_DELETE_NTF`` additional RSS context deleted
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -2014,6 +2019,42 @@ device needs at least 8 entries - the real table in use will end up being
of 2, so tables which size is not a power of 2 will likely be rejected.
Using table of size 0 will reset the indirection table to the default.
+RSS_CREATE_ACT
+==============
+
+Request contents:
+
+===================================== ====== ==============================
+ ``ETHTOOL_A_RSS_HEADER`` nested request header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+ ``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
+ ``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
+ ``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
+ ``ETHTOOL_A_RSS_INPUT_XFRM`` u32 RSS input data transformation
+===================================== ====== ==============================
+
+Kernel response contents:
+
+===================================== ====== ==============================
+ ``ETHTOOL_A_RSS_HEADER`` nested request header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+===================================== ====== ==============================
+
+Create an additional RSS context. If ``ETHTOOL_A_RSS_CONTEXT`` is not
+specified, the kernel will allocate one automatically.
+
+RSS_DELETE_ACT
+==============
+
+Request contents:
+
+===================================== ====== ==============================
+ ``ETHTOOL_A_RSS_HEADER`` nested request header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
+===================================== ====== ==============================
+
+Delete an additional RSS context.
+
PLCA_GET_CFG
============
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 43ed57e048a8..544fb11351d9 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -2008,6 +2008,13 @@ If the KVM_CAP_VM_TSC_CONTROL capability is advertised, this can also
be used as a vm ioctl to set the initial tsc frequency of subsequently
created vCPUs.
+For TSC protected Confidential Computing (CoCo) VMs where TSC frequency
+is configured once at VM scope and remains unchanged during VM's
+lifetime, the vm ioctl should be used to configure the TSC frequency
+and the vcpu ioctl is not supported.
+
+Example of such CoCo VMs: TDX guests.
+
4.56 KVM_GET_TSC_KHZ
--------------------
@@ -7230,8 +7237,8 @@ inputs and outputs of the TDVMCALL. Currently the following values of
placed in fields from ``r11`` to ``r14`` of the ``get_tdvmcall_info``
field of the union.
-* ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to
-set up a notification interrupt for vector ``vector``.
+ * ``TDVMCALL_SETUP_EVENT_NOTIFY_INTERRUPT``: the guest has requested to
+ set up a notification interrupt for vector ``vector``.
KVM may add support for more values in the future that may cause a userspace
exit, even without calls to ``KVM_ENABLE_CAP`` or similar. In this case,
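Not part of the diff above: a minimal sketch of how the vm-scoped TSC ioctl
described in the api.rst hunk might be used. It assumes KVM_CAP_VM_TSC_CONTROL
is advertised; the 2 GHz value, the function name and the omitted error
handling are illustrative only.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Configure the TSC frequency once at VM scope, before any vCPU exists,
 * as required for TSC-protected CoCo guests such as TDX. */
static int create_vm_with_fixed_tsc(int kvm_fd)
{
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	if (vm_fd < 0)
		return -1;

	/* vm ioctl: vCPUs created afterwards inherit this frequency. */
	if (ioctl(vm_fd, KVM_SET_TSC_KHZ, 2000000) < 0)
		return -1;

	return vm_fd;
}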
diff --git a/Documentation/virt/kvm/review-checklist.rst b/Documentation/virt/kvm/review-checklist.rst
index dc01aea4057b..debac54e14e7 100644
--- a/Documentation/virt/kvm/review-checklist.rst
+++ b/Documentation/virt/kvm/review-checklist.rst
@@ -7,7 +7,7 @@ Review checklist for kvm patches
1. The patch must follow Documentation/process/coding-style.rst and
Documentation/process/submitting-patches.rst.
-2. Patches should be against kvm.git master branch.
+2. Patches should be against kvm.git master or next branches.
3. If the patch introduces or modifies a new userspace API:
- the API must be documented in Documentation/virt/kvm/api.rst
@@ -18,10 +18,10 @@ Review checklist for kvm patches
5. New features must default to off (userspace should explicitly request them).
Performance improvements can and should default to on.
-6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2
+6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2,
+ or its equivalent for non-x86 architectures
-7. Emulator changes should be accompanied by unit tests for qemu-kvm.git
- kvm/test directory.
+7. The feature should be testable (see below).
8. Changes should be vendor neutral when possible. Changes to common code
are better than duplicating changes to vendor code.
@@ -36,6 +36,87 @@ Review checklist for kvm patches
11. New guest visible features must either be documented in a hardware manual
or be accompanied by documentation.
-12. Features must be robust against reset and kexec - for example, shared
- host/guest memory must be unshared to prevent the host from writing to
- guest memory that the guest has not reserved for this purpose.
+Testing of KVM code
+-------------------
+
+All features contributed to KVM, and in many cases bugfixes too, should be
+accompanied by some kind of tests and/or enablement in open source guests
+and VMMs. KVM is covered by multiple test suites:
+
+*Selftests*
+ These are low level tests that allow granular testing of kernel APIs.
+ This includes API failure scenarios, invoking APIs after specific
+ guest instructions, and testing multiple calls to ``KVM_CREATE_VM``
+ within a single test. They are included in the kernel tree at
+ ``tools/testing/selftests/kvm``.
+
+``kvm-unit-tests``
+ A collection of small guests that test CPU and emulated device features
+ from a guest's perspective. They run under QEMU or ``kvmtool``, and
+ are generally not KVM-specific: they can be run with any accelerator
+ that QEMU support or even on bare metal, making it possible to compare
+ behavior across hypervisors and processor families.
+
+Functional test suites
+ Various sets of functional tests exist, such as QEMU's ``tests/functional``
+ suite and `avocado-vt <https://avocado-vt.readthedocs.io/en/latest/>`__.
+ These typically involve running a full operating system in a virtual
+ machine.
+
+The best testing approach depends on the feature's complexity and
+operation. Here are some examples and guidelines:
+
+New instructions (no new registers or APIs)
+ The corresponding CPU features (if applicable) should be made available
+ in QEMU. If the instructions require emulation support or other code in
+ KVM, it is worth adding coverage to ``kvm-unit-tests`` or selftests;
+ the latter can be a better choice if the instructions relate to an API
+ that already has good selftest coverage.
+
+New hardware features (new registers, no new APIs)
+ These should be tested via ``kvm-unit-tests``; this more or less implies
+ supporting them in QEMU and/or ``kvmtool``. In some cases selftests
+ can be used instead, similar to the previous case, or specifically to
+ test corner cases in guest state save/restore.
+
+Bug fixes and performance improvements
+ These usually do not introduce new APIs, but it's worth sharing
+ any benchmarks and tests that will validate your contribution,
+ ideally in the form of regression tests. Tests and benchmarks
+ can be included in either ``kvm-unit-tests`` or selftests, depending
+ on the specifics of your change. Selftests are especially useful for
+ regression tests because they are included directly in Linux's tree.
+
+Large scale internal changes
+ While it's difficult to provide a single policy, you should ensure that
+ the changed code is covered by either ``kvm-unit-tests`` or selftests.
+ In some cases the affected code is run for any guests and functional
+ tests suffice. Explain your testing process in the cover letter,
+ as that can help identify gaps in existing test suites.
+
+New APIs
+ It is important to demonstrate your use case. This can be as simple as
+ explaining that the feature is already in use on bare metal, or it can be
+ a proof-of-concept implementation in userspace. The latter need not be
+ open source, though that is of course preferable for easier testing.
+ Selftests should test corner cases of the APIs, and should also cover
+ basic host and guest operation if no open source VMM uses the feature.
+
+Bigger features, usually spanning host and guest
+ These should be supported by Linux guests, with limited exceptions for
+ Hyper-V features that are testable on Windows guests. It is strongly
+ suggested that the feature be usable with an open source host VMM, such
+ as at least one of QEMU or crosvm, and guest firmware. Selftests should
+ test at least API error cases. Guest operation can be covered by
+ either selftests or ``kvm-unit-tests`` (this is especially important for
+ paravirtualized and Windows-only features). Strong selftest coverage
+ can also be a replacement for implementation in an open source VMM,
+ but this is generally not recommended.
+
+Following the above suggestions for testing in selftests and
+``kvm-unit-tests`` will make it easier for the maintainers to review
+and accept your code. In fact, even before you contribute your changes
+upstream it will make it easier for you to develop for KVM.
+
+Of course, the KVM maintainers reserve the right to require more tests,
+though they may also waive the requirement from time to time.
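Not part of the diff above: to make the selftest guidance concrete, a rough
sketch of the shape a minimal KVM selftest takes. It assumes the helpers under
tools/testing/selftests/kvm/include (vm_create_with_one_vcpu(), vcpu_run(),
the ucall machinery) with their current signatures; names and headers are
illustrative, not authoritative.

#include "kvm_util.h"
#include "test_util.h"
#include "ucall_common.h"

/* Runs inside the guest; reports completion back to the host. */
static void guest_code(void)
{
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;

	/* One VM, one vCPU, entering guest_code() on the first KVM_RUN. */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vcpu_run(vcpu);

	/* The guest signals completion through a ucall. */
	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
		    "unexpected exit from guest");

	kvm_vm_free(vm);
	return 0;
}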
diff --git a/MAINTAINERS b/MAINTAINERS
index 1bc1698bc5ae..255344c1e413 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5581,6 +5581,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: drivers/char/
F: drivers/misc/
F: include/linux/miscdevice.h
+F: rust/kernel/miscdevice.rs
F: samples/rust/rust_misc_device.rs
X: drivers/char/agp/
X: drivers/char/hw_random/
@@ -12200,9 +12201,8 @@ F: drivers/dma/idxd/*
F: include/uapi/linux/idxd.h
INTEL IN FIELD SCAN (IFS) DEVICE
-M: Jithu Joseph <jithu.joseph@intel.com>
+M: Tony Luck <tony.luck@intel.com>
R: Ashok Raj <ashok.raj.linux@gmail.com>
-R: Tony Luck <tony.luck@intel.com>
S: Maintained
F: drivers/platform/x86/intel/ifs
F: include/trace/events/intel_ifs.h
@@ -12542,8 +12542,7 @@ T: git https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next.git/
F: drivers/net/wireless/intel/iwlwifi/
INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER
-M: Jithu Joseph <jithu.joseph@intel.com>
-S: Maintained
+S: Orphan
W: https://slimbootloader.github.io/security/firmware-update.html
F: drivers/platform/x86/intel/wmi/sbl-fw-update.c
@@ -17405,6 +17404,7 @@ F: include/linux/ethtool.h
F: include/linux/framer/framer-provider.h
F: include/linux/framer/framer.h
F: include/linux/in.h
+F: include/linux/in6.h
F: include/linux/indirect_call_wrapper.h
F: include/linux/inet.h
F: include/linux/inet_diag.h
@@ -25923,6 +25923,8 @@ F: fs/hostfs/
USERSPACE COPYIN/COPYOUT (UIOVEC)
M: Alexander Viro <viro@zeniv.linux.org.uk>
+L: linux-block@vger.kernel.org
+L: linux-fsdevel@vger.kernel.org
S: Maintained
F: include/linux/uio.h
F: lib/iov_iter.c
diff --git a/Makefile b/Makefile
index c09766beb7ef..be33e8c868ae 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 76c2f0da821f..c20bd6f21e60 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2624,7 +2624,7 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
*/
if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
hpmn = vcpu->kvm->arch.nr_pmu_counters;
- u64_replace_bits(val, hpmn, MDCR_EL2_HPMN);
+ u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
}
__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
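Not part of the diff above: the one-line arm64 fix swaps the by-value bitfield
helper for its pointer variant. A small sketch of the difference, assuming the
helpers from include/linux/bitfield.h and using MDCR_EL2_HPMN (an arm64 field
mask) purely as an illustration:

#include <linux/bitfield.h>

static u64 clamp_hpmn(u64 mdcr, u64 hpmn)
{
	/* By-value form: returns the updated value, so it must be assigned
	 * back; calling it only for its side effect changes nothing. */
	mdcr = u64_replace_bits(mdcr, hpmn, MDCR_EL2_HPMN);

	/* Pointer form: rewrites the variable in place, which is what
	 * access_mdcr() needs when it clamps HPMN. */
	u64p_replace_bits(&mdcr, hpmn, MDCR_EL2_HPMN);

	return mdcr;
}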
diff --git a/arch/mips/boot/dts/ralink/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
index d66045948a83..460164bdd430 100644
--- a/arch/mips/boot/dts/ralink/mt7620a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
@@ -62,4 +62,14 @@
reg-shift = <2>;
};
};
+
+ wmac: wifi@10180000 {
+ compatible = "ralink,rt2880-wifi";
+ reg = <0x10180000 0x40000>;
+
+ clocks = <&sysc 16>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+ };
};
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 6df6dbbe1e7c..ea6c8dc400d2 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -270,6 +270,7 @@
#define H_QUERY_INT_STATE 0x1E4
#define H_POLL_PENDING 0x1D8
#define H_ILLAN_ATTRIBUTES 0x244
+#define H_ADD_LOGICAL_LAN_BUFFERS 0x248
#define H_MODIFY_HEA_QP 0x250
#define H_QUERY_HEA_QP 0x254
#define H_QUERY_HEA 0x258
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d71ea0f4466f..1c5544401530 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -98,6 +98,7 @@ config RISCV
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
+ select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_SUPPORT
select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
@@ -162,7 +163,7 @@ config RISCV
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if HAVE_DYNAMIC_FTRACE_WITH_ARGS
select HAVE_FUNCTION_GRAPH_FREGS
- select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL && HAVE_DYNAMIC_FTRACE
select HAVE_EBPF_JIT if MMU
select HAVE_GUP_FAST if MMU
select HAVE_FUNCTION_ARG_ACCESS_API
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
index 3b643b9efc07..5acce285e56e 100644
--- a/arch/riscv/include/asm/kvm_aia.h
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -87,6 +87,9 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
extern struct kvm_device_ops kvm_riscv_aia_device_ops;
+bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
@@ -161,7 +164,6 @@ void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
-void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 85cfebc32e4c..bcbf8b1ec115 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -306,6 +306,9 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 525e50db24f7..b88a6218b7f2 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -311,8 +311,8 @@ do { \
do { \
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
!IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
- __inttype(x) val = (__inttype(x))x; \
- if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
+ __inttype(x) ___val = (__inttype(x))x; \
+ if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(___val), sizeof(*__gu_ptr))) \
goto label; \
break; \
} \
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 4c6c24380cfd..8d18d6727f0f 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -14,6 +14,18 @@
#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
+void ftrace_arch_code_modify_prepare(void)
+ __acquires(&text_mutex)
+{
+ mutex_lock(&text_mutex);
+}
+
+void ftrace_arch_code_modify_post_process(void)
+ __releases(&text_mutex)
+{
+ mutex_unlock(&text_mutex);
+}
+
unsigned long ftrace_call_adjust(unsigned long addr)
{
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
@@ -29,10 +41,8 @@ unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
void arch_ftrace_update_code(int command)
{
- mutex_lock(&text_mutex);
command |= FTRACE_MAY_SLEEP;
ftrace_modify_all_code(command);
- mutex_unlock(&text_mutex);
flush_icache_all();
}
@@ -149,6 +159,8 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
unsigned int nops[2], offset;
int ret;
+ guard(mutex)(&text_mutex);
+
ret = ftrace_rec_set_nop_ops(rec);
if (ret)
return ret;
@@ -157,9 +169,7 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
nops[0] = to_auipc_t0(offset);
nops[1] = RISCV_INSN_NOP4;
- mutex_lock(&text_mutex);
ret = patch_insn_write((void *)pc, nops, 2 * MCOUNT_INSN_SIZE);
- mutex_unlock(&text_mutex);
return ret;
}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 9c83848797a7..80230de167de 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irqflags.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -151,7 +152,9 @@ asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
if (user_mode(regs)) { \
irqentry_enter_from_user_mode(regs); \
+ local_irq_enable(); \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
+ local_irq_disable(); \
irqentry_exit_to_user_mode(regs); \
} else { \
irqentry_state_t state = irqentry_nmi_enter(regs); \
@@ -173,17 +176,14 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
-
local_irq_enable();
handled = riscv_v_first_use_handler(regs);
-
- local_irq_disable();
-
if (!handled)
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
"Oops - illegal instruction");
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
@@ -308,9 +308,11 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
if (user_mode(regs)) {
irqentry_enter_from_user_mode(regs);
+ local_irq_enable();
handle_break(regs);
+ local_irq_disable();
irqentry_exit_to_user_mode(regs);
} else {
irqentry_state_t state = irqentry_nmi_enter(regs);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 93043924fe6c..f760e4fcc052 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -461,7 +461,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
}
if (!fp)
- SET_RD(insn, regs, val.data_ulong << shift >> shift);
+ SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
else if (len == 8)
set_f64_rd(insn, regs, val.data_u64);
else
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 19afd1f23537..dad318185660 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -30,28 +30,6 @@ unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
-static int aia_find_hgei(struct kvm_vcpu *owner)
-{
- int i, hgei;
- unsigned long flags;
- struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
-
- raw_spin_lock_irqsave(&hgctrl->lock, flags);
-
- hgei = -1;
- for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
- if (hgctrl->owners[i] == owner) {
- hgei = i;
- break;
- }
- }
-
- raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
-
- put_cpu_ptr(&aia_hgei);
- return hgei;
-}
-
static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
unsigned long hvictl;
@@ -95,7 +73,6 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
- int hgei;
unsigned long seip;
if (!kvm_riscv_aia_available())
@@ -114,11 +91,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false;
- hgei = aia_find_hgei(vcpu);
- if (hgei > 0)
- return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));
-
- return false;
+ return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
}
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
@@ -164,6 +137,9 @@ void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
}
+
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
}
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
@@ -174,6 +150,9 @@ void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
if (!kvm_riscv_aia_available())
return;
+ if (kvm_riscv_aia_initialized(vcpu->kvm))
+ kvm_riscv_vcpu_aia_imsic_put(vcpu);
+
if (kvm_riscv_nacl_available()) {
nsh = nacl_shmem();
csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
@@ -472,22 +451,6 @@ void kvm_riscv_aia_free_hgei(int cpu, int hgei)
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}
-void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
-{
- int hgei;
-
- if (!kvm_riscv_aia_available())
- return;
-
- hgei = aia_find_hgei(owner);
- if (hgei > 0) {
- if (enable)
- csr_set(CSR_HGEIE, BIT(hgei));
- else
- csr_clear(CSR_HGEIE, BIT(hgei));
- }
-}
-
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
int i;
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index 29ef9c2133a9..2ff865943ebb 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -676,6 +676,48 @@ static void imsic_swfile_update(struct kvm_vcpu *vcpu,
imsic_swfile_extirq_update(vcpu);
}
+bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ unsigned long flags;
+ bool ret = false;
+
+ /*
+ * The IMSIC SW-file directly injects interrupts via hvip, so
+ * only check for an interrupt when the IMSIC VS-file is being used.
+ */
+
+ read_lock_irqsave(&imsic->vsfile_lock, flags);
+ if (imsic->vsfile_cpu > -1)
+ ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei));
+ read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+ return ret;
+}
+
+void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ /*
+ * No need to explicitly clear HGEIE CSR bits because the
+ * hgei interrupt handler (aka hgei_interrupt()) will always
+ * clear them for us.
+ */
+}
+
+void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu)
+{
+ struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ unsigned long flags;
+
+ if (!kvm_vcpu_is_blocking(vcpu))
+ return;
+
+ read_lock_irqsave(&imsic->vsfile_lock, flags);
+ if (imsic->vsfile_cpu > -1)
+ csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei));
+ read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+}
+
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
unsigned long flags;
@@ -781,6 +823,9 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
* producers to the new IMSIC VS-file.
*/
+ /* Ensure HGEIE CSR bit is zero before using the new IMSIC VS-file */
+ csr_clear(CSR_HGEIE, BIT(new_vsfile_hgei));
+
/* Zero-out new IMSIC VS-file */
imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index e0a01af426ff..0462863206ca 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -207,16 +207,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvm_riscv_vcpu_timer_pending(vcpu);
}
-void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
- kvm_riscv_aia_wakeon_hgei(vcpu, true);
-}
-
-void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
- kvm_riscv_aia_wakeon_hgei(vcpu, false);
-}
-
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index ff672fa71fcc..85a7262115e1 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -345,8 +345,24 @@ void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
/*
* The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
* upon every VM exit so no need to save here.
+ *
+ * If the VS-timer expires when no VCPU is running on a host CPU,
+ * then a WFI executed by that host CPU will be an effective NOP,
+ * resulting in no power savings. This is because, as per the
+ * RISC-V Privileged specification: "WFI is also required to resume
+ * execution for locally enabled interrupts pending at any privilege
+ * level, regardless of the global interrupt enable at each
+ * privilege level."
+ *
+ * To address the above issue, the vstimecmp CSR must be set to
+ * -1UL here when the VCPU is scheduled out or exits to user space.
*/
+ csr_write(CSR_VSTIMECMP, -1UL);
+#if defined(CONFIG_32BIT)
+ csr_write(CSR_VSTIMECMPH, -1UL);
+#endif
+
/* timer should be enabled for the remaining operations */
if (unlikely(!t->init_done))
return;
diff --git a/arch/riscv/tools/relocs_check.sh b/arch/riscv/tools/relocs_check.sh
index baeb2e7b2290..742993e6a8cb 100755
--- a/arch/riscv/tools/relocs_check.sh
+++ b/arch/riscv/tools/relocs_check.sh
@@ -14,7 +14,9 @@ bad_relocs=$(
${srctree}/scripts/relocs_check.sh "$@" |
# These relocations are okay
# R_RISCV_RELATIVE
- grep -F -w -v 'R_RISCV_RELATIVE'
+ # R_RISCV_NONE
+ grep -F -w -v 'R_RISCV_RELATIVE
+R_RISCV_NONE'
)
if [ -z "$bad_relocs" ]; then
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index c7f8313ba449..0c9a35782c83 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -566,7 +566,15 @@ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
{
memcpy(plt, &bpf_plt, sizeof(*plt));
plt->ret = ret;
- plt->target = target;
+ /*
+ * (target == NULL) implies that the branch to this PLT entry was
+ * patched and became a no-op. However, some CPU could have jumped
+ * to this PLT entry before patching and may still be executing it.
+ *
+ * Since the intention in this case is to make the PLT entry a no-op,
+ * make the target point to the return label instead of NULL.
+ */
+ plt->target = target ?: ret;
}
/*
diff --git a/arch/x86/coco/sev/Makefile b/arch/x86/coco/sev/Makefile
index db3255b979bd..342d79f0ab6a 100644
--- a/arch/x86/coco/sev/Makefile
+++ b/arch/x86/coco/sev/Makefile
@@ -5,5 +5,6 @@ obj-y += core.o sev-nmi.o vc-handle.o
# Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
UBSAN_SANITIZE_sev-nmi.o := n
-# GCC may fail to respect __no_sanitize_address when inlining
+# GCC may fail to respect __no_sanitize_address or __no_kcsan when inlining
KASAN_SANITIZE_sev-nmi.o := n
+KCSAN_SANITIZE_sev-nmi.o := n
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 3d1d3547095a..afdbda2dd7b7 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -34,6 +34,7 @@
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
+#include <linux/export.h>
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index 31f0d29cbc5e..090f5ac9f492 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/irq.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
static int hv_map_interrupt(union hv_device_id device_id, bool level,
@@ -46,7 +47,7 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level,
if (nr_bank < 0) {
local_irq_restore(flags);
pr_err("%s: unable to generate VP set\n", __func__);
- return EINVAL;
+ return -EINVAL;
}
intr_desc->target.flags = HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
@@ -66,7 +67,7 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level,
if (!hv_result_success(status))
hv_status_err(status, "\n");
- return hv_result(status);
+ return hv_result_to_errno(status);
}
static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
@@ -88,7 +89,10 @@ static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL);
local_irq_restore(flags);
- return hv_result(status);
+ if (!hv_result_success(status))
+ hv_status_err(status, "\n");
+
+ return hv_result_to_errno(status);
}
#ifdef CONFIG_PCI_MSI
@@ -169,13 +173,34 @@ static union hv_device_id hv_build_pci_dev_id(struct pci_dev *dev)
return dev_id;
}
-static int hv_map_msi_interrupt(struct pci_dev *dev, int cpu, int vector,
- struct hv_interrupt_entry *entry)
+/**
+ * hv_map_msi_interrupt() - "Map" the MSI IRQ in the hypervisor.
+ * @data: Describes the IRQ
+ * @out_entry: Hypervisor (MSI) interrupt entry (can be NULL)
+ *
+ * Map the IRQ in the hypervisor by issuing a MAP_DEVICE_INTERRUPT hypercall.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int hv_map_msi_interrupt(struct irq_data *data,
+ struct hv_interrupt_entry *out_entry)
{
- union hv_device_id device_id = hv_build_pci_dev_id(dev);
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct hv_interrupt_entry dummy;
+ union hv_device_id device_id;
+ struct msi_desc *msidesc;
+ struct pci_dev *dev;
+ int cpu;
- return hv_map_interrupt(device_id, false, cpu, vector, entry);
+ msidesc = irq_data_get_msi_desc(data);
+ dev = msi_desc_to_pci_dev(msidesc);
+ device_id = hv_build_pci_dev_id(dev);
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+
+ return hv_map_interrupt(device_id, false, cpu, cfg->vector,
+ out_entry ? out_entry : &dummy);
}
+EXPORT_SYMBOL_GPL(hv_map_msi_interrupt);
static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi_msg *msg)
{
@@ -188,13 +213,11 @@ static inline void entry_to_msi_msg(struct hv_interrupt_entry *entry, struct msi
static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry *old_entry);
static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
+ struct hv_interrupt_entry *stored_entry;
+ struct irq_cfg *cfg = irqd_cfg(data);
struct msi_desc *msidesc;
struct pci_dev *dev;
- struct hv_interrupt_entry out_entry, *stored_entry;
- struct irq_cfg *cfg = irqd_cfg(data);
- const cpumask_t *affinity;
- int cpu;
- u64 status;
+ int ret;
msidesc = irq_data_get_msi_desc(data);
dev = msi_desc_to_pci_dev(msidesc);
@@ -204,9 +227,6 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
return;
}
- affinity = irq_data_get_effective_affinity_mask(data);
- cpu = cpumask_first_and(affinity, cpu_online_mask);
-
if (data->chip_data) {
/*
* This interrupt is already mapped. Let's unmap first.
@@ -219,14 +239,12 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
stored_entry = data->chip_data;
data->chip_data = NULL;
- status = hv_unmap_msi_interrupt(dev, stored_entry);
+ ret = hv_unmap_msi_interrupt(dev, stored_entry);
kfree(stored_entry);
- if (status != HV_STATUS_SUCCESS) {
- hv_status_debug(status, "failed to unmap\n");
+ if (ret)
return;
- }
}
stored_entry = kzalloc(sizeof(*stored_entry), GFP_ATOMIC);
@@ -235,15 +253,14 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
return;
}
- status = hv_map_msi_interrupt(dev, cpu, cfg->vector, &out_entry);
- if (status != HV_STATUS_SUCCESS) {
+ ret = hv_map_msi_interrupt(data, stored_entry);
+ if (ret) {
kfree(stored_entry);
return;
}
- *stored_entry = out_entry;
data->chip_data = stored_entry;
- entry_to_msi_msg(&out_entry, msg);
+ entry_to_msi_msg(data->chip_data, msg);
return;
}
@@ -257,7 +274,6 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
{
struct hv_interrupt_entry old_entry;
struct msi_msg msg;
- u64 status;
if (!irqd->chip_data) {
pr_debug("%s: no chip data\n!", __func__);
@@ -270,10 +286,7 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
kfree(irqd->chip_data);
irqd->chip_data = NULL;
- status = hv_unmap_msi_interrupt(dev, &old_entry);
-
- if (status != HV_STATUS_SUCCESS)
- hv_status_err(status, "\n");
+ (void)hv_unmap_msi_interrupt(dev, &old_entry);
}
static void hv_msi_free_irq(struct irq_domain *domain,
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index e93a2f488ff7..ade6c665c97e 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/export.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
index 1083dc8646f9..8ccbb7c4fc27 100644
--- a/arch/x86/hyperv/nested.c
+++ b/arch/x86/hyperv/nested.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
+#include <linux/export.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index e1752ba47e67..abc4659f5809 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -112,12 +112,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
return hv_status;
}
-/* Hypercall to the L0 hypervisor */
-static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
-{
- return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output);
-}
-
/* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{
@@ -165,13 +159,6 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
return _hv_do_fast_hypercall8(control, input1);
}
-static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
-{
- u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
-
- return _hv_do_fast_hypercall8(control, input1);
-}
-
/* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
@@ -223,13 +210,6 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
return _hv_do_fast_hypercall16(control, input1, input2);
}
-static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
-{
- u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
-
- return _hv_do_fast_hypercall16(control, input1, input2);
-}
-
extern struct hv_vp_assist_page **hv_vp_assist_page;
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
@@ -262,6 +242,8 @@ static inline void hv_apic_init(void) {}
struct irq_domain *hv_create_pci_msi_domain(void);
+int hv_map_msi_interrupt(struct irq_data *data,
+ struct hv_interrupt_entry *out_entry);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index f31ccdeb905b..ec79aacc446f 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -173,7 +173,6 @@ static void td_init_cpuid_entry2(struct kvm_cpuid_entry2 *entry, unsigned char i
tdx_clear_unsupported_cpuid(entry);
}
-#define TDVMCALLINFO_GET_QUOTE BIT(0)
#define TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT BIT(1)
static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf,
@@ -192,7 +191,6 @@ static int init_kvm_tdx_caps(const struct tdx_sys_info_td_conf *td_conf,
caps->cpuid.nent = td_conf->num_cpuid_config;
caps->user_tdvmcallinfo_1_r11 =
- TDVMCALLINFO_GET_QUOTE |
TDVMCALLINFO_SETUP_EVENT_NOTIFY_INTERRUPT;
for (i = 0; i < td_conf->num_cpuid_config; i++)
@@ -2271,25 +2269,26 @@ static int tdx_get_capabilities(struct kvm_tdx_cmd *cmd)
const struct tdx_sys_info_td_conf *td_conf = &tdx_sysinfo->td_conf;
struct kvm_tdx_capabilities __user *user_caps;
struct kvm_tdx_capabilities *caps = NULL;
+ u32 nr_user_entries;
int ret = 0;
/* flags is reserved for future use */
if (cmd->flags)
return -EINVAL;
- caps = kmalloc(sizeof(*caps) +
+ caps = kzalloc(sizeof(*caps) +
sizeof(struct kvm_cpuid_entry2) * td_conf->num_cpuid_config,
GFP_KERNEL);
if (!caps)
return -ENOMEM;
user_caps = u64_to_user_ptr(cmd->data);
- if (copy_from_user(caps, user_caps, sizeof(*caps))) {
+ if (get_user(nr_user_entries, &user_caps->cpuid.nent)) {
ret = -EFAULT;
goto out;
}
- if (caps->cpuid.nent < td_conf->num_cpuid_config) {
+ if (nr_user_entries < td_conf->num_cpuid_config) {
ret = -E2BIG;
goto out;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 357b9e3a6cef..93636f77c42d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6188,6 +6188,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
u32 user_tsc_khz;
r = -EINVAL;
+
+ if (vcpu->arch.guest_tsc_protected)
+ goto out;
+
user_tsc_khz = (u32)arg;
if (kvm_caps.has_tsc_control &&
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 5fa2cca43653..d6b2a665b499 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1526,7 +1526,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
sched_poll.nr_ports * sizeof(*ports), &e)) {
*r = -EFAULT;
- return true;
+ goto out;
}
for (i = 0; i < sched_poll.nr_ports; i++) {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b2b9b89d6967..c611444480b3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -960,4 +960,5 @@ void blk_unregister_queue(struct gendisk *disk)
elevator_set_none(q);
blk_debugfs_remove(disk);
+ kobject_put(&disk->queue_kobj);
}
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
index 4cdff387deff..440cf9fb91aa 100644
--- a/drivers/acpi/riscv/cppc.c
+++ b/drivers/acpi/riscv/cppc.c
@@ -37,10 +37,8 @@ static int __init sbi_cppc_init(void)
{
if (sbi_spec_version >= sbi_mk_version(2, 0) &&
sbi_probe_extension(SBI_EXT_CPPC) > 0) {
- pr_info("SBI CPPC extension detected\n");
cppc_ext_present = true;
} else {
- pr_info("SBI CPPC extension NOT detected!!\n");
cppc_ext_present = false;
}
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f2843f814675..1f3f782a04ba 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1173,6 +1173,8 @@ err_name:
err_map:
kfree(map);
err:
+ if (bus && bus->free_on_exit)
+ kfree(bus);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 500840e4a74e..8d994cae3b83 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -308,14 +308,13 @@ end_io:
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
- struct loop_device *lo = rq->q->queuedata;
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
if (req_op(rq) == REQ_OP_WRITE)
- file_end_write(lo->lo_backing_file);
+ kiocb_end_write(&cmd->iocb);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
@@ -391,7 +390,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
if (rw == ITER_SOURCE) {
- file_start_write(lo->lo_backing_file);
+ kiocb_start_write(&cmd->iocb);
ret = file->f_op->write_iter(&cmd->iocb, &iter);
} else
ret = file->f_op->read_iter(&cmd->iocb, &iter);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 7671bd158545..c1c0a4759c7e 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -943,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
struct dprc_endpoint endpoint1 = {{ 0 }};
struct dprc_endpoint endpoint2 = {{ 0 }};
+ struct fsl_mc_bus *mc_bus;
int state, err;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -966,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
strcpy(endpoint_desc.type, endpoint2.type);
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ if (endpoint)
+ return endpoint;
/*
* We know that the device has an endpoint because we verified by
@@ -973,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
* yet discovered by the fsl-mc bus, thus the lookup returned NULL.
* Force a rescan of the devices in this container and retry the lookup.
*/
- if (!endpoint) {
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- if (mutex_trylock(&mc_bus->scan_mutex)) {
- err = dprc_scan_objects(mc_bus_dev, true);
- mutex_unlock(&mc_bus->scan_mutex);
- }
-
- if (err < 0)
- return ERR_PTR(err);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
}
+ if (err < 0)
+ return ERR_PTR(err);
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
/*
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 09549451dd51..2edc13ca184e 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 3383a7ce27ff..c83fd14dd7ad 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -1556,21 +1556,27 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
for (i = 0; i < n_insns; ++i) {
+ unsigned int n = insns[i].n;
+
if (insns[i].insn & INSN_MASK_WRITE) {
if (copy_from_user(data, insns[i].data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
+ if (n < MIN_SAMPLES) {
+ memset(&data[n], 0, (MIN_SAMPLES - n) *
+ sizeof(unsigned int));
+ }
}
ret = parse_insn(dev, insns + i, data, file);
if (ret < 0)
goto error;
if (insns[i].insn & INSN_MASK_READ) {
if (copy_to_user(insns[i].data, data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_to_user failed\n");
ret = -EFAULT;
@@ -1589,6 +1595,16 @@ error:
return i;
}
+#define MAX_INSNS MAX_SAMPLES
+static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns)
+{
+ if (n_insns > MAX_INSNS) {
+ dev_dbg(dev->class_dev, "insnlist length too large\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* COMEDI_INSN ioctl
* synchronous instruction
@@ -1633,6 +1649,10 @@ static int do_insn_ioctl(struct comedi_device *dev,
ret = -EFAULT;
goto error;
}
+ if (insn->n < MIN_SAMPLES) {
+ memset(&data[insn->n], 0,
+ (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
+ }
}
ret = parse_insn(dev, insn, data, file);
if (ret < 0)
@@ -2239,6 +2259,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
rc = -EFAULT;
break;
}
+ rc = check_insnlist_len(dev, insnlist.n_insns);
+ if (rc)
+ break;
insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns) {
rc = -ENOMEM;
@@ -3142,6 +3165,9 @@ static int compat_insnlist(struct file *file, unsigned long arg)
if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32)))
return -EFAULT;
+ rc = check_insnlist_len(dev, insnlist32.n_insns);
+ if (rc)
+ return rc;
insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 376130bfba8a..9e4b7c840a8f 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -339,10 +339,10 @@ int comedi_dio_insn_config(struct comedi_device *dev,
unsigned int *data,
unsigned int mask)
{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- if (!mask)
- mask = chan_mask;
+ if (!mask && chan < 32)
+ mask = 1U << chan;
switch (data[0]) {
case INSN_CONFIG_DIO_INPUT:
@@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data)
{
- unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
+ unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1)
: 0xffffffff;
unsigned int mask = data[0] & chanmask;
unsigned int bits = data[1];
@@ -615,6 +615,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
unsigned int _data[2];
int ret;
+ if (insn->n == 0)
+ return 0;
+
memset(_data, 0, sizeof(_data));
memset(&_insn, 0, sizeof(_insn));
_insn.insn = INSN_BITS;
@@ -625,8 +628,8 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- _data[0] = 1 << (chan - base_chan); /* mask */
- _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
+ _data[0] = 1U << (chan - base_chan); /* mask */
+ _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
}
ret = s->insn_bits(dev, s, &_insn, _data);
@@ -709,7 +712,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
if (s->type == COMEDI_SUBD_DO) {
if (s->n_chan < 32)
- s->io_bits = (1 << s->n_chan) - 1;
+ s->io_bits = (1U << s->n_chan) - 1;
else
s->io_bits = 0xffffffff;
}
diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
index b00fab0b89d4..739cc4db52ac 100644
--- a/drivers/comedi/drivers/aio_iiro_16.c
+++ b/drivers/comedi/drivers/aio_iiro_16.c
@@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
* Digital input change of state interrupts are optionally supported
* using IRQ 2-7, 10-12, 14, or 15.
*/
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 9747e6d1f6eb..7984950f0f99 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -792,7 +792,7 @@ static void waveform_detach(struct comedi_device *dev)
{
struct waveform_private *devpriv = dev->private;
- if (devpriv) {
+ if (devpriv && dev->n_subdevices) {
timer_delete_sync(&devpriv->ai_timer);
timer_delete_sync(&devpriv->ao_timer);
}
diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
index b8ea737ad3d1..1b638f5b5a4f 100644
--- a/drivers/comedi/drivers/das16m1.c
+++ b/drivers/comedi/drivers/das16m1.c
@@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev,
devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] >= 2 && it->options[1] <= 15 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], das16m1_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
index 68f95330de45..7660487e563c 100644
--- a/drivers/comedi/drivers/das6402.c
+++ b/drivers/comedi/drivers/das6402.c
@@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev,
das6402_reset(dev);
/* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
- if ((1 << it->options[1]) & 0x8cec) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0x8cec) {
ret = request_irq(it->options[1], das6402_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
index 0df639c6a595..abca61a72cf7 100644
--- a/drivers/comedi/drivers/pcl812.c
+++ b/drivers/comedi/drivers/pcl812.c
@@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (IS_ERR(dev->pacer))
return PTR_ERR(dev->pacer);
- if ((1 << it->options[1]) & board->irq_bits) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & board->irq_bits) {
ret = request_irq(it->options[1], pcl812_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 4e1ba35deda9..b19bc60cc627 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -45,7 +45,6 @@ struct psci_cpuidle_domain_state {
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
-static bool psci_cpuidle_use_cpuhp;
void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
u32 state)
@@ -124,8 +123,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu)
{
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
- if (pd_dev)
- pm_runtime_get_sync(pd_dev);
+ if (pd_dev) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_get_sync(pd_dev);
+ else
+ dev_pm_genpd_resume(pd_dev);
+ }
return 0;
}
@@ -135,7 +138,11 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
if (pd_dev) {
- pm_runtime_put_sync(pd_dev);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_put_sync(pd_dev);
+ else
+ dev_pm_genpd_suspend(pd_dev);
+
/* Clear domain state to start fresh at next online. */
psci_clear_domain_state();
}
@@ -196,9 +203,6 @@ static void psci_idle_init_cpuhp(void)
{
int err;
- if (!psci_cpuidle_use_cpuhp)
- return;
-
err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
"cpuidle/psci:online",
psci_idle_cpuhp_up,
@@ -259,10 +263,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* s2ram and s2idle.
*/
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
- if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
- }
return 0;
}
@@ -339,7 +341,6 @@ static void psci_cpu_deinit_idle(int cpu)
dt_idle_detach_cpu(data->dev);
psci_cpuidle_use_syscore = false;
- psci_cpuidle_use_cpuhp = false;
}
static int psci_idle_init_cpu(struct device *dev, int cpu)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index af37477ffd8d..be21e4e2016c 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -314,30 +314,30 @@ static int chcr_compute_partial_hash(struct shash_desc *desc,
if (digest_size == SHA1_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha1_st);
+ crypto_shash_export_core(desc, &sha1_st);
memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
} else if (digest_size == SHA224_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
+ crypto_shash_export_core(desc, &sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA256_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
+ crypto_shash_export_core(desc, &sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA384_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
+ crypto_shash_export_core(desc, &sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else if (digest_size == SHA512_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
+ crypto_shash_export_core(desc, &sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else {
error = -EINVAL;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 3c4bba4a8779..c03a69851114 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -5,11 +5,11 @@
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
+#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
@@ -154,19 +154,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
+ if (crypto_shash_export_core(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
+ if (crypto_shash_export_core(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
+ if (crypto_shash_export_core(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
@@ -190,19 +190,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
+ if (crypto_shash_export_core(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
+ if (crypto_shash_export_core(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
+ if (crypto_shash_export_core(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 49f09998e5c0..3371e0a76d3c 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -161,12 +161,16 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
- struct dw_edma_pcie_data vsec_data;
+ struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
int err, nr_irqs;
int i, mask;
+ vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL);
+ if (!vsec_data)
+ return -ENOMEM;
+
/* Enable PCI device */
err = pcim_enable_device(pdev);
if (err) {
@@ -174,23 +178,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
- memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+ memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
/*
* Tries to find if exists a PCIe Vendor-Specific Extended Capability
* for the DMA, if one exists, then reconfigures it.
*/
- dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+ dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
/* Mapping PCI BAR regions */
- mask = BIT(vsec_data.rg.bar);
- for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_wr[i].bar);
- mask |= BIT(vsec_data.dt_wr[i].bar);
+ mask = BIT(vsec_data->rg.bar);
+ for (i = 0; i < vsec_data->wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_wr[i].bar);
+ mask |= BIT(vsec_data->dt_wr[i].bar);
}
- for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_rd[i].bar);
- mask |= BIT(vsec_data.dt_rd[i].bar);
+ for (i = 0; i < vsec_data->rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_rd[i].bar);
+ mask |= BIT(vsec_data->dt_rd[i].bar);
}
err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
@@ -213,7 +217,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data->irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -224,22 +228,22 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->mf = vsec_data.mf;
+ chip->mf = vsec_data->mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_plat_ops;
- chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
- chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
+ chip->ll_wr_cnt = vsec_data->wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data->rd_ch_cnt;
- chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data->rg.bar];
if (!chip->reg_base)
return -ENOMEM;
for (i = 0; i < chip->ll_wr_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_wr[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -263,8 +267,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
for (i = 0; i < chip->ll_rd_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_rd[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -298,31 +302,31 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
- vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
+ vsec_data->rg.bar, vsec_data->rg.off, vsec_data->rg.sz,
chip->reg_base);
for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ i, vsec_data->ll_wr[i].bar,
+ vsec_data->ll_wr[i].off, chip->ll_region_wr[i].sz,
chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ i, vsec_data->dt_wr[i].bar,
+ vsec_data->dt_wr[i].off, chip->dt_region_wr[i].sz,
chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ i, vsec_data->ll_rd[i].bar,
+ vsec_data->ll_rd[i].off, chip->ll_region_rd[i].sz,
chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ i, vsec_data->dt_rd[i].bar,
+ vsec_data->dt_rd[i].off, chip->dt_region_rd[i].sz,
chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 47c8adfdc155..9f0c41ca7770 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -449,9 +449,9 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
return ret;
spin_lock_irqsave(&cvc->pc->lock, flags);
- spin_lock_irqsave(&cvc->vc.lock, flags);
+ spin_lock(&cvc->vc.lock);
vd = mtk_cqdma_find_active_desc(c, cookie);
- spin_unlock_irqrestore(&cvc->vc.lock, flags);
+ spin_unlock(&cvc->vc.lock);
spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 0d6324c4e2be..7a2488a0d6a3 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1351,7 +1351,7 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == 1) {
eirq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irqbuf[0];
} else {
eirq = platform_get_irq_byname(pdev, "error");
@@ -1361,16 +1361,15 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == num_channels + 1) {
struct nbpf_channel *chan;
- for (i = 0, chan = nbpf->chan; i <= num_channels;
+ for (i = 0, chan = nbpf->chan; i < num_channels;
i++, chan++) {
/* Skip the error IRQ */
if (irqbuf[i] == eirq)
i++;
+ if (i >= ARRAY_SIZE(irqbuf))
+ return -EINVAL;
chan->irq = irqbuf[i];
}
-
- if (chan != nbpf->chan + num_channels)
- return -EINVAL;
} else {
/* 2 IRQs and more than one channel */
if (irqbuf[0] == eirq)
@@ -1378,7 +1377,7 @@ static int nbpf_probe(struct platform_device *pdev)
else
irq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irq;
}
}
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index 219667315b2c..c13545dce349 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -331,6 +331,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@11",
},
},
+ {
+ /*
+ * Wakeup only works when keyboard backlight is turned off
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4169
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 4d5f83b17624..72422c5db364 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
- devm_remove_action(dev, devm_gpiod_release_array, descs);
+ devm_release_action(dev, devm_gpiod_release_array, descs);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 78f8755996f0..aa32df7e2fb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5193,6 +5193,8 @@ exit:
dev->dev->power.disable_depth--;
#endif
}
+
+ amdgpu_vram_mgr_clear_reset_blocks(adev);
adev->in_suspend = false;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 426834806fbf..6ac0ce361a2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -427,6 +427,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
unsigned long flags;
ktime_t deadline;
+ bool ret;
if (unlikely(ring->adev->debug_disable_soft_recovery))
return false;
@@ -441,12 +442,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
- atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
- return dma_fence_is_signaled(fence);
+ ret = dma_fence_is_signaled(fence);
+ /* increment the counter only if soft reset worked */
+ if (ret)
+ atomic_inc(&ring->adev->gpu_reset_counter);
+
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 208b7d1d8a27..450e4bf093b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index abdc52b0895a..07c936e90d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -783,6 +783,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
}
/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_buddy_reset_clear(mm, false);
+ mutex_unlock(&mgr->lock);
+}
+
+/**
* amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
*
* @man: TTM memory type manager
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5ee2237d8ee8..bc983ecf3d99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4640,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 87058271b00c..2551823382f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -728,7 +728,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
* support programmable degamma anywhere.
*/
is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+ /* Don't enable the DRM CRTC degamma property for DCN401 since the
+ * pre-blending degamma LUT doesn't apply to the cursor, and therefore
+ * can't work similarly to a post-blending degamma LUT as in other hw
+ * versions.
+ * TODO: revisit it once KMS plane color API is merged.
+ */
+ drm_crtc_enable_color_mgmt(&acrtc->base,
+ (is_dcn &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
+ MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index a3b8e3d4a429..4b17d2fcd565 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -1565,7 +1565,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
- kfree(clk_mgr);
+ kfree(clk_mgr401);
return NULL;
}
@@ -1576,6 +1576,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
if (!clk_mgr->wm_range_table) {
BREAK_TO_DEBUGGER();
kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr401);
return NULL;
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index de9c23537465..834b42a4d31f 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1373,7 +1373,7 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
HPD_DISABLE, 0);
mutex_unlock(&pdata->comms_mutex);
- };
+ }
drm_bridge_add(&pdata->bridge);
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index dc622c78db9d..ea78c6c8ca7a 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -725,7 +725,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* monitor doesn't power down exactly after the throw away read.
*/
if (!aux->is_remote) {
- ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS);
+ ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
if (ret < 0)
return ret;
}
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 241c855f891f..66aff35f8647 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -405,6 +405,49 @@ drm_get_buddy(struct drm_buddy_block *block)
EXPORT_SYMBOL(drm_get_buddy);
/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on the @is_clear value for each block
+ * in the freelist.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+ u64 root_size, size, start;
+ unsigned int order;
+ int i;
+
+ size = mm->size;
+ for (i = 0; i < mm->n_roots; ++i) {
+ order = ilog2(size) - ilog2(mm->chunk_size);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
+
+ root_size = mm->chunk_size << order;
+ size -= root_size;
+ }
+
+ for (i = 0; i <= mm->max_order; ++i) {
+ struct drm_buddy_block *block;
+
+ list_for_each_entry_reverse(block, &mm->free_list[i], link) {
+ if (is_clear != drm_buddy_block_is_clear(block)) {
+ if (is_clear) {
+ mark_cleared(block);
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+ } else {
+ clear_reset(block);
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ }
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
+
+/**
* drm_buddy_free_block - free a block
*
* @mm: DRM buddy manager
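For context, a minimal caller sketch of the new drm_buddy_reset_clear() helper (the function name and the reset scenario are illustrative, not part of the patch):

	/* After an event that invalidates the "already zeroed" guarantee
	 * (e.g. a device reset), a driver can mark every free block dirty
	 * again under its own manager locking; passing true would instead
	 * mark them all as cleared. */
	static void example_forget_cleared_blocks(struct drm_buddy *mm)
	{
		drm_buddy_reset_clear(mm, false);
	}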
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index b7f033d4352a..4f0320df858f 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
if (drm_gem_is_imported(gem_obj)) {
if (dma_obj->vaddr)
- dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map);
+ dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 6f72e7a0f427..6ff22e04029e 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -419,6 +419,7 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap);
static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
unsigned int num_planes)
{
+ struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
@@ -427,9 +428,10 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
+ import_attach = obj->import_attach;
if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_end_cpu_access(obj->dma_buf, dir);
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret)
drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
ret, num_planes, dir);
@@ -452,6 +454,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
*/
int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
+ struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
unsigned int i;
int ret;
@@ -462,9 +465,10 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
ret = -EINVAL;
goto err___drm_gem_fb_end_cpu_access;
}
+ import_attach = obj->import_attach;
if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_begin_cpu_access(obj->dma_buf, dir);
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
if (ret)
goto err___drm_gem_fb_end_cpu_access;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index aa43265f4f4f..a5dbee6974ab 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -349,7 +349,7 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
int ret = 0;
if (drm_gem_is_imported(obj)) {
- ret = dma_buf_vmap(obj->dma_buf, map);
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
} else {
pgprot_t prot = PAGE_KERNEL;
@@ -409,7 +409,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
if (drm_gem_is_imported(obj)) {
- dma_buf_vunmap(obj->dma_buf, map);
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d828502268b8..a0a5d725eab0 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -453,7 +453,13 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
}
mutex_lock(&dev->object_name_lock);
- /* re-export the original imported/exported object */
+ /* re-export the original imported object */
+ if (obj->import_attach) {
+ dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
+ goto out_have_obj;
+ }
+
if (obj->dma_buf) {
get_dma_buf(obj->dma_buf);
dmabuf = obj->dma_buf;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 917ad527c961..40a50c60dfff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
- dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
+ dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
lockdep_assert_held(&etnaviv_obj->lock);
- ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
+ ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
if (ret)
return NULL;
return map.vaddr;
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 8f6fba4217ec..bc7527542fdc 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
return 0;
}
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
+ int i;
+
+ /* no need to wait for disabling the plane by CPU */
+ if (!mtk_crtc->cmdq_client.chan)
+ return;
+
+ if (!mtk_crtc->enabled)
+ return;
+
+ /* set pending plane state to disabled */
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *mtk_plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state);
+
+ if (mtk_plane->index == plane->index) {
+ memcpy(mtk_plane_state, plane_state, sizeof(*plane_state));
+ break;
+ }
+ }
+ mtk_crtc_update_config(mtk_crtc, false);
+
+ /* wait for planes to be disabled by CMDQ */
+ wait_event_timeout(mtk_crtc->cb_blocking_queue,
+ mtk_crtc->cmdq_vblank_cnt == 0,
+ msecs_to_jiffies(500));
+#endif
+}
+
void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_blend_modes(comp),
mtk_ddp_comp_get_formats(comp),
- mtk_ddp_comp_get_num_formats(comp), i);
+ mtk_ddp_comp_get_num_formats(comp),
+ mtk_ddp_comp_is_afbc_supported(comp), i);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h
index 388e900b6f4d..828f109b83e7 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.h
@@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
unsigned int num_conn_routes);
int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state);
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane);
void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *plane_state);
struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index edc6417639e6..ac6620e10262 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.get_blend_modes = mtk_ovl_get_blend_modes,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
+ .is_afbc_supported = mtk_ovl_is_afbc_supported,
};
static const struct mtk_ddp_comp_funcs ddp_postmask = {
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index 39720b27f4e9..7289b3dcf22f 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs {
u32 (*get_blend_modes)(struct device *dev);
const u32 *(*get_formats)(struct device *dev);
size_t (*get_num_formats)(struct device *dev);
+ bool (*is_afbc_supported)(struct device *dev);
void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
void (*add)(struct device *dev, struct mtk_mutex *mutex);
@@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp)
return 0;
}
+static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->is_afbc_supported)
+ return comp->funcs->is_afbc_supported(comp->dev);
+
+ return false;
+}
+
static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex)
{
if (comp->funcs && comp->funcs->add) {
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 04217a36939c..679d413bf10b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev);
u32 mtk_ovl_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_get_formats(struct device *dev);
size_t mtk_ovl_get_num_formats(struct device *dev);
+bool mtk_ovl_is_afbc_supported(struct device *dev);
void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex);
void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index d0581c4e3c99..e0236353d499 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev)
return ovl->data->num_formats;
}
+bool mtk_ovl_is_afbc_supported(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ return ovl->data->supports_afbc;
+}
+
int mtk_ovl_clk_enable(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 6fb85bc6487a..a2fdceadf209 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -1095,7 +1095,6 @@ static const u32 mt8183_output_fmts[] = {
};
static const u32 mt8195_dpi_output_fmts[] = {
- MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_RGB888_2X12_LE,
MEDIA_BUS_FMT_RGB888_2X12_BE,
@@ -1103,18 +1102,19 @@ static const u32 mt8195_dpi_output_fmts[] = {
MEDIA_BUS_FMT_YUYV8_1X16,
MEDIA_BUS_FMT_YUYV10_1X20,
MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_YUV8_1X24,
MEDIA_BUS_FMT_YUV10_1X30,
};
static const u32 mt8195_dp_intf_output_fmts[] = {
- MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_RGB888_2X12_LE,
MEDIA_BUS_FMT_RGB888_2X12_BE,
MEDIA_BUS_FMT_RGB101010_1X30,
MEDIA_BUS_FMT_YUYV8_1X16,
MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_YUV8_1X24,
MEDIA_BUS_FMT_YUV10_1X30,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 655106bbb76d..cbc4f37da8ba 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+
mtk_plane_state->pending.enable = false;
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
+
+ mtk_crtc_plane_disable(old_state->crtc, plane);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
@@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 blend_modes,
- const u32 *formats, size_t num_formats, unsigned int plane_idx)
+ const u32 *formats, size_t num_formats,
+ bool supports_afbc, unsigned int plane_idx)
{
int err;
@@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
err = drm_universal_plane_init(dev, plane, possible_crtcs,
&mtk_plane_funcs, formats,
- num_formats, modifiers, type, NULL);
+ num_formats,
+ supports_afbc ? modifiers : NULL,
+ type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 3b13b89989c7..95c5fa5295d8 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 blend_modes,
- const u32 *formats, size_t num_formats, unsigned int plane_idx);
+ const u32 *formats, size_t num_formats,
+ bool supports_afbc, unsigned int plane_idx);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c
index baa10227d51a..80c01017d642 100644
--- a/drivers/gpu/drm/nouveau/nvif/chan.c
+++ b/drivers/gpu/drm/nouveau/nvif/chan.c
@@ -39,6 +39,9 @@ nvif_chan_gpfifo_post(struct nvif_chan *chan)
const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+ if (!chan->func->gpfifo.post)
+ return 0;
+
return chan->func->gpfifo.post(chan, gpptr, pbptr);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 5657106c2f7d..15e2d505550f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -841,7 +841,6 @@ int panfrost_job_init(struct panfrost_device *pfdev)
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = 2,
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
- .timeout_wq = pfdev->reset.wq,
.name = "pan_js",
.dev = pfdev->dev,
};
@@ -879,6 +878,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
if (!pfdev->reset.wq)
return -ENOMEM;
+ args.timeout_wq = pfdev->reset.wq;
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bbd39348a7ab..7a3e510327b7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -26,7 +26,6 @@
* Jerome Glisse
*/
-#include <linux/console.h>
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
@@ -1635,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
pci_set_power_state(pdev, PCI_D3hot);
}
- if (notify_clients) {
- console_lock();
- drm_client_dev_suspend(dev, true);
- console_unlock();
- }
+ if (notify_clients)
+ drm_client_dev_suspend(dev, false);
+
return 0;
}
@@ -1661,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (notify_clients) {
- console_lock();
- }
if (resume) {
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- if (notify_clients)
- console_unlock();
+ if (pci_enable_device(pdev))
return -1;
- }
}
/* resume AGP if in use */
radeon_agp_resume(rdev);
@@ -1747,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
- if (notify_clients) {
- drm_client_dev_resume(dev, true);
- console_unlock();
- }
+ if (notify_clients)
+ drm_client_dev_resume(dev, false);
return 0;
}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index e671aa241720..ac678de7fe5e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -355,17 +355,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
-/* drm_sched_entity_clear_dep - callback to clear the entities dependency */
-static void drm_sched_entity_clear_dep(struct dma_fence *f,
- struct dma_fence_cb *cb)
-{
- struct drm_sched_entity *entity =
- container_of(cb, struct drm_sched_entity, cb);
-
- entity->dependency = NULL;
- dma_fence_put(f);
-}
-
/*
* drm_sched_entity_wakeup - callback to clear the entity's dependency and
* wake up the scheduler
@@ -376,7 +365,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
struct drm_sched_entity *entity =
container_of(cb, struct drm_sched_entity, cb);
- drm_sched_entity_clear_dep(f, cb);
+ entity->dependency = NULL;
+ dma_fence_put(f);
drm_sched_wakeup(entity->rq->sched);
}
@@ -429,13 +419,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
fence = dma_fence_get(&s_fence->scheduled);
dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!dma_fence_add_callback(fence, &entity->cb,
- drm_sched_entity_clear_dep))
- return true;
-
- /* Ignore it when it is already scheduled */
- dma_fence_put(fence);
- return false;
}
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 1118a0250279..ce49282198cb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -204,15 +204,16 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
if (drm_gem_is_imported(obj)) {
- struct dma_buf *dmabuf = obj->dma_buf;
+ struct dma_buf *dmabuf = attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
virtgpu_dma_buf_unmap(bo);
dma_resv_unlock(dmabuf->resv);
- dma_buf_detach(dmabuf, obj->import_attach);
+ dma_buf_detach(dmabuf, attach);
dma_buf_put(dmabuf);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index c55382167c1b..e417921af584 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -85,10 +85,10 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
int ret;
if (drm_gem_is_imported(obj)) {
- ret = dma_buf_vmap(obj->dma_buf, map);
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->dma_buf, map);
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
return -EIO;
}
}
@@ -102,7 +102,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
if (drm_gem_is_imported(obj))
- dma_buf_vunmap(obj->dma_buf, map);
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
else
drm_gem_ttm_vunmap(obj, map);
}
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 6c4cb9576fb6..e3517ce2e18c 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -417,6 +417,8 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
+ xe_mocs_init_early(gt);
+
return 0;
}
@@ -630,17 +632,15 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = xe_gt_pagefault_init(gt);
+ err = xe_gt_sysfs_init(gt);
if (err)
return err;
- xe_mocs_init_early(gt);
-
- err = xe_gt_sysfs_init(gt);
+ err = gt_fw_domain_init(gt);
if (err)
return err;
- err = gt_fw_domain_init(gt);
+ err = xe_gt_pagefault_init(gt);
if (err)
return err;
@@ -839,6 +839,9 @@ static int gt_reset(struct xe_gt *gt)
goto err_out;
}
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
+ xe_gt_sriov_pf_stop_prepare(gt);
+
xe_uc_gucrc_disable(&gt->uc);
xe_uc_stop_prepare(&gt->uc);
xe_gt_pagefault_reset(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index c08efca6420e..35489fa81825 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -172,6 +172,25 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
pf_clear_vf_scratch_regs(gt, vfid);
}
+static void pf_cancel_restart(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ if (cancel_work_sync(&gt->sriov.pf.workers.restart))
+ xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
+}
+
+/**
+ * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on the PF.
+ */
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+ pf_cancel_restart(gt);
+}
+
static void pf_restart(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index f474509411c0..e2b2ff8132dc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -13,6 +13,7 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
#else
static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
@@ -29,6 +30,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
+static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+}
+
static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 2420a548cacc..53a44702c04a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -2364,6 +2364,21 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static int pf_push_self_config(struct xe_gt *gt)
+{
+ int err;
+
+ err = pf_push_full_vf_config(gt, PFID);
+ if (err) {
+ xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
+ return 0;
+}
+
static void fini_config(void *arg)
{
struct xe_gt *gt = arg;
@@ -2387,9 +2402,17 @@ static void fini_config(void *arg)
int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ int err;
xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (err)
+ return err;
+
return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
}
@@ -2407,6 +2430,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
unsigned int fail = 0, skip = 0;
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
for (n = 1; n <= total_vfs; n++) {
if (xe_gt_sriov_pf_config_is_empty(gt, n))
skip++;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 66bc02302c55..07a5161c7d5b 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1817,8 +1817,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
xe_bo_assert_held(bo);
/* Use bounce buffer for small access and unaligned access */
- if (len & XE_CACHELINE_MASK ||
- ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
+ if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
+ !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
int buf_offset = 0;
/*
@@ -1848,7 +1848,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
err = xe_migrate_access_memory(m, bo,
offset & ~XE_CACHELINE_MASK,
(void *)ptr,
- sizeof(bounce), 0);
+ sizeof(bounce), write);
if (err)
return err;
} else {
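A standalone restatement of the new bounce-buffer test above: both the access length and the effective CPU address (buf + offset) must be cacheline aligned, otherwise the bounce path is taken. The 64-byte cacheline size and the local IS_ALIGNED() definition are assumptions for the sketch only:

	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
	#define XE_CACHELINE_BYTES	64UL	/* assumed value */

	static int needs_bounce_buffer(const void *buf, unsigned long offset,
				       unsigned long len)
	{
		return !IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
		       !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES);
	}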
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index bc1689db4cd7..7b50c7c1ee21 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 *dw, int i)
+static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
{
dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
- MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX;
- dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR;
- dw[i++] = 0;
+ MI_FLUSH_IMM_DW;
+
+ dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
+ dw[i++] = val;
return i;
}
@@ -397,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
static void emit_migration_job_gen12(struct xe_sched_job *job,
struct xe_lrc *lrc, u32 seqno)
{
+ u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
u32 dw[MAX_JOB_SIZE_DW], i = 0;
i = emit_copy_timestamp(lrc, dw, i);
- i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, dw, i);
+ i = emit_store_imm_ggtt(saddr, seqno, dw, i);
dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
- if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
- /* XXX: Do we need this? Leaving for now. */
- dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(dw, i);
- dw[i++] = preparser_disable(false);
- }
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_invalidate(saddr, seqno, dw, i);
+ dw[i++] = preparser_disable(false);
i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 1cd188b73b74..57623ca7f350 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -9,7 +9,7 @@ config HYPERV
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
- select SYSFB if !HYPERV_VTL_MODE
+ select SYSFB if EFI && !HYPERV_VTL_MODE
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 35f26fa1ffe7..7c7c66e0dc3f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -18,6 +18,7 @@
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <linux/set_memory.h>
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/mshyperv.h>
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6e084c207414..65dd299e2944 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
#include <linux/sched/isolation.h>
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index be490c598785..1fe3573ae52a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -519,7 +519,10 @@ void vmbus_set_event(struct vmbus_channel *channel)
else
WARN_ON_ONCE(1);
} else {
- hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+ u64 control = HVCALL_SIGNAL_EVENT;
+
+ control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
+ hv_do_fast_hypercall8(control, channel->sig_event);
}
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 308c8f279df8..b14c5f9e0ef2 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -85,8 +85,10 @@ int hv_post_message(union hv_connection_id connection_id,
else
status = HV_STATUS_INVALID_PARAMETER;
} else {
- status = hv_do_hypercall(HVCALL_POST_MESSAGE,
- aligned_msg, NULL);
+ u64 control = HVCALL_POST_MESSAGE;
+
+ control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
+ status = hv_do_hypercall(control, aligned_msg, NULL);
}
local_irq_restore(flags);
diff --git a/drivers/hv/hv_proc.c b/drivers/hv/hv_proc.c
index 7d7ecb6f6137..fbb4eb3901bb 100644
--- a/drivers/hv/hv_proc.c
+++ b/drivers/hv/hv_proc.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
/*
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
index 2575e6d7a71f..6f227a8a5af7 100644
--- a/drivers/hv/mshv_common.c
+++ b/drivers/hv/mshv_common.c
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <asm/mshyperv.h>
#include <linux/resume_user_mode.h>
+#include <linux/export.h>
#include "mshv.h"
diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
index a222a16107f6..c9c274f29c3c 100644
--- a/drivers/hv/mshv_root_hv_call.c
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
#include "mshv_root.h"
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3c9b02471760..23ce1fb70de1 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 33b524b4eb5e..2ed5a1e89d69 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2509,7 +2509,7 @@ static int vmbus_acpi_add(struct platform_device *pdev)
return 0;
}
#endif
-
+#ifndef HYPERVISOR_CALLBACK_VECTOR
static int vmbus_set_irq(struct platform_device *pdev)
{
struct irq_data *data;
@@ -2534,6 +2534,7 @@ static int vmbus_set_irq(struct platform_device *pdev)
return 0;
}
+#endif
static int vmbus_device_add(struct platform_device *pdev)
{
@@ -2549,11 +2550,11 @@ static int vmbus_device_add(struct platform_device *pdev)
if (ret)
return ret;
- if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR))
- ret = vmbus_set_irq(pdev);
+#ifndef HYPERVISOR_CALLBACK_VECTOR
+ ret = vmbus_set_irq(pdev);
if (ret)
return ret;
-
+#endif
for_each_of_range(&parser, &range) {
struct resource *res;
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index e1a7f7aa7f80..b7b911f8359c 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -89,6 +89,7 @@ struct ccp_device {
struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
u8 *cmd_buffer;
u8 *buffer;
+ int buffer_recv_size; /* number of received bytes in buffer */
int target[6];
DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
DECLARE_BITMAP(fan_cnct, NUM_FANS);
@@ -146,6 +147,9 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
if (!t)
return -ETIMEDOUT;
+ if (ccp->buffer_recv_size != IN_BUFFER_SIZE)
+ return -EPROTO;
+
return ccp_get_errno(ccp);
}
@@ -157,6 +161,7 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
spin_lock(&ccp->wait_input_report_lock);
if (!completion_done(&ccp->wait_input_report)) {
memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
+ ccp->buffer_recv_size = size;
complete_all(&ccp->wait_input_report);
}
spin_unlock(&ccp->wait_input_report_lock);
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index a4a41742786b..9a5fd16a4ec2 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -97,7 +97,7 @@
* Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
* (Specific for SQ52206)
* Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
- * Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain
+ * Energy (uJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain * 1000
*/
#define INA238_CALIBRATION_VALUE 16384
#define INA238_FIXED_SHUNT 20000
@@ -500,9 +500,9 @@ static ssize_t energy1_input_show(struct device *dev,
if (ret)
return ret;
- /* result in mJ */
- energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ /* result in uJ */
+ energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * 10 *
+ data->config->power_calculate_factor, 4 * data->rshunt);
return sysfs_emit(buf, "%llu\n", energy);
}
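For reference, the SQ52206 energy scaling above in standalone form. This is a sketch only: the literal 24 stands in for the driver's per-chip power_calculate_factor behind the "0.24" term, and rshunt keeps the driver's own units.

	#include <stdint.h>

	/* Energy (uJ) = 16 * 0.24 * regval * 20000 / rshunt / 4 * gain * 1000,
	 * with 0.24 * 1000 folded into the integer factors 10 * 24 as in the
	 * patched expression. */
	static uint64_t sq52206_energy_uj(uint64_t regval, uint32_t gain,
					  uint32_t rshunt)
	{
		return regval * 20000 * gain * 16 * 10 * 24 / (4 * (uint64_t)rshunt);
	}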
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 2bc8cccb01fd..52d4000902d5 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -226,15 +226,15 @@ static int ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
}
if (value) {
- if (ret & UCD9000_GPIO_CONFIG_STATUS)
+ if (ret & UCD9000_GPIO_CONFIG_OUT_VALUE)
return 0;
- ret |= UCD9000_GPIO_CONFIG_STATUS;
+ ret |= UCD9000_GPIO_CONFIG_OUT_VALUE;
} else {
- if (!(ret & UCD9000_GPIO_CONFIG_STATUS))
+ if (!(ret & UCD9000_GPIO_CONFIG_OUT_VALUE))
return 0;
- ret &= ~UCD9000_GPIO_CONFIG_STATUS;
+ ret &= ~UCD9000_GPIO_CONFIG_OUT_VALUE;
}
ret |= UCD9000_GPIO_CONFIG_ENABLE;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 8b01df3cc8e9..5fcc9f6c33e5 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1472,7 +1472,9 @@ omap_i2c_probe(struct platform_device *pdev)
}
/* reset ASAP, clearing any IRQs */
- omap_i2c_init(omap);
+ r = omap_i2c_init(omap);
+ if (r)
+ goto err_mux_state_deselect;
if (omap->rev < OMAP_I2C_OMAP1_REV_2)
r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr,
@@ -1515,12 +1517,13 @@ omap_i2c_probe(struct platform_device *pdev)
err_unuse_clocks:
omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+err_mux_state_deselect:
if (omap->mux_state)
mux_state_deselect(omap->mux_state);
err_put_pm:
- pm_runtime_dont_use_autosuspend(omap->dev);
pm_runtime_put_sync(omap->dev);
err_disable_pm:
+ pm_runtime_dont_use_autosuspend(omap->dev);
pm_runtime_disable(&pdev->dev);
return r;
diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
index 157c64e27d0b..f84ec056e36d 100644
--- a/drivers/i2c/busses/i2c-stm32.c
+++ b/drivers/i2c/busses/i2c-stm32.c
@@ -102,7 +102,6 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
void *dma_async_param)
{
struct dma_async_tx_descriptor *txdesc;
- struct device *chan_dev;
int ret;
if (rd_wr) {
@@ -116,11 +115,10 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
}
dma->dma_len = len;
- chan_dev = dma->chan_using->device->dev;
- dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len,
+ dma->dma_buf = dma_map_single(dev, buf, dma->dma_len,
dma->dma_data_dir);
- if (dma_mapping_error(chan_dev, dma->dma_buf)) {
+ if (dma_mapping_error(dev, dma->dma_buf)) {
dev_err(dev, "DMA mapping failed\n");
return -EINVAL;
}
@@ -150,7 +148,7 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
return 0;
err:
- dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len,
+ dma_unmap_single(dev, dma->dma_buf, dma->dma_len,
dma->dma_data_dir);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index e4aaeb2262d0..73a7b8894c0d 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -739,12 +739,13 @@ static void stm32f7_i2c_disable_dma_req(struct stm32f7_i2c_dev *i2c_dev)
static void stm32f7_i2c_dma_callback(void *arg)
{
- struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg;
+ struct stm32f7_i2c_dev *i2c_dev = arg;
struct stm32_i2c_dma *dma = i2c_dev->dma;
- struct device *dev = dma->chan_using->device->dev;
stm32f7_i2c_disable_dma_req(i2c_dev);
- dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir);
+ dmaengine_terminate_async(dma->chan_using);
+ dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len,
+ dma->dma_data_dir);
complete(&dma->dma_complete);
}
@@ -1510,7 +1511,6 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev,
u16 addr = f7_msg->addr;
void __iomem *base = i2c_dev->base;
struct device *dev = i2c_dev->dev;
- struct stm32_i2c_dma *dma = i2c_dev->dma;
/* Bus error */
if (status & STM32F7_I2C_ISR_BERR) {
@@ -1551,10 +1551,8 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev,
}
/* Disable dma */
- if (i2c_dev->use_dma) {
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
- }
+ if (i2c_dev->use_dma)
+ stm32f7_i2c_dma_callback(i2c_dev);
i2c_dev->master_mode = false;
complete(&i2c_dev->complete);
@@ -1600,7 +1598,6 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
{
struct stm32f7_i2c_dev *i2c_dev = data;
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
- struct stm32_i2c_dma *dma = i2c_dev->dma;
void __iomem *base = i2c_dev->base;
u32 status, mask;
int ret;
@@ -1619,10 +1616,8 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
__func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
- if (i2c_dev->use_dma) {
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
- }
+ if (i2c_dev->use_dma)
+ stm32f7_i2c_dma_callback(i2c_dev);
f7_msg->result = -ENXIO;
}
@@ -1640,8 +1635,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
if (!ret) {
dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
+ stm32f7_i2c_dma_callback(i2c_dev);
f7_msg->result = -ETIMEDOUT;
}
}
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 12598feaa693..b10a30960e1e 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -877,6 +877,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
+ synchronize_irq(data->irq);
+
ret = __fxls8962af_fifo_set_mode(data, false);
if (data->enable_event)
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 99cb661fabb2..a7961c610ed2 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -1353,6 +1353,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
union acpi_object *ont;
union acpi_object *elements;
acpi_status status;
+ struct device *parent = indio_dev->dev.parent;
int ret = -EINVAL;
unsigned int val;
int i, j;
@@ -1371,7 +1372,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
};
- adev = ACPI_COMPANION(indio_dev->dev.parent);
+ adev = ACPI_COMPANION(parent);
if (!adev)
return -ENXIO;
@@ -1380,8 +1381,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
if (status == AE_NOT_FOUND) {
return -ENXIO;
} else if (ACPI_FAILURE(status)) {
- dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
- status);
+ dev_warn(parent, "failed to execute _ONT: %d\n", status);
return status;
}
@@ -1457,12 +1457,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
}
ret = 0;
- dev_info(&indio_dev->dev, "computed mount matrix from ACPI\n");
+ dev_info(parent, "computed mount matrix from ACPI\n");
out:
kfree(buffer.pointer);
if (ret)
- dev_dbg(&indio_dev->dev,
+ dev_dbg(parent,
"failed to apply ACPI orientation data: %d\n", ret);
return ret;
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index d96bd12dfea6..cabf5511d116 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -1953,8 +1953,9 @@ static int ad7380_probe(struct spi_device *spi)
if (st->chip_info->has_hardware_gain) {
device_for_each_child_node_scoped(dev, node) {
- unsigned int channel, gain;
+ unsigned int channel;
int gain_idx;
+ u16 gain;
ret = fwnode_property_read_u32(node, "reg", &channel);
if (ret)
@@ -1966,7 +1967,7 @@ static int ad7380_probe(struct spi_device *spi)
"Invalid channel number %i\n",
channel);
- ret = fwnode_property_read_u32(node, "adi,gain-milli",
+ ret = fwnode_property_read_u16(node, "adi,gain-milli",
&gain);
if (ret && ret != -EINVAL)
return dev_err_probe(dev, ret,
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index edd0c3a35ab7..202561cad401 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -308,7 +308,6 @@ static void ad7949_disable_reg(void *reg)
static int ad7949_spi_probe(struct spi_device *spi)
{
- u32 spi_ctrl_mask = spi->controller->bits_per_word_mask;
struct device *dev = &spi->dev;
const struct ad7949_adc_spec *spec;
struct ad7949_adc_chip *ad7949_adc;
@@ -337,11 +336,11 @@ static int ad7949_spi_probe(struct spi_device *spi)
ad7949_adc->resolution = spec->resolution;
/* Set SPI bits per word */
- if (spi_ctrl_mask & SPI_BPW_MASK(ad7949_adc->resolution)) {
+ if (spi_is_bpw_supported(spi, ad7949_adc->resolution)) {
spi->bits_per_word = ad7949_adc->resolution;
- } else if (spi_ctrl_mask == SPI_BPW_MASK(16)) {
+ } else if (spi_is_bpw_supported(spi, 16)) {
spi->bits_per_word = 16;
- } else if (spi_ctrl_mask == SPI_BPW_MASK(8)) {
+ } else if (spi_is_bpw_supported(spi, 8)) {
spi->bits_per_word = 8;
} else {
dev_err(dev, "unable to find common BPW with spi controller\n");
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 4116c44197b8..2dbaa0b5b3d6 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -445,7 +445,7 @@ static int axi_adc_raw_read(struct iio_backend *back, u32 *val)
static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- int addr;
+ u32 addr, reg_val;
guard(mutex)(&st->lock);
@@ -455,7 +455,9 @@ static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val)
*/
addr = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, reg) | ADI_AXI_REG_READ_BIT;
axi_adc_raw_write(back, addr);
- axi_adc_raw_read(back, val);
+ axi_adc_raw_read(back, &reg_val);
+
+ *val = FIELD_GET(ADI_AXI_REG_VALUE_MASK, reg_val);
/* Write 0x0 on the bus to get back to ADC mode */
axi_adc_raw_write(back, 0);
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 71584ffd3632..1b49325ec1ce 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -187,6 +187,7 @@ static struct iio_map axp717_maps[] = {
.consumer_channel = "batt_chrg_i",
.adc_channel_label = "batt_chrg_i",
},
+ { }
};
/*
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index a7e9912fb44a..9dd547e62b6c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -511,10 +511,10 @@ static const struct iio_event_spec max1363_events[] = {
MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec), \
MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec), \
MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, ev_spec, num_ev_spec), \
IIO_CHAN_SOFT_TIMESTAMP(8) \
}
@@ -532,23 +532,23 @@ static const struct iio_chan_spec max1363_channels[] =
/* Applies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
_s0, _s1, _s2, _s3,
- s0to1, s0to2, s0to3,
+ s0to1, s0to2, s2to3, s0to3,
d0m1, d2m3, d1m0, d3m2,
d0m1to2m3, d1m0to3m2,
- s2to3,
};
/* Applies to max1238, max1239 */
static const enum max1363_modes max1238_mode_list[] = {
_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
+ s6to7, s6to8, s6to9, s6to10, s6to11,
s0to7, s0to8, s0to9, s0to10, s0to11,
d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
- d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
- d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
- s6to7, s6to8, s6to9, s6to10, s6to11,
- d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10,
+ d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9,
+ d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2,
+ d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8,
+ d7m6to11m10, d1m0to11m10,
};
#define MAX1363_12X_CHANS(bits) { \
@@ -584,16 +584,15 @@ static const struct iio_chan_spec max1238_channels[] = MAX1363_12X_CHANS(12);
static const enum max1363_modes max11607_mode_list[] = {
_s0, _s1, _s2, _s3,
- s0to1, s0to2, s0to3,
- s2to3,
+ s0to1, s0to2, s2to3,
+ s0to3,
d0m1, d2m3, d1m0, d3m2,
d0m1to2m3, d1m0to3m2,
};
static const enum max1363_modes max11608_mode_list[] = {
_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7,
- s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7,
- s6to7,
+ s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s6to7, s0to7,
d0m1, d2m3, d4m5, d6m7,
d1m0, d3m2, d5m4, d7m6,
d0m1to2m3, d0m1to4m5, d0m1to6m7,
@@ -609,14 +608,14 @@ static const enum max1363_modes max11608_mode_list[] = {
MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0), \
MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0), \
MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0), \
- MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0), \
- MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0), \
- MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0), \
- MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0), \
- MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0), \
- MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0), \
- MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0), \
- MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0), \
+ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0), \
+ MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0), \
+ MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0), \
+ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0), \
+ MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0), \
+ MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0), \
IIO_CHAN_SOFT_TIMESTAMP(16) \
}
static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index bd3458965bff..21c04a98b3b6 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -430,10 +430,9 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return -ENOMEM;
}
- for (i = 0; i < priv->cfg->num_irqs; i++) {
- irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
- irq_set_handler_data(priv->irq[i], priv);
- }
+ for (i = 0; i < priv->cfg->num_irqs; i++)
+ irq_set_chained_handler_and_data(priv->irq[i],
+ stm32_adc_irq_handler, priv);
return 0;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8ce1dccfea4f..dac593be5695 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -154,7 +154,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
return err;
st_accel_set_fullscale_error:
- dev_err(&indio_dev->dev, "failed to set new fullscale.\n");
+ dev_err(indio_dev->dev.parent, "failed to set new fullscale.\n");
return err;
}
@@ -231,8 +231,7 @@ int st_sensors_power_enable(struct iio_dev *indio_dev)
ARRAY_SIZE(regulator_names),
regulator_names);
if (err)
- return dev_err_probe(&indio_dev->dev, err,
- "unable to enable supplies\n");
+ return dev_err_probe(parent, err, "unable to enable supplies\n");
return 0;
}
@@ -241,13 +240,14 @@ EXPORT_SYMBOL_NS(st_sensors_power_enable, "IIO_ST_SENSORS");
static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
{
+ struct device *parent = indio_dev->dev.parent;
struct st_sensor_data *sdata = iio_priv(indio_dev);
/* Sensor does not support interrupts */
if (!sdata->sensor_settings->drdy_irq.int1.addr &&
!sdata->sensor_settings->drdy_irq.int2.addr) {
if (pdata->drdy_int_pin)
- dev_info(&indio_dev->dev,
+ dev_info(parent,
"DRDY on pin INT%d specified, but sensor does not support interrupts\n",
pdata->drdy_int_pin);
return 0;
@@ -256,29 +256,27 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
switch (pdata->drdy_int_pin) {
case 1:
if (!sdata->sensor_settings->drdy_irq.int1.mask) {
- dev_err(&indio_dev->dev,
- "DRDY on INT1 not available.\n");
+ dev_err(parent, "DRDY on INT1 not available.\n");
return -EINVAL;
}
sdata->drdy_int_pin = 1;
break;
case 2:
if (!sdata->sensor_settings->drdy_irq.int2.mask) {
- dev_err(&indio_dev->dev,
- "DRDY on INT2 not available.\n");
+ dev_err(parent, "DRDY on INT2 not available.\n");
return -EINVAL;
}
sdata->drdy_int_pin = 2;
break;
default:
- dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n");
+ dev_err(parent, "DRDY on pdata not valid.\n");
return -EINVAL;
}
if (pdata->open_drain) {
if (!sdata->sensor_settings->drdy_irq.int1.addr_od &&
!sdata->sensor_settings->drdy_irq.int2.addr_od)
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"open drain requested but unsupported.\n");
else
sdata->int_pin_open_drain = true;
@@ -336,6 +334,7 @@ EXPORT_SYMBOL_NS(st_sensors_dev_name_probe, "IIO_ST_SENSORS");
int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
{
+ struct device *parent = indio_dev->dev.parent;
struct st_sensor_data *sdata = iio_priv(indio_dev);
struct st_sensors_platform_data *of_pdata;
int err = 0;
@@ -343,7 +342,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
mutex_init(&sdata->odr_lock);
/* If OF/DT pdata exists, it will take precedence of anything else */
- of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
+ of_pdata = st_sensors_dev_probe(parent, pdata);
if (IS_ERR(of_pdata))
return PTR_ERR(of_pdata);
if (of_pdata)
@@ -370,7 +369,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
if (err < 0)
return err;
} else
- dev_info(&indio_dev->dev, "Full-scale not possible\n");
+ dev_info(parent, "Full-scale not possible\n");
err = st_sensors_set_odr(indio_dev, sdata->odr);
if (err < 0)
@@ -405,7 +404,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
mask = sdata->sensor_settings->drdy_irq.int2.mask_od;
}
- dev_info(&indio_dev->dev,
+ dev_info(parent,
"set interrupt line to open drain mode on pin %d\n",
sdata->drdy_int_pin);
err = st_sensors_write_data_with_mask(indio_dev, addr,
@@ -593,21 +592,20 @@ EXPORT_SYMBOL_NS(st_sensors_get_settings_index, "IIO_ST_SENSORS");
int st_sensors_verify_id(struct iio_dev *indio_dev)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct device *parent = indio_dev->dev.parent;
int wai, err;
if (sdata->sensor_settings->wai_addr) {
err = regmap_read(sdata->regmap,
sdata->sensor_settings->wai_addr, &wai);
if (err < 0) {
- dev_err(&indio_dev->dev,
- "failed to read Who-Am-I register.\n");
- return err;
+ return dev_err_probe(parent, err,
+ "failed to read Who-Am-I register.\n");
}
if (sdata->sensor_settings->wai != wai) {
- dev_warn(&indio_dev->dev,
- "%s: WhoAmI mismatch (0x%x).\n",
- indio_dev->name, wai);
+ dev_warn(parent, "%s: WhoAmI mismatch (0x%x).\n",
+ indio_dev->name, wai);
}
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 9d4bf822a15d..8a8ab688d798 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -127,7 +127,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->trig = devm_iio_trigger_alloc(parent, "%s-trigger",
indio_dev->name);
if (sdata->trig == NULL) {
- dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
+ dev_err(parent, "failed to allocate iio trigger.\n");
return -ENOMEM;
}
@@ -143,7 +143,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n");
if (irq_trig == IRQF_TRIGGER_FALLING)
irq_trig = IRQF_TRIGGER_RISING;
@@ -156,21 +156,19 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.mask_ihl, 1);
if (err < 0)
return err;
- dev_info(&indio_dev->dev,
+ dev_info(parent,
"interrupts on the falling edge or active low level\n");
}
break;
case IRQF_TRIGGER_RISING:
- dev_info(&indio_dev->dev,
- "interrupts on the rising edge\n");
+ dev_info(parent, "interrupts on the rising edge\n");
break;
case IRQF_TRIGGER_HIGH:
- dev_info(&indio_dev->dev,
- "interrupts active high level\n");
+ dev_info(parent, "interrupts active high level\n");
break;
default:
/* This is the most preferred mode, if possible */
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
@@ -179,7 +177,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (irq_trig == IRQF_TRIGGER_FALLING ||
irq_trig == IRQF_TRIGGER_RISING) {
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"edge IRQ not supported w/o stat register.\n");
return -EOPNOTSUPP;
}
@@ -214,13 +212,13 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->trig->name,
sdata->trig);
if (err) {
- dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
+ dev_err(parent, "failed to request trigger IRQ.\n");
return err;
}
err = devm_iio_trigger_register(parent, sdata->trig);
if (err < 0) {
- dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
+ dev_err(parent, "failed to register iio trigger.\n");
return err;
}
indio_dev->trig = iio_trigger_get(sdata->trig);
diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c
index f9752a571aa5..6134613777b8 100644
--- a/drivers/iio/dac/ad3530r.c
+++ b/drivers/iio/dac/ad3530r.c
@@ -166,7 +166,9 @@ static ssize_t ad3530r_set_dac_powerdown(struct iio_dev *indio_dev,
AD3530R_OUTPUT_OPERATING_MODE_0 :
AD3530R_OUTPUT_OPERATING_MODE_1;
pdmode = powerdown ? st->chan[chan->channel].powerdown_mode : 0;
- mask = AD3530R_OP_MODE_CHAN_MSK(chan->channel);
+ mask = chan->channel < AD3531R_MAX_CHANNELS ?
+ AD3530R_OP_MODE_CHAN_MSK(chan->channel) :
+ AD3530R_OP_MODE_CHAN_MSK(chan->channel - 4);
val = field_prep(mask, pdmode);
ret = regmap_update_bits(st->regmap, reg, mask, val);
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index c1eb9ef9db08..266e1b29bf91 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -155,11 +155,14 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file,
ssize_t rc;
int ret;
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
if (rc < 0)
return rc;
- buf[count] = '\0';
+ buf[rc] = '\0';
ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 178e99b111de..5ffda104d4b2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -411,12 +411,15 @@ static ssize_t iio_debugfs_write_reg(struct file *file,
char buf[80];
int ret;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf,
count);
if (ret < 0)
return ret;
- buf[count] = '\0';
+ buf[ret] = '\0';
ret = sscanf(buf, "%i %i", &reg, &val);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 5d9b7007a730..1d8c579b5433 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -172,12 +172,12 @@ static const struct xpad_device {
{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
{ 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX },
{ 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 },
+ { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX360 },
{ 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 },
{ 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
{ 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
{ 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
- { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
{ 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
{ 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 1a41e59c77f8..3ebf37ddfc18 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -20,7 +20,7 @@
#include "internal.h"
-#define ICC_DYN_ID_START 10000
+#define ICC_DYN_ID_START 100000
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -819,6 +819,9 @@ static struct icc_node *icc_node_create_nolock(int id)
{
struct icc_node *node;
+ if (id >= ICC_DYN_ID_START)
+ return ERR_PTR(-EINVAL);
+
/* check if node already exists */
node = node_find(id);
if (node)
@@ -906,11 +909,36 @@ void icc_node_destroy(int id)
return;
kfree(node->links);
+ if (node->id >= ICC_DYN_ID_START)
+ kfree(node->name);
kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
/**
+ * icc_node_set_name() - set node name
+ * @node: node
+ * @provider: node provider
+ * @name: node name
+ *
+ * Return: 0 on success, or -ENOMEM on allocation failure
+ */
+int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name)
+{
+ if (node->id >= ICC_DYN_ID_START) {
+ node->name = kasprintf(GFP_KERNEL, "%s@%s", name,
+ dev_name(provider->dev));
+ if (!node->name)
+ return -ENOMEM;
+ } else {
+ node->name = name;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_node_set_name);
+
+/**
* icc_link_nodes() - create link between two nodes
* @src_node: source node
* @dst_node: destination node
@@ -1038,10 +1066,6 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
node->avg_bw = node->init_avg;
node->peak_bw = node->init_peak;
- if (node->id >= ICC_DYN_ID_START)
- node->name = devm_kasprintf(provider->dev, GFP_KERNEL, "%s@%s",
- node->name, dev_name(provider->dev));
-
if (node->avg_bw || node->peak_bw) {
if (provider->pre_aggregate)
provider->pre_aggregate(node);
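With the devm_kasprintf() call removed from icc_node_add(), providers are now expected to set the node name through the new helper and to destroy the node if that fails, as the rpmh and osm-l3 hunks below do. A hedged sketch of that provider-side sequence (the identifiers are placeholders):

	node = icc_node_create(qn_id);
	if (IS_ERR(node))
		return PTR_ERR(node);

	ret = icc_node_set_name(node, provider, qn_name);
	if (ret) {
		/* No name was attached; tear the node down before bailing. */
		icc_node_destroy(node->id);
		return ret;
	}

	node->data = qn_data;
	icc_node_add(node, provider);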
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index 88f311c11020..93c030608d3e 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -117,6 +117,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
if (!node->name) {
+ icc_node_destroy(node->id);
ret = -ENOMEM;
goto err;
}
@@ -135,6 +136,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
if (!node->name) {
+ icc_node_destroy(node->id);
ret = -ENOMEM;
goto err;
}
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 41bfc6e7ee1d..001404e91041 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -293,7 +293,12 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
goto err_remove_nodes;
}
- node->name = qn->name;
+ ret = icc_node_set_name(node, provider, qn->name);
+ if (ret) {
+ icc_node_destroy(node->id);
+ goto err_remove_nodes;
+ }
+
node->data = qn;
icc_node_add(node, provider);
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index baecbf2533f7..b33f00da1880 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -236,7 +236,12 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
goto err;
}
- node->name = qnodes[i]->name;
+ ret = icc_node_set_name(node, provider, qnodes[i]->name);
+ if (ret) {
+ icc_node_destroy(node->id);
+ goto err;
+ }
+
/* Cast away const and add it back in qcom_osm_l3_set() */
node->data = (void *)qnodes[i];
icc_node_add(node, provider);
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
index 346f18d70e9e..905403a3a930 100644
--- a/drivers/interconnect/qcom/sc7280.c
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -238,6 +238,7 @@ static struct qcom_icc_node xm_pcie3_1 = {
.id = SC7280_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
+ .num_links = 1,
.links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
};
diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
index 9e041365d909..8e8f56186a36 100644
--- a/drivers/interconnect/samsung/exynos.c
+++ b/drivers/interconnect/samsung/exynos.c
@@ -134,6 +134,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
priv->node = icc_node;
icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
bus_dev->of_node);
+ if (!icc_node->name) {
+ icc_node_destroy(pdev->id);
+ return -ENOMEM;
+ }
+
if (of_property_read_u32(bus_dev->of_node, "samsung,data-clock-ratio",
&priv->bus_clk_ratio))
priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 761ab647f372..0961ac805944 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -193,15 +193,13 @@ struct hyperv_root_ir_data {
static void
hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
- u64 status;
- u32 vector;
- struct irq_cfg *cfg;
- int ioapic_id;
- const struct cpumask *affinity;
- int cpu;
- struct hv_interrupt_entry entry;
struct hyperv_root_ir_data *data = irq_data->chip_data;
+ struct hv_interrupt_entry entry;
+ const struct cpumask *affinity;
struct IO_APIC_route_entry e;
+ struct irq_cfg *cfg;
+ int cpu, ioapic_id;
+ u32 vector;
cfg = irqd_cfg(irq_data);
affinity = irq_data_get_effective_affinity_mask(irq_data);
@@ -214,23 +212,16 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
&& data->entry.ioapic_rte.as_uint64) {
entry = data->entry;
- status = hv_unmap_ioapic_interrupt(ioapic_id, &entry);
-
- if (status != HV_STATUS_SUCCESS)
- hv_status_debug(status, "failed to unmap\n");
+ (void)hv_unmap_ioapic_interrupt(ioapic_id, &entry);
data->entry.ioapic_rte.as_uint64 = 0;
data->entry.source = 0; /* Invalid source */
}
- status = hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu,
- vector, &entry);
-
- if (status != HV_STATUS_SUCCESS) {
- hv_status_err(status, "map failed\n");
+ if (hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu,
+ vector, &entry))
return;
- }
data->entry = entry;
@@ -322,10 +313,10 @@ static void hyperv_root_irq_remapping_free(struct irq_domain *domain,
data = irq_data->chip_data;
e = &data->entry;
- if (e->source == HV_DEVICE_TYPE_IOAPIC
- && e->ioapic_rte.as_uint64)
- hv_unmap_ioapic_interrupt(data->ioapic_id,
- &data->entry);
+ if (e->source == HV_DEVICE_TYPE_IOAPIC &&
+ e->ioapic_rte.as_uint64)
+ (void)hv_unmap_ioapic_interrupt(data->ioapic_id,
+ &data->entry);
kfree(data);
}
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 043b9ec756ff..7f3f47db4c98 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(memstick_init_req);
static int h_memstick_read_dev_id(struct memstick_dev *card,
struct memstick_request **mrq)
{
- struct ms_id_register id_reg;
+ struct ms_id_register id_reg = {};
if (!(*mrq)) {
memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
diff --git a/drivers/misc/amd-sbi/rmi-core.c b/drivers/misc/amd-sbi/rmi-core.c
index b653a21a909e..3dec2fc00124 100644
--- a/drivers/misc/amd-sbi/rmi-core.c
+++ b/drivers/misc/amd-sbi/rmi-core.c
@@ -42,7 +42,6 @@
#define RD_MCA_CMD 0x86
/* CPUID MCAMSR mask & index */
-#define CPUID_MCA_THRD_MASK GENMASK(15, 0)
#define CPUID_MCA_THRD_INDEX 32
#define CPUID_MCA_FUNC_MASK GENMASK(31, 0)
#define CPUID_EXT_FUNC_INDEX 56
@@ -129,7 +128,7 @@ static int rmi_cpuid_read(struct sbrmi_data *data,
goto exit_unlock;
}
- thread = msg->cpu_in_out << CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK;
+ thread = msg->cpu_in_out >> CPUID_MCA_THRD_INDEX;
/* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */
if (thread > 127) {
@@ -210,7 +209,7 @@ static int rmi_mca_msr_read(struct sbrmi_data *data,
goto exit_unlock;
}
- thread = msg->mcamsr_in_out << CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK;
+ thread = msg->mcamsr_in_out >> CPUID_MCA_THRD_INDEX;
/* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */
if (thread > 127) {
@@ -321,6 +320,10 @@ int rmi_mailbox_xfer(struct sbrmi_data *data,
ret = regmap_read(data->regmap, SBRMI_OUTBNDMSG7, &ec);
if (ret || ec)
goto exit_clear_alert;
+
+ /* Clear the input value before updating the output data */
+ msg->mb_in_out = 0;
+
/*
* For a read operation, the initiator (BMC) reads the firmware
* response Command Data Out[31:0] from SBRMI::OutBndMsg_inst[4:1]
@@ -373,7 +376,8 @@ static int apml_rmi_reg_xfer(struct sbrmi_data *data,
mutex_unlock(&data->lock);
if (msg.rflag && !ret)
- return copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg)))
+ return -EFAULT;
return ret;
}
@@ -391,7 +395,9 @@ static int apml_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg __use
if (ret && ret != -EPROTOTYPE)
return ret;
- return copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg)))
+ return -EFAULT;
+ return ret;
}
static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user *arg)
@@ -408,7 +414,9 @@ static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user
if (ret && ret != -EPROTOTYPE)
return ret;
- return copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg)))
+ return -EFAULT;
+ return ret;
}
static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __user *arg)
@@ -425,7 +433,9 @@ static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __us
if (ret && ret != -EPROTOTYPE)
return ret;
- return copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg)))
+ return -EFAULT;
+ return ret;
}
static long sbrmi_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
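The repeated change in the APML ioctl paths above addresses the same pitfall each time: copy_to_user() returns the number of bytes left uncopied, not a negative errno, so its value must not be handed back to userspace as the ioctl result. The intended shape, roughly:

	if (copy_to_user(arg, &msg, sizeof(msg)))
		return -EFAULT;		/* some bytes were not copied */

	return ret;			/* 0, or the transfer's own status */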
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index def054ddd256..4fced9b36c80 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -503,7 +503,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
+ dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len,
+ dir_data);
return;
}
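The bcm2835 change follows the DMA API rule that dma_unmap_sg() must be given the nents value originally passed to dma_map_sg(), not the (possibly smaller) mapped count it returned. A generic sketch, assuming 'dev', 'sg' and 'nents' are already set up:

	int mapped;

	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* ... program the 'mapped' entries into the DMA engine ... */

	/* Unmap with the original nents, never with 'mapped'. */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);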
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 13a84b9309e0..e3877a1c72a9 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -913,7 +913,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
(dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
- dmi_match(DMI_SYS_VENDOR, "IRBIS"));
+ dmi_match(DMI_SYS_VENDOR, "IRBIS") ||
+ dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA"));
}
static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 73385ff4c0f3..9e94998e8df7 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -613,7 +613,8 @@ static const struct sdhci_ops sdhci_am654_ops = {
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
.ops = &sdhci_am654_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = {
@@ -643,7 +644,8 @@ static const struct sdhci_ops sdhci_j721e_8bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
.ops = &sdhci_j721e_8bit_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
@@ -667,7 +669,8 @@ static const struct sdhci_ops sdhci_j721e_4bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
.ops = &sdhci_j721e_4bit_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 80f015cf6e54..c68132e38138 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -48,6 +48,7 @@ config MUX_GPIO
config MUX_MMIO
tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
depends on OF
+ select REGMAP_MMIO
help
MMIO/Regmap register bitfield-controlled Multiplexer controller.
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index ea8c807af4d8..3913971125de 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -145,13 +145,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
EXPORT_SYMBOL_GPL(can_change_state);
/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
+static int can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
int err;
+ if (!priv->do_set_mode)
+ return -EOPNOTSUPP;
+
if (netif_carrier_ok(dev))
netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
@@ -173,10 +176,14 @@ static void can_restart(struct net_device *dev)
if (err) {
netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
netif_carrier_off(dev);
+
+ return err;
} else {
netdev_dbg(dev, "Restarted\n");
priv->can_stats.restarts++;
}
+
+ return 0;
}
static void can_restart_work(struct work_struct *work)
@@ -201,9 +208,8 @@ int can_restart_now(struct net_device *dev)
return -EBUSY;
cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
- return 0;
+ return can_restart(dev);
}
/* CAN bus-off
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 13826e8a707b..d9f6ab3efb97 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -285,6 +285,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART_MS]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -292,6 +298,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow a restart while not running */
if (!(dev->flags & IFF_UP))
return -EINVAL;
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 620785ffbd51..5b0ab6240cf2 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
xgbe-platform.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index e54e3e36d3f9..009fbc9b11ce 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -223,6 +223,10 @@
#define MAC_TSSR 0x0d20
#define MAC_TXSNR 0x0d30
#define MAC_TXSSR 0x0d34
+#define MAC_TICNR 0x0d58
+#define MAC_TICSNR 0x0d5C
+#define MAC_TECNR 0x0d60
+#define MAC_TECSNR 0x0d64
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
@@ -428,6 +432,8 @@
#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
#define MAC_TSCR_TSADDREG_INDEX 5
#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSUPDT_INDEX 3
+#define MAC_TSCR_TSUPDT_WIDTH 1
#define MAC_TSCR_TSCFUPDT_INDEX 1
#define MAC_TSCR_TSCFUPDT_WIDTH 1
#define MAC_TSCR_TSCTRLSSR_INDEX 9
@@ -456,6 +462,10 @@
#define MAC_TSSR_TXTSC_WIDTH 1
#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_TICSNR_TSICSNS_INDEX 8
+#define MAC_TICSNR_TSICSNS_WIDTH 8
+#define MAC_TECSNR_TSECSNS_INDEX 8
+#define MAC_TECSNR_TSECSNS_WIDTH 8
#define MAC_VLANHTR_VLHT_INDEX 0
#define MAC_VLANHTR_VLHT_WIDTH 16
#define MAC_VLANIR_VLTI_INDEX 20
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 9e4e79bfe624..e5391a2eca51 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1558,125 +1558,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- unsigned int count = 10000;
-
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev,
- "timed out updating timestamp addend register\n");
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- unsigned int count = 10000;
-
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-
- if (!count)
- netdev_err(pdata->netdev, "timed out initializing timestamp\n");
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr, tx_ssr;
- u64 nsec;
-
- if (pdata->vdata->tx_tstamp_workaround) {
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- } else {
- tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- }
-
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = tx_ssr;
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
@@ -3671,13 +3552,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
/* For Data Center Bridging config */
hw_if->config_tc = xgbe_config_tc;
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 65447f9a0a59..2e9b95a94f89 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -448,7 +448,7 @@ static void xgbe_isr_bh_work(struct work_struct *work)
if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
/* Read Tx Timestamp to clear interrupt */
pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
+ xgbe_get_tx_tstamp(pdata);
queue_work(pdata->dev_workqueue,
&pdata->tx_tstamp_work);
}
@@ -1371,199 +1371,6 @@ static void xgbe_restart(struct work_struct *work)
rtnl_unlock();
}
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (!pdata->tx_tstamp_skb)
- goto unlock;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- pdata->tx_tstamp_skb = NULL;
-
-unlock:
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_NTP_ALL:
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- fallthrough; /* to PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- skb_tx_timestamp(skb);
-}
-
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
if (skb_vlan_tag_present(skb))
@@ -1776,6 +1583,9 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ /* Initialize PTP timestamping and clock. */
+ xgbe_init_ptp(pdata);
+
ret = xgbe_alloc_memory(pdata);
if (ret)
goto err_ptpclk;
@@ -2546,12 +2356,8 @@ skip_data:
if (XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ hwtstamps->hwtstamp = ns_to_ktime(packet->rx_tstamp);
}
if (XGMAC_GET_BITS(packet->attributes,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
new file mode 100644
index 000000000000..bc52e5ec6420
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata,
+ unsigned int sec, unsigned int nsec)
+{
+ int count;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+
+ /* issue command to update the system time value */
+ XGMAC_IOWRITE(pdata, MAC_TSCR,
+ XGMAC_IOREAD(pdata, MAC_TSCR) |
+ (1 << MAC_TSCR_TSUPDT_INDEX));
+
+ /* Wait for the time adjust/update to complete */
+ count = 10000;
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating system timestamp\n");
+}
+
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ unsigned int count = 10000;
+
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev,
+ "timed out updating timestamp addend register\n");
+}
+
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ unsigned int count = 10000;
+
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+
+ if (!count)
+ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+}
+
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+ if (pdata->vdata->tx_tstamp_workaround) {
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ } else {
+ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ }
+
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec *= NSEC_PER_SEC;
+ nsec += le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr)
+{
+ unsigned int value = 0;
+
+ value = XGMAC_IOREAD(pdata, MAC_TSCR);
+ value |= mac_tscr;
+ XGMAC_IOWRITE(pdata, MAC_TSCR, value);
+}
+
+void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (!pdata->tx_tstamp_skb)
+ goto unlock;
+
+ if (pdata->tx_tstamp) {
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(pdata->tx_tstamp);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ pdata->tx_tstamp_skb = NULL;
+
+unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq)
+{
+ if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+ sizeof(pdata->tstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata, struct ifreq *ifreq)
+{
+ struct hwtstamp_config config;
+ unsigned int mac_tscr;
+
+ if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ mac_tscr = 0;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ fallthrough; /* to PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* 802.AS1, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+ return 0;
+}
+
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+ skb_tx_timestamp(skb);
+}
+
+int xgbe_init_ptp(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_tscr = 0;
+ struct timespec64 now;
+ u64 dividend;
+
+ /* Register settings depend on the link speed. */
+ switch (pdata->phy.speed) {
+ case SPEED_1000:
+ XGMAC_IOWRITE(pdata, MAC_TICNR, MAC_TICNR_1G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_1G_INITVAL);
+ break;
+ case SPEED_2500:
+ case SPEED_10000:
+ XGMAC_IOWRITE_BITS(pdata, MAC_TICSNR, TSICSNS,
+ MAC_TICSNR_10G_INITVAL);
+ XGMAC_IOWRITE(pdata, MAC_TECNR, MAC_TECNR_10G_INITVAL);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TECSNR, TSECSNS,
+ MAC_TECSNR_10G_INITVAL);
+ break;
+ case SPEED_UNKNOWN:
+ default:
+ break;
+ }
+
+ /* Enable IEEE1588 PTP clock. */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ /* Set one nano-second accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ xgbe_config_tstamp(pdata, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return -EOPNOTSUPP;
+
+ if (pdata->vdata->tstamp_ptp_clock_freq) {
+ /* Initialize time registers based on
+ * 125MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC,
+ XGBE_V2_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC,
+ XGBE_V2_TSTAMP_SNSINC);
+ } else {
+ /* Initialize time registers based on
+ * 50MHz PTP Clock Frequency
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ }
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / (PTP clock based on SSINC))
+ * = (2^32 * (PTP clock based on SSINC)) / PTP ref clock
+ */
+ if (pdata->vdata->tstamp_ptp_clock_freq)
+ dividend = XGBE_V2_PTP_ACT_CLK_FREQ;
+ else
+ dividend = XGBE_PTP_ACT_CLK_FREQ;
+
+ dividend = (u64)(dividend << 32);
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
+
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+
+ dma_wmb();
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ xgbe_set_tstamp_time(pdata, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
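As a rough check on the addend formula at the end of xgbe_init_ptp(): the addend is effectively the 32-bit fractional increment for the fine-update method, i.e. 2^32 scaled by the ratio of the SSINC-derived clock to the PTP reference clock, exactly as the code's own comment states. The same computation in isolation (variable names are illustrative):

	/* addend = (2^32 * ssinc_clock_hz) / ptp_ref_clock_hz */
	u64 dividend = (u64)ssinc_clock_hz << 32;
	u32 addend = div_u64(dividend, ptp_ref_clock_hz);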
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 097ec5e4f261..e3e1dca9856a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -414,6 +414,7 @@ static struct xgbe_version_data xgbe_v2a = {
.tx_max_fifo_size = 229376,
.rx_max_fifo_size = 229376,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
@@ -430,6 +431,7 @@ static struct xgbe_version_data xgbe_v2b = {
.tx_max_fifo_size = 65536,
.rx_max_fifo_size = 65536,
.tx_tstamp_workaround = 1,
+ .tstamp_ptp_clock_freq = 1,
.ecc_support = 1,
.i2c_support = 1,
.irq_reissue_support = 1,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 978c4dd01fa0..3658afc7801d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -13,18 +13,6 @@
#include "xgbe.h"
#include "xgbe-common.h"
-static u64 xgbe_cc_read(const struct cyclecounter *cc)
-{
- struct xgbe_prv_data *pdata = container_of(cc,
- struct xgbe_prv_data,
- tstamp_cc);
- u64 nsec;
-
- nsec = pdata->hw_if.get_tstamp_time(pdata);
-
- return nsec;
-}
-
static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
@@ -37,7 +25,7 @@ static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->hw_if.update_tstamp_addend(pdata, addend);
+ xgbe_update_tstamp_addend(pdata, addend);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
@@ -49,16 +37,39 @@ static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
+ unsigned int neg_adjust = 0;
+ unsigned int sec, nsec;
+ u32 quotient, remainder;
unsigned long flags;
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+ nsec = remainder;
+
+ /* Negative adjustment for Hw timer register. */
+ if (neg_adjust) {
+ sec = -sec;
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSCTRLSSR))
+ nsec = (1000000000UL - nsec);
+ else
+ nsec = (0x80000000UL - nsec);
+ }
+ nsec = (neg_adjust << 31) | nsec;
+
spin_lock_irqsave(&pdata->tstamp_lock, flags);
- timecounter_adjtime(&pdata->tstamp_tc, delta);
+ xgbe_update_tstamp_time(pdata, sec, nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
}
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+static int xgbe_gettimex(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
@@ -67,9 +78,9 @@ static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
u64 nsec;
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
+ ptp_read_system_prets(sts);
+ nsec = xgbe_get_tstamp_time(pdata);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
*ts = ns_to_timespec64(nsec);
@@ -84,14 +95,9 @@ static int xgbe_settime(struct ptp_clock_info *info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
+ xgbe_set_tstamp_time(pdata, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
return 0;
@@ -107,8 +113,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
struct ptp_clock_info *info = &pdata->ptp_clock_info;
struct ptp_clock *clock;
- struct cyclecounter *cc = &pdata->tstamp_cc;
- u64 dividend;
snprintf(info->name, sizeof(info->name), "%s",
netdev_name(pdata->netdev));
@@ -116,7 +120,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->max_adj = pdata->ptpclk_rate;
info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
- info->gettime64 = xgbe_gettime;
+ info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
info->enable = xgbe_enable;
@@ -128,23 +132,6 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
pdata->ptp_clock = clock;
- /* Calculate the addend:
- * addend = 2^32 / (PTP ref clock / 50Mhz)
- * = (2^32 * 50Mhz) / PTP ref clock
- */
- dividend = 50000000;
- dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
-
- /* Setup the timecounter */
- cc->read = xgbe_cc_read;
- cc->mask = CLOCKSOURCE_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
/* Disable all timestamping to start */
XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
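To make the negative-adjustment encoding in xgbe_adjtime() concrete, here is the arithmetic for one value, assuming the digital-rollover (TSCTRLSSR) case:

	/*
	 * delta = -1500000000 ns:
	 *   neg_adjust = 1, |delta| = 1500000000
	 *   div_u64_rem() -> quotient = 1, remainder = 500000000
	 *   sec  = (unsigned int)-1          (0xffffffff, written to MAC_STSUR)
	 *   nsec = 1000000000 - 500000000    = 500000000
	 *   nsec |= 1U << 31                 (subtract flag for MAC_STNUR)
	 */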
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 70169ea23c7f..d7e03e292ec4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -119,6 +119,14 @@
#define XGBE_MSI_BASE_COUNT 4
#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+/* Initial PTP register values based on Link Speed. */
+#define MAC_TICNR_1G_INITVAL 0x10
+#define MAC_TECNR_1G_INITVAL 0x28
+
+#define MAC_TICSNR_10G_INITVAL 0x33
+#define MAC_TECNR_10G_INITVAL 0x14
+#define MAC_TECSNR_10G_INITVAL 0xCC
+
/* PCI clock frequencies */
#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
@@ -128,6 +136,11 @@
*/
#define XGBE_TSTAMP_SSINC 20
#define XGBE_TSTAMP_SNSINC 0
+#define XGBE_PTP_ACT_CLK_FREQ 500000000
+
+#define XGBE_V2_TSTAMP_SSINC 0xA
+#define XGBE_V2_TSTAMP_SNSINC 0
+#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
@@ -741,14 +754,6 @@ struct xgbe_hw_if {
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
/* For Data Center Bridging config */
void (*config_tc)(struct xgbe_prv_data *);
void (*config_dcb_tc)(struct xgbe_prv_data *);
@@ -946,6 +951,7 @@ struct xgbe_version_data {
unsigned int tx_max_fifo_size;
unsigned int rx_max_fifo_size;
unsigned int tx_tstamp_workaround;
+ unsigned int tstamp_ptp_clock_freq;
unsigned int ecc_support;
unsigned int i2c_support;
unsigned int irq_reissue_support;
@@ -1131,8 +1137,6 @@ struct xgbe_prv_data {
struct ptp_clock_info ptp_clock_info;
struct ptp_clock *ptp_clock;
struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
unsigned int tstamp_addend;
struct work_struct tx_tstamp_work;
struct sk_buff *tx_tstamp_skb;
@@ -1277,6 +1281,29 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
void xgbe_restart_dev(struct xgbe_prv_data *pdata);
void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+/* For Timestamp config */
+void xgbe_config_tstamp(struct xgbe_prv_data *pdata, unsigned int mac_tscr);
+u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata);
+u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata);
+void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc);
+void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend);
+void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
+void xgbe_tx_tstamp(struct work_struct *work);
+int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq);
+int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq);
+void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet);
+int xgbe_init_ptp(struct xgbe_prv_data *pdata);
+void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec);
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index a6ea477bce3c..b9973956c480 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -816,6 +816,9 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
+
+ if (intf->parent->tx_chan_offset)
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index b82f121cadad..0f4efd505332 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4666,12 +4666,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
return PTR_ERR(dpmac_dev);
}
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = priv->mc_io;
@@ -4705,6 +4712,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 147a93bf9fa9..4643a3380618 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1448,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
return PTR_ERR(dpmac_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = port_priv->ethsw_data->mc_io;
@@ -1483,6 +1490,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
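Both dpaa2 hunks above converge on the same reference-counting shape: the endpoint lookup hands back a device with a held reference, so every early-exit path that does not keep using the DPMAC has to drop it. Roughly:

	if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
		err = 0;
		goto out_put_device;	/* not a DPMAC: drop the reference */
	}

	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
	if (!mac) {
		err = -ENOMEM;
		goto out_put_device;
	}
	/* ... */

out_put_device:
	put_device(&dpmac_dev->dev);
	return err;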
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b481ee8ee478..1383918f8a3f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1045,7 +1045,9 @@ static void fec_enet_bd_init(struct net_device *dev)
struct page *page = txq->tx_buf[i].buf_p;
if (page)
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0,
+ false);
}
txq->tx_buf[i].buf_p = NULL;
@@ -1586,7 +1588,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
xdp_return_frame_rx_napi(xdpf);
} else { /* recycle pages of XDP_TX frames */
/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
- page_pool_put_page(page->pp, page, 0, true);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp, page,
+ 0, true);
}
txq->tx_buf[index].buf_p = NULL;
@@ -3348,7 +3351,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
} else {
struct page *page = txq->tx_buf[i].buf_p;
- page_pool_put_page(page->pp, page, 0, false);
+ page_pool_put_page(pp_page_to_nmdesc(page)->pp,
+ page, 0, false);
}
txq->tx_buf[i].buf_p = NULL;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 53899096e89e..bceaf9b05cb4 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -190,6 +190,9 @@ struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
struct gve_rx_slot_page_info page_info;
+ /* XSK buffer */
+ struct xdp_buff *xsk_buff;
+
/* The DMA address corresponding to `page_info`. */
dma_addr_t addr;
@@ -331,7 +334,6 @@ struct gve_rx_ring {
/* XDP stuff */
struct xdp_rxq_info xdp_rxq;
- struct xdp_rxq_info xsk_rxq;
struct xsk_buff_pool *xsk_pool;
struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
@@ -400,11 +402,17 @@ enum gve_packet_state {
GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
/* No valid completion received within the specified timeout. */
GVE_PACKET_STATE_TIMED_OUT_COMPL,
+ /* XSK pending packet has received a packet/reinjection completion, or
+ * has timed out. At this point, the pending packet can be counted by
+ * xsk_tx_complete and freed.
+ */
+ GVE_PACKET_STATE_XSK_COMPLETE,
};
enum gve_tx_pending_packet_dqo_type {
GVE_TX_PENDING_PACKET_DQO_SKB,
- GVE_TX_PENDING_PACKET_DQO_XDP_FRAME
+ GVE_TX_PENDING_PACKET_DQO_XDP_FRAME,
+ GVE_TX_PENDING_PACKET_DQO_XSK,
};
struct gve_tx_pending_packet_dqo {
@@ -441,10 +449,10 @@ struct gve_tx_pending_packet_dqo {
/* Identifies the current state of the packet as defined in
* `enum gve_packet_state`.
*/
- u8 state : 2;
+ u8 state : 3;
/* gve_tx_pending_packet_dqo_type */
- u8 type : 1;
+ u8 type : 2;
/* If packet is an outstanding miss completion, then the packet is
* freed if the corresponding re-injection completion is not received
@@ -513,6 +521,8 @@ struct gve_tx_ring {
/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
u32 free_tx_qpl_buf_cnt;
};
+
+ atomic_t xsk_reorder_queue_tail;
} dqo_tx;
};
@@ -546,6 +556,9 @@ struct gve_tx_ring {
/* Last TX ring index fetched by HW */
atomic_t hw_tx_head;
+ u16 xsk_reorder_queue_head;
+ u16 xsk_reorder_queue_tail;
+
/* List to track pending packets which received a miss
* completion but not a corresponding reinjection.
*/
@@ -599,6 +612,8 @@ struct gve_tx_ring {
struct gve_tx_pending_packet_dqo *pending_packets;
s16 num_pending_packets;
+ u16 *xsk_reorder_queue;
+
u32 complq_mask; /* complq size is complq_mask + 1 */
/* QPL fields */
@@ -803,6 +818,7 @@ struct gve_priv {
struct gve_tx_queue_config tx_cfg;
struct gve_rx_queue_config rx_cfg;
+ unsigned long *xsk_pools; /* bitmap of RX queues with XSK pools */
u32 num_ntfy_blks; /* split between TX and RX so must be even */
int numa_node;
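The new 'xsk_pools' bitmap lets gve remember which RX queues have an XSK pool attached independently of whether the rings are currently allocated; gve_get_xsk_pool() in the gve_main.c hunk below consults it before asking the core for the pool. A hedged sketch of how such a bitmap is typically managed (field names and error handling are illustrative):

	priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
	if (!priv->xsk_pools)
		return -ENOMEM;

	/* when a pool is attached to / detached from queue 'qid' */
	set_bit(qid, priv->xsk_pools);
	clear_bit(qid, priv->xsk_pools);

	/* lookup, as in gve_get_xsk_pool() */
	if (test_bit(qid, priv->xsk_pools))
		pool = xsk_get_pool_from_qid(priv->dev, qid);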
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 6c3c459a1b5e..8f5021e59e0a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google, Inc.
*/
+#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_utils.h"
@@ -29,6 +30,10 @@ struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
/* Point buf_state to itself to mark it as allocated */
buf_state->next = buffer_id;
+ /* Clear the buffer pointers */
+ buf_state->page_info.page = NULL;
+ buf_state->xsk_buff = NULL;
+
return buf_state;
}
@@ -286,7 +291,24 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
{
struct gve_rx_buf_state_dqo *buf_state;
- if (rx->dqo.page_pool) {
+ if (rx->xsk_pool) {
+ buf_state = gve_alloc_buf_state(rx);
+ if (unlikely(!buf_state))
+ return -ENOMEM;
+
+ buf_state->xsk_buff = xsk_buff_alloc(rx->xsk_pool);
+ if (unlikely(!buf_state->xsk_buff)) {
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ /* Allocated xsk buffer. Clear wakeup in case it was set. */
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+ desc->buf_addr =
+ cpu_to_le64(xsk_buff_xdp_get_dma(buf_state->xsk_buff));
+ return 0;
+ } else if (rx->dqo.page_pool) {
buf_state = gve_alloc_buf_state(rx);
if (WARN_ON_ONCE(!buf_state))
return -ENOMEM;
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index bb278727f4d9..6eb442096e02 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -38,6 +38,7 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
bool gve_xdp_poll_dqo(struct gve_notify_block *block);
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index be461751ff31..1f411d7c4373 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2024 Google LLC
*/
+#include <linux/bitmap.h>
#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
@@ -426,6 +427,12 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
if (block->rx) {
work_done = gve_rx_poll_dqo(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Set up re-polling based on
+ * whether either datapath has more work to do.
+ */
+ if (priv->xdp_prog)
+ reschedule |= gve_xsk_tx_poll_dqo(block, budget);
reschedule |= work_done == budget;
}
@@ -1158,18 +1165,84 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);
+static void gve_unreg_xsk_pool(struct gve_priv *priv, u16 qid)
+{
+ struct gve_rx_ring *rx;
+
+ if (!priv->rx)
+ return;
+
+ rx = &priv->rx[qid];
+ rx->xsk_pool = NULL;
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg_mem_model(&rx->xdp_rxq);
+
+ if (!priv->tx)
+ return;
+ priv->tx[gve_xdp_tx_queue_id(priv, qid)].xsk_pool = NULL;
+}
+
+static int gve_reg_xsk_pool(struct gve_priv *priv, struct net_device *dev,
+ struct xsk_buff_pool *pool, u16 qid)
+{
+ struct gve_rx_ring *rx;
+ u16 tx_qid;
+ int err;
+
+ rx = &priv->rx[qid];
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, pool);
+ if (err) {
+ gve_unreg_xsk_pool(priv, qid);
+ return err;
+ }
+
+ rx->xsk_pool = pool;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ priv->tx[tx_qid].xsk_pool = pool;
+
+ return 0;
+}
+
+static void gve_unreg_xdp_info(struct gve_priv *priv)
+{
+ int i;
+
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx)
+ return;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct gve_rx_ring *rx = &priv->rx[i];
+
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+
+ gve_unreg_xsk_pool(priv, i);
+ }
+}
+
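+/* Return the queue's XSK pool only if it was enabled via
+ * gve_xsk_pool_enable(). The xsk_pools bitmap persists across interface
+ * down/up, so enabled pools are re-registered whenever the rings are
+ * recreated.
+ */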
+static struct xsk_buff_pool *gve_get_xsk_pool(struct gve_priv *priv, int qid)
+{
+ if (!test_bit(qid, priv->xsk_pools))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, qid);
+}
+
static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
{
struct napi_struct *napi;
struct gve_rx_ring *rx;
int err = 0;
- int i, j;
- u32 tx_qid;
+ int i;
if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct xsk_buff_pool *xsk_pool;
+
rx = &priv->rx[i];
napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
@@ -1177,7 +1250,11 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- if (gve_is_qpl(priv))
+
+ xsk_pool = gve_get_xsk_pool(priv, i);
+ if (xsk_pool)
+ err = gve_reg_xsk_pool(priv, dev, xsk_pool, i);
+ else if (gve_is_qpl(priv))
err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
@@ -1187,60 +1264,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
rx->dqo.page_pool);
if (err)
goto err;
- rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
- if (rx->xsk_pool) {
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
- napi->napi_id);
- if (err)
- goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
- if (err)
- goto err;
- xsk_pool_set_rxq_info(rx->xsk_pool,
- &rx->xsk_rxq);
- }
- }
-
- for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
return 0;
err:
- for (j = i; j >= 0; j--) {
- rx = &priv->rx[j];
- if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- }
+ gve_unreg_xdp_info(priv);
return err;
}
-static void gve_unreg_xdp_info(struct gve_priv *priv)
-{
- int i, tx_qid;
-
- if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
- return;
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- struct gve_rx_ring *rx = &priv->rx[i];
-
- xdp_rxq_info_unreg(&rx->xdp_rxq);
- if (rx->xsk_pool) {
- xdp_rxq_info_unreg(&rx->xsk_rxq);
- rx->xsk_pool = NULL;
- }
- }
-
- for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
- tx_qid = gve_xdp_tx_queue_id(priv, i);
- priv->tx[tx_qid].xsk_pool = NULL;
- }
-}
static void gve_drain_page_cache(struct gve_priv *priv)
{
@@ -1555,9 +1586,6 @@ static int gve_xsk_pool_enable(struct net_device *dev,
u16 qid)
{
struct gve_priv *priv = netdev_priv(dev);
- struct napi_struct *napi;
- struct gve_rx_ring *rx;
- int tx_qid;
int err;
if (qid >= priv->rx_cfg.num_queues) {
@@ -1575,34 +1603,31 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
+ set_bit(qid, priv->xsk_pools);
+
/* If XDP prog is not installed or interface is down, return. */
if (!priv->xdp_prog || !netif_running(dev))
return 0;
- rx = &priv->rx[qid];
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
- if (err)
- goto err;
-
- err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
- MEM_TYPE_XSK_BUFF_POOL, NULL);
+ err = gve_reg_xsk_pool(priv, dev, pool, qid);
if (err)
- goto err;
-
- xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
- rx->xsk_pool = pool;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- priv->tx[tx_qid].xsk_pool = pool;
+ goto err_xsk_pool_dma_mapped;
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv)) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto err_xsk_pool_registered;
+ }
return 0;
-err:
- if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
- xdp_rxq_info_unreg(&rx->xsk_rxq);
+err_xsk_pool_registered:
+ gve_unreg_xsk_pool(priv, qid);
+err_xsk_pool_dma_mapped:
+ clear_bit(qid, priv->xsk_pools);
xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
return err;
}
@@ -1614,18 +1639,28 @@ static int gve_xsk_pool_disable(struct net_device *dev,
struct napi_struct *napi_tx;
struct xsk_buff_pool *pool;
int tx_qid;
+ int err;
- pool = xsk_get_pool_from_qid(dev, qid);
- if (!pool)
- return -EINVAL;
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed or interface is down, unmap DMA and
- * return.
- */
- if (!priv->xdp_prog || !netif_running(dev))
- goto done;
+ clear_bit(qid, priv->xsk_pools);
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (pool)
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC |
+ DMA_ATTR_WEAK_ORDERING);
+
+ if (!netif_running(dev) || !priv->tx_cfg.num_xdp_queues)
+ return 0;
+
+ /* Stop and start RDA queues to repost buffers. */
+ if (!gve_is_qpl(priv) && priv->xdp_prog) {
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ if (err)
+ return err;
+ }
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
@@ -1634,22 +1669,19 @@ static int gve_xsk_pool_disable(struct net_device *dev,
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ gve_unreg_xsk_pool(priv, qid);
smp_mb(); /* Make sure it is visible to the workers on datapath */
napi_enable(napi_rx);
- if (gve_rx_work_pending(&priv->rx[qid]))
- napi_schedule(napi_rx);
-
napi_enable(napi_tx);
- if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
- napi_schedule(napi_tx);
+ if (gve_is_gqi(priv)) {
+ if (gve_rx_work_pending(&priv->rx[qid]))
+ napi_schedule(napi_rx);
+
+ if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+ napi_schedule(napi_tx);
+ }
-done:
- xsk_pool_dma_unmap(pool,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
return 0;
}
@@ -1956,49 +1988,56 @@ static void gve_turnup_and_check_status(struct gve_priv *priv)
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
-static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
+ unsigned int txqueue)
{
- struct gve_notify_block *block;
- struct gve_tx_ring *tx = NULL;
- struct gve_priv *priv;
- u32 last_nic_done;
- u32 current_time;
u32 ntfy_idx;
- netdev_info(dev, "Timeout on tx queue, %d", txqueue);
- priv = netdev_priv(dev);
if (txqueue > priv->tx_cfg.num_queues)
- goto reset;
+ return NULL;
ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
if (ntfy_idx >= priv->num_ntfy_blks)
- goto reset;
+ return NULL;
+
+ return &priv->ntfy_blocks[ntfy_idx];
+}
+
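+/* Try to recover from a TX timeout by scheduling the queue's NAPI, rate
+ * limited to once per MIN_TX_TIMEOUT_GAP per queue. Returns false when the
+ * caller should fall back to a full device reset.
+ */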
+static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
+ unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ u32 current_time;
- block = &priv->ntfy_blocks[ntfy_idx];
- tx = block->tx;
+ block = gve_get_tx_notify_block(priv, txqueue);
+
+ if (!block)
+ return false;
current_time = jiffies_to_msecs(jiffies);
- if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
- goto reset;
+ if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ return false;
- /* Check to see if there are missed completions, which will allow us to
- * kick the queue.
- */
- last_nic_done = gve_tx_load_event_counter(priv, tx);
- if (last_nic_done - tx->done) {
- netdev_info(dev, "Kicking queue %d", txqueue);
- iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
- napi_schedule(&block->napi);
- tx->last_kick_msec = current_time;
- goto out;
- } // Else reset.
+ netdev_info(priv->dev, "Kicking queue %d", txqueue);
+ napi_schedule(&block->napi);
+ block->tx->last_kick_msec = current_time;
+ return true;
+}
-reset:
- gve_schedule_reset(priv);
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ struct gve_priv *priv;
-out:
- if (tx)
- tx->queue_timeout++;
+ netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+ priv = netdev_priv(dev);
+
+ if (!gve_tx_timeout_try_q_kick(priv, txqueue))
+ gve_schedule_reset(priv);
+
+ block = gve_get_tx_notify_block(priv, txqueue);
+ if (block)
+ block->tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
@@ -2275,6 +2314,7 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
} else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
xdp_features = NETDEV_XDP_ACT_BASIC;
xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
xdp_features = 0;
}
@@ -2370,10 +2410,22 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
setup_device:
+ priv->xsk_pools = bitmap_zalloc(priv->rx_cfg.max_queues, GFP_KERNEL);
+ if (!priv->xsk_pools) {
+ err = -ENOMEM;
+ goto err;
+ }
+
gve_set_netdev_xdp_features(priv);
err = gve_setup_device_resources(priv);
- if (!err)
- return 0;
+ if (err)
+ goto err_free_xsk_bitmap;
+
+ return 0;
+
+err_free_xsk_bitmap:
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
err:
gve_adminq_free(&priv->pdev->dev, priv);
return err;
@@ -2383,6 +2435,8 @@ static void gve_teardown_priv_resources(struct gve_priv *priv)
{
gve_teardown_device_resources(priv);
gve_adminq_free(&priv->pdev->dev, priv);
+ bitmap_free(priv->xsk_pools);
+ priv->xsk_pools = NULL;
}
static void gve_trigger_reset(struct gve_priv *priv)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index afaa822b1227..7380c2b7a2d8 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -16,6 +16,7 @@
#include <net/ip6_checksum.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
{
@@ -149,6 +150,10 @@ void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
gve_free_to_page_pool(rx, bs, false);
else
gve_free_qpl_page_dqo(bs);
+ if (gve_buf_state_is_allocated(rx, bs) && bs->xsk_buff) {
+ xsk_buff_free(bs->xsk_buff);
+ bs->xsk_buff = NULL;
+ }
}
if (rx->dqo.qpl) {
@@ -580,8 +585,11 @@ static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
int err;
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ if (rx->xsk_pool)
+ xsk_buff_free(xdp);
return -ENOSPC;
+ }
tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
tx = &priv->tx[tx_qid];
@@ -592,6 +600,41 @@ static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
return err;
}
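+/* Resolve the XDP verdict for a zero-copy RX buffer: XDP_TX and XDP_REDIRECT
+ * hand the xdp_buff off to the TX/redirect path (bumping the error counters
+ * if that fails); all other verdicts free it back to the pool.
+ */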
+static void gve_xsk_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act)
+{
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ xsk_buff_free(xdp);
+ break;
+ case XDP_TX:
+ if (unlikely(gve_xdp_tx_dqo(priv, rx, xdp)))
+ goto err;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(priv->dev, xdp, xprog)))
+ goto err;
+ break;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+}
+
static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
struct xdp_buff *xdp, struct bpf_prog *xprog,
int xdp_act,
@@ -633,6 +676,48 @@ err:
return;
}
+static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, int buf_len,
+ struct bpf_prog *xprog)
+{
+ struct xdp_buff *xdp = buf_state->xsk_buff;
+ struct gve_priv *priv = rx->gve;
+ int xdp_act;
+
+ xdp->data_end = xdp->data + buf_len;
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ if (xprog) {
+ xdp_act = bpf_prog_run_xdp(xprog, xdp);
+ buf_len = xdp->data_end - xdp->data;
+ if (xdp_act != XDP_PASS) {
+ gve_xsk_done_dqo(priv, rx, xdp, xprog, xdp_act);
+ gve_free_buf_state(rx, buf_state);
+ return 0;
+ }
+ }
+
+ /* Copy the data to skb */
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ xdp->data, buf_len);
+ if (unlikely(!rx->ctx.skb_head)) {
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+ return -ENOMEM;
+ }
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+
+	/* Free the XSK buffer and the buffer state */
+ xsk_buff_free(xdp);
+ gve_free_buf_state(rx, buf_state);
+
+ /* Update Stats */
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ return 0;
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -671,7 +756,11 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
buf_len = compl_desc->packet_len;
hdr_len = compl_desc->header_len;
- /* Page might have not been used for a while and was likely last written
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (buf_state->xsk_buff)
+ return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog);
+
+	/* Page might not have been used for a while and was likely last written
* by a different thread.
*/
if (rx->dqo.page_pool) {
@@ -721,7 +810,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- xprog = READ_ONCE(priv->xdp_prog);
if (xprog) {
struct xdp_buff xdp;
void *old_data;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index ce5370b741ec..6f1d515673d2 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -13,6 +13,7 @@
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <net/xdp_sock_drv.h>
/* Returns true if tx_bufs are available. */
static bool gve_has_free_tx_qpl_bufs(struct gve_tx_ring *tx, int count)
@@ -241,6 +242,9 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_ring = NULL;
}
+ kvfree(tx->dqo.xsk_reorder_queue);
+ tx->dqo.xsk_reorder_queue = NULL;
+
kvfree(tx->dqo.pending_packets);
tx->dqo.pending_packets = NULL;
@@ -345,6 +349,17 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
tx->dqo.pending_packets[tx->dqo.num_pending_packets - 1].next = -1;
atomic_set_release(&tx->dqo_compl.free_pending_packets, -1);
+
+	/* Only allocate the XSK reorder queue for XDP queues */
+ if (idx >= cfg->qcfg->num_queues && cfg->num_xdp_rings) {
+ tx->dqo.xsk_reorder_queue =
+ kvcalloc(tx->dqo.complq_mask + 1,
+ sizeof(tx->dqo.xsk_reorder_queue[0]),
+ GFP_KERNEL);
+ if (!tx->dqo.xsk_reorder_queue)
+ goto err;
+ }
+
tx->dqo_compl.miss_completions.head = -1;
tx->dqo_compl.miss_completions.tail = -1;
tx->dqo_compl.timed_out_completions.head = -1;
@@ -992,6 +1007,38 @@ drop:
return 0;
}
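+/* The XSK reorder queue is a single-producer/single-consumer ring of
+ * completion tags: the TX path pushes tags in posting order under xdp_lock
+ * and publishes the tail with a release store, while the completion path
+ * refreshes its cached tail with an acquire load and pops entries only once
+ * they reach GVE_PACKET_STATE_XSK_COMPLETE, so xsk_tx_completed() is always
+ * reported in order.
+ */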
+static void gve_xsk_reorder_queue_push_dqo(struct gve_tx_ring *tx,
+ u16 completion_tag)
+{
+ u32 tail = atomic_read(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ tx->dqo.xsk_reorder_queue[tail] = completion_tag;
+ tail = (tail + 1) & tx->dqo.complq_mask;
+ atomic_set_release(&tx->dqo_tx.xsk_reorder_queue_tail, tail);
+}
+
+static struct gve_tx_pending_packet_dqo *
+gve_xsk_reorder_queue_head(struct gve_tx_ring *tx)
+{
+ u32 head = tx->dqo_compl.xsk_reorder_queue_head;
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail) {
+ tx->dqo_compl.xsk_reorder_queue_tail =
+ atomic_read_acquire(&tx->dqo_tx.xsk_reorder_queue_tail);
+
+ if (head == tx->dqo_compl.xsk_reorder_queue_tail)
+ return NULL;
+ }
+
+ return &tx->dqo.pending_packets[tx->dqo.xsk_reorder_queue[head]];
+}
+
+static void gve_xsk_reorder_queue_pop_dqo(struct gve_tx_ring *tx)
+{
+ tx->dqo_compl.xsk_reorder_queue_head++;
+ tx->dqo_compl.xsk_reorder_queue_head &= tx->dqo.complq_mask;
+}
+
/* Transmit a given skb and ring the doorbell. */
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
{
@@ -1015,6 +1062,62 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
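+/* Zero-copy XSK TX: under xdp_lock, peek up to budget descriptors from the
+ * pool, post one DQO packet descriptor per XSK descriptor using the pool's
+ * DMA address, then ring the doorbell once for the whole batch. Returns true
+ * when the caller should re-poll (budget exhausted or descriptor ring full).
+ */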
+static bool gve_xsk_tx_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int budget)
+{
+ struct xsk_buff_pool *pool = tx->xsk_pool;
+ struct xdp_desc desc;
+ bool repoll = false;
+ int sent = 0;
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (; sent < budget; sent++) {
+ struct gve_tx_pending_packet_dqo *pkt;
+ s16 completion_tag;
+ dma_addr_t addr;
+ u32 desc_idx;
+
+ if (unlikely(!gve_has_avail_slots_tx_dqo(tx, 1, 1))) {
+ repoll = true;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &desc))
+ break;
+
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XSK;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, addr, desc.len);
+
+ desc_idx = tx->dqo_tx.tail;
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ true, desc.len,
+ addr, completion_tag, true,
+ false);
+ ++pkt->num_bufs;
+ gve_tx_update_tail(tx, desc_idx);
+ tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
+ gve_xsk_reorder_queue_push_dqo(tx, completion_tag);
+ }
+
+ if (sent) {
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+ xsk_tx_release(pool);
+ }
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+
+ return (sent == budget) || repoll;
+}
+
static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
struct gve_tx_pending_packet_dqo *pending_packet)
{
@@ -1152,6 +1255,9 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
pending_packet->xdpf = NULL;
gve_free_pending_packet(tx, pending_packet);
break;
+ case GVE_TX_PENDING_PACKET_DQO_XSK:
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ break;
default:
WARN_ON_ONCE(1);
}
@@ -1251,8 +1357,34 @@ static void remove_timed_out_completions(struct gve_priv *priv,
remove_from_list(tx, &tx->dqo_compl.timed_out_completions,
pending_packet);
+
+ /* Need to count XSK packets in xsk_tx_completed. */
+ if (pending_packet->type == GVE_TX_PENDING_PACKET_DQO_XSK)
+ pending_packet->state = GVE_PACKET_STATE_XSK_COMPLETE;
+ else
+ gve_free_pending_packet(tx, pending_packet);
+ }
+}
+
+static void gve_tx_process_xsk_completions(struct gve_tx_ring *tx)
+{
+ u32 num_xsks = 0;
+
+ while (true) {
+ struct gve_tx_pending_packet_dqo *pending_packet =
+ gve_xsk_reorder_queue_head(tx);
+
+ if (!pending_packet ||
+ pending_packet->state != GVE_PACKET_STATE_XSK_COMPLETE)
+ break;
+
+ num_xsks++;
+ gve_xsk_reorder_queue_pop_dqo(tx);
gve_free_pending_packet(tx, pending_packet);
}
+
+ if (num_xsks)
+ xsk_tx_completed(tx->xsk_pool, num_xsks);
}
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
@@ -1333,6 +1465,9 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
remove_miss_completions(priv, tx);
remove_timed_out_completions(priv, tx);
+ if (tx->xsk_pool)
+ gve_tx_process_xsk_completions(tx);
+
u64_stats_update_begin(&tx->statss);
tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
@@ -1365,6 +1500,19 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}
+bool gve_xsk_tx_poll_dqo(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool)
+ return gve_xsk_tx_dqo(priv, tx, budget);
+
+	return false;
+}
+
bool gve_xdp_poll_dqo(struct gve_notify_block *block)
{
struct gve_tx_compl_desc *compl_desc;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 52f42fe1d56f..dd3ddb223a92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -1039,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1080,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -4874,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
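+/* On V3+ devices whose DMA goes through an IOMMU DMA domain, raise
+ * tx_copybreak and the TX spare buffer size to at least the minimums defined
+ * below so that packets are bounced through the pre-mapped TX spare buffer
+ * rather than being mapped individually.
+ */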
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+ struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+ struct hnae3_handle *handle = priv->ae_handle;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return;
+
+ if (!(domain && iommu_is_dma_domain(domain)))
+ return;
+
+ priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+ priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+ if (priv->tx_copybreak < priv->min_tx_copybreak)
+ priv->tx_copybreak = priv->min_tx_copybreak;
+ if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+ handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5107,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
+ hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5313,6 +5342,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ priv->min_tx_copybreak = 0;
+ priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d3bad5d1b888..933e3527ed82 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -596,6 +596,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
+ u32 min_tx_copybreak;
+ u32 min_tx_spare_buf_size;
};
union l3_hdr_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d3c71bc1855d..be917e4a8372 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9576,33 +9576,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
return false;
}
-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+ bool request_en)
{
- struct hclge_dev *hdev = vport->back;
bool need_en;
int ret;
- mutex_lock(&hdev->vport_lock);
-
- vport->req_vlan_fltr_en = request_en;
-
need_en = hclge_need_enable_vport_vlan_filter(vport);
- if (need_en == vport->cur_vlan_fltr_en) {
- mutex_unlock(&hdev->vport_lock);
+ if (need_en == vport->cur_vlan_fltr_en)
return 0;
- }
ret = hclge_set_vport_vlan_filter(vport, need_en);
- if (ret) {
- mutex_unlock(&hdev->vport_lock);
+ if (ret)
return ret;
- }
vport->cur_vlan_fltr_en = need_en;
+ return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+ vport->req_vlan_fltr_en = request_en;
+ ret = __hclge_enable_vport_vlan_filter(vport, request_en);
mutex_unlock(&hdev->vport_lock);
- return 0;
+ return ret;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
@@ -10623,16 +10626,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
&vport->state))
continue;
- ret = hclge_enable_vport_vlan_filter(vport,
- vport->req_vlan_fltr_en);
+ mutex_lock(&hdev->vport_lock);
+ ret = __hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to sync vlan filter state for vport%u, ret = %d\n",
vport->vport_id, ret);
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
+ mutex_unlock(&hdev->vport_lock);
return;
}
+ mutex_unlock(&hdev->vport_lock);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index ec581d4b696f..4bd52eab3914 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -497,14 +497,14 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts mode, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ktime_get_real_ts64(&ts);
@@ -512,7 +512,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts time, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
@@ -520,6 +520,9 @@ int hclge_ptp_init(struct hclge_dev *hdev)
return 0;
+out_clear_int:
+ clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ hclge_ptp_int_en(hdev, false);
out:
hclge_ptp_destroy_clock(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 33136a1e02cf..8fcf220a120d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -3094,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *nic = &hdev->nic;
- struct hnae3_knic_private_info *kinfo = &nic->kinfo;
-
- return min_t(u32, hdev->rss_size_max,
- hdev->num_tqps / kinfo->tc_info.num_tc);
+ return min(hdev->rss_size_max, hdev->num_tqps);
}
/**
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 24046fe16634..6f0821f1e798 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -211,98 +211,169 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
struct ibmveth_buff_pool *pool)
{
- u32 i;
- u32 count = pool->size - atomic_read(&pool->available);
- u32 buffers_added = 0;
- struct sk_buff *skb;
- unsigned int free_index, index;
- u64 correlator;
+ union ibmveth_buf_desc descs[IBMVETH_MAX_RX_PER_HCALL] = {0};
+ u32 remaining = pool->size - atomic_read(&pool->available);
+ u64 correlators[IBMVETH_MAX_RX_PER_HCALL] = {0};
unsigned long lpar_rc;
+ u32 buffers_added = 0;
+ u32 i, filled, batch;
+ struct vio_dev *vdev;
dma_addr_t dma_addr;
+ struct device *dev;
+ u32 index;
+
+ vdev = adapter->vdev;
+ dev = &vdev->dev;
mb();
- for (i = 0; i < count; ++i) {
- union ibmveth_buf_desc desc;
+ batch = adapter->rx_buffers_per_hcall;
- free_index = pool->consumer_index;
- index = pool->free_map[free_index];
- skb = NULL;
+ while (remaining > 0) {
+ unsigned int free_index = pool->consumer_index;
- if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
- schedule_work(&adapter->work);
- goto bad_index_failure;
- }
+ /* Fill a batch of descriptors */
+ for (filled = 0; filled < min(remaining, batch); filled++) {
+ index = pool->free_map[free_index];
+ if (WARN_ON(index == IBM_VETH_INVALID_MAP)) {
+ adapter->replenish_add_buff_failure++;
+ netdev_info(adapter->netdev,
+ "Invalid map index %u, reset\n",
+ index);
+ schedule_work(&adapter->work);
+ break;
+ }
+
+ if (!pool->skbuff[index]) {
+ struct sk_buff *skb = NULL;
- /* are we allocating a new buffer or recycling an old one */
- if (pool->skbuff[index])
- goto reuse;
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ adapter->replenish_no_mem++;
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
+
+ dma_addr = dma_map_single(dev, skb->data,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ adapter->replenish_add_buff_failure++;
+ break;
+ }
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ pool->dma_addr[index] = dma_addr;
+ pool->skbuff[index] = skb;
+ } else {
+ /* re-use case */
+ dma_addr = pool->dma_addr[index];
+ }
- if (!skb) {
- netdev_dbg(adapter->netdev,
- "replenish: unable to allocate skb\n");
- adapter->replenish_no_mem++;
- break;
- }
+ if (rx_flush) {
+ unsigned int len;
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- pool->buff_size, DMA_FROM_DEVICE);
+ len = adapter->netdev->mtu + IBMVETH_BUFF_OH;
+ len = min(pool->buff_size, len);
+ ibmveth_flush_buffer(pool->skbuff[index]->data,
+ len);
+ }
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto failure;
+ descs[filled].fields.flags_len = IBMVETH_BUF_VALID |
+ pool->buff_size;
+ descs[filled].fields.address = dma_addr;
- pool->dma_addr[index] = dma_addr;
- pool->skbuff[index] = skb;
+ correlators[filled] = ((u64)pool->index << 32) | index;
+ *(u64 *)pool->skbuff[index]->data = correlators[filled];
- if (rx_flush) {
- unsigned int len = min(pool->buff_size,
- adapter->netdev->mtu +
- IBMVETH_BUFF_OH);
- ibmveth_flush_buffer(skb->data, len);
+ free_index++;
+ if (free_index >= pool->size)
+ free_index = 0;
}
-reuse:
- dma_addr = pool->dma_addr[index];
- desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
- desc.fields.address = dma_addr;
-
- correlator = ((u64)pool->index << 32) | index;
- *(u64 *)pool->skbuff[index]->data = correlator;
- lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
- desc.desc);
+ if (!filled)
+ break;
+		/* Single-buffer case */
+ if (filled == 1)
+ lpar_rc = h_add_logical_lan_buffer(vdev->unit_address,
+ descs[0].desc);
+ else
+ /* Multi-buffer hcall */
+ lpar_rc = h_add_logical_lan_buffers(vdev->unit_address,
+ descs[0].desc,
+ descs[1].desc,
+ descs[2].desc,
+ descs[3].desc,
+ descs[4].desc,
+ descs[5].desc,
+ descs[6].desc,
+ descs[7].desc);
if (lpar_rc != H_SUCCESS) {
- netdev_warn(adapter->netdev,
- "%sadd_logical_lan failed %lu\n",
- skb ? "" : "When recycling: ", lpar_rc);
- goto failure;
+ dev_warn_ratelimited(dev,
+ "RX h_add_logical_lan failed: filled=%u, rc=%lu, batch=%u\n",
+ filled, lpar_rc, batch);
+ goto hcall_failure;
}
- pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- pool->consumer_index++;
- if (pool->consumer_index >= pool->size)
- pool->consumer_index = 0;
+ /* Only update pool state after hcall succeeds */
+ for (i = 0; i < filled; i++) {
+ free_index = pool->consumer_index;
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
- buffers_added++;
- adapter->replenish_add_buff_success++;
- }
+ pool->consumer_index++;
+ if (pool->consumer_index >= pool->size)
+ pool->consumer_index = 0;
+ }
- mb();
- atomic_add(buffers_added, &(pool->available));
- return;
+ buffers_added += filled;
+ adapter->replenish_add_buff_success += filled;
+ remaining -= filled;
-failure:
+ memset(&descs, 0, sizeof(descs));
+ memset(&correlators, 0, sizeof(correlators));
+ continue;
- if (dma_addr && !dma_mapping_error(&adapter->vdev->dev, dma_addr))
- dma_unmap_single(&adapter->vdev->dev,
- pool->dma_addr[index], pool->buff_size,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(pool->skbuff[index]);
- pool->skbuff[index] = NULL;
-bad_index_failure:
- adapter->replenish_add_buff_failure++;
+hcall_failure:
+ for (i = 0; i < filled; i++) {
+ index = correlators[i] & 0xffffffffUL;
+ dma_addr = pool->dma_addr[index];
+
+ if (pool->skbuff[index]) {
+ if (dma_addr &&
+ !dma_mapping_error(dev, dma_addr))
+ dma_unmap_single(dev, dma_addr,
+ pool->buff_size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(pool->skbuff[index]);
+ pool->skbuff[index] = NULL;
+ }
+ }
+ adapter->replenish_add_buff_failure += filled;
+
+ /*
+		 * If the multi-buffer RX hcall is no longer supported by FW,
+		 * e.g. after a Live Partition Migration
+ */
+ if (batch > 1 && lpar_rc == H_FUNCTION) {
+ /*
+			 * Instead of retrying each buffer individually here,
+			 * just set the max RX buffers per hcall to 1; the
+			 * buffers will be replenished the next time
+			 * ibmveth_replenish_buffer_pool() is called again,
+			 * using the single-buffer path.
+ */
+ netdev_info(adapter->netdev,
+ "RX Multi buffers not supported by FW, rc=%lu\n",
+ lpar_rc);
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_info(adapter->netdev,
+ "Next rx replesh will fall back to single-buffer hcall\n");
+ }
+ break;
+ }
mb();
atomic_add(buffers_added, &(pool->available));
@@ -1783,6 +1854,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->features |= NETIF_F_FRAGLIST;
}
+ if (ret == H_SUCCESS &&
+ (ret_attr & IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT)) {
+ adapter->rx_buffers_per_hcall = IBMVETH_MAX_RX_PER_HCALL;
+ netdev_dbg(netdev,
+ "RX Multi-buffer hcall supported by FW, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ } else {
+ adapter->rx_buffers_per_hcall = 1;
+ netdev_dbg(netdev,
+ "RX Single-buffer hcall mode, batch set to %u\n",
+ adapter->rx_buffers_per_hcall);
+ }
+
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index b0a2460ec9f9..068f99df133e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -28,6 +28,7 @@
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
+#define IBMVETH_ILLAN_RX_MULTI_BUFF_SUPPORT 0x0000000000040000UL
#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL
#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
@@ -46,6 +47,24 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
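+/* Post up to IBMVETH_MAX_RX_PER_HCALL (8) RX buffer descriptors with a single
+ * H_ADD_LOGICAL_LAN_BUFFERS hcall; unused descriptor slots are passed as 0.
+ */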
+static inline long h_add_logical_lan_buffers(unsigned long unit_address,
+ unsigned long desc1,
+ unsigned long desc2,
+ unsigned long desc3,
+ unsigned long desc4,
+ unsigned long desc5,
+ unsigned long desc6,
+ unsigned long desc7,
+ unsigned long desc8)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ return plpar_hcall9(H_ADD_LOGICAL_LAN_BUFFERS,
+ retbuf, unit_address,
+ desc1, desc2, desc3, desc4,
+ desc5, desc6, desc7, desc8);
+}
+
/* FW allows us to send 6 descriptors but we only use one so mark
* the other 5 as unused (0)
*/
@@ -101,6 +120,7 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
#define IBMVETH_MAX_QUEUES 16U
#define IBMVETH_DEFAULT_QUEUES 8U
+#define IBMVETH_MAX_RX_PER_HCALL 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -151,6 +171,7 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
+ unsigned int rx_buffers_per_hcall;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 8294a7c4f122..ba331899d186 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -638,6 +638,9 @@
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
+/* Uninitialized ("empty") checksum word value */
+#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF
+
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 364378133526..df4e7d781cb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4274,6 +4274,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_update_nvm_checksum(hw);
if (ret_val)
return ret_val;
+ } else if (hw->mac.type == e1000_pch_tgp) {
+ return 0;
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index e609f4df86f4..16369e6d245a 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -558,6 +558,12 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
checksum += nvm_data;
}
+ if (hw->mac.type == e1000_pch_tgp &&
+ nvm_data == NVM_CHECKSUM_UNINITIALIZED) {
+ e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n");
+ return 0;
+ }
+
if (checksum != (u16)NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b232edf68ab1..1a7e9532f8e3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3138,10 +3138,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
const u8 *addr = al->list[i].addr;
/* Allow to delete VF primary MAC only if it was not set
- * administratively by PF or if VF is trusted.
+ * administratively by PF.
*/
if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
- if (i40e_can_vf_change_mac(vf))
+ if (!vf->pf_set_mac)
was_unimac_deleted = true;
else
continue;
@@ -5008,7 +5008,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
vf_stats->broadcast = stats->rx_broadcast;
vf_stats->multicast = stats->rx_multicast;
vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
- vf_stats->tx_dropped = stats->tx_discards;
+ vf_stats->tx_dropped = stats->tx_errors;
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index aaf70c625655..363c42bf3dcf 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1216,7 +1216,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
struct page *buf_page = __netmem_to_page(rx_buffer->netmem);
- u32 hr = buf_page->pp->p.offset;
+ u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
struct sk_buff *skb;
void *va;
diff --git a/drivers/net/ethernet/intel/ice/devlink/port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index 767419a67fef..63fb36fc4b3d 100644
--- a/drivers/net/ethernet/intel/ice/devlink/port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -30,6 +30,8 @@ static const char *ice_devlink_port_opt_speed_str(u8 speed)
return "10";
case ICE_AQC_PORT_OPT_MAX_LANE_25G:
return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_40G:
+ return "40";
case ICE_AQC_PORT_OPT_MAX_LANE_50G:
return "50";
case ICE_AQC_PORT_OPT_MAX_LANE_100G:
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 97f9ebd62d93..39d99c2f7976 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1684,6 +1684,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
+#define ICE_AQC_PORT_OPT_MAX_LANE_40G 9
u8 global_scid[2];
u8 phy_scid[2];
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 1b435e108d3c..b542e1e0f0c9 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -171,6 +171,15 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E830_XXV_QSFP:
case ICE_DEV_ID_E830C_SFP:
case ICE_DEV_ID_E830_XXV_SFP:
+ case ICE_DEV_ID_E835CC_BACKPLANE:
+ case ICE_DEV_ID_E835CC_QSFP56:
+ case ICE_DEV_ID_E835CC_SFP:
+ case ICE_DEV_ID_E835C_BACKPLANE:
+ case ICE_DEV_ID_E835C_QSFP:
+ case ICE_DEV_ID_E835C_SFP:
+ case ICE_DEV_ID_E835_L_BACKPLANE:
+ case ICE_DEV_ID_E835_L_QSFP:
+ case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -4307,7 +4316,7 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
speed = options[active_idx].max_lane_speed;
/* If we don't get speed for this lane, it's unoccupied */
- if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_40G)
continue;
if (hw->pf_id == lport) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 59323c019544..351824dc3c62 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2301,6 +2301,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+ if (!buf_copy)
+ return ICE_DDP_PKG_ERR;
state = ice_init_pkg(hw, buf_copy, len);
if (!ice_is_init_pkg_successful(state)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 34fd604132f5..bd4e66df0372 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -6,6 +6,24 @@
/* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
+/* Intel(R) Ethernet Controller E835-CC for backplane */
+#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835CC_QSFP56 0x1249
+/* Intel(R) Ethernet Controller E835-CC for SFP */
+#define ICE_DEV_ID_E835CC_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-C for backplane */
+#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
+/* Intel(R) Ethernet Controller E835-C for QSFP */
+#define ICE_DEV_ID_E835C_QSFP 0x1262
+/* Intel(R) Ethernet Controller E835-C for SFP */
+#define ICE_DEV_ID_E835C_SFP 0x1263
+/* Intel(R) Ethernet Controller E835-L for backplane */
+#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
+/* Intel(R) Ethernet Controller E835-L for QSFP */
+#define ICE_DEV_ID_E835_L_QSFP 0x1266
+/* Intel(R) Ethernet Controller E835-L for SFP */
+#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index e54221fba849..58ed875093cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -667,7 +667,8 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
port_topology->serdes_lane_count = 4;
- else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
+ max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
port_topology->serdes_lane_count = 2;
else
port_topology->serdes_lane_count = 1;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index ed95072ca6e3..363ae79a3620 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -3043,16 +3043,16 @@ ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
* the ID value used here.
*/
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap)
{
- u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- u8 byte = 0;
- u8 prof_id;
int status;
+ u8 prof_id;
+ u16 ptype;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -3102,57 +3102,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u8 bit;
+ for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
+ u8 ptg;
- if (!ptypes[byte]) {
- bytes--;
- byte++;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
- }
- /* Examine 8 bits per byte */
- for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
- BITS_PER_BYTE) {
- u16 ptype;
- u8 ptg;
-
- ptype = byte * BITS_PER_BYTE + bit;
-
- /* The package should place all ptypes in a non-zero
- * PTG, so the following call should never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
- /* If PTG is already added, skip and continue */
- if (test_bit(ptg, ptgs_used))
- continue;
+ set_bit(ptg, ptgs_used);
+		/* Check to see if there are any attributes for this ptype, and
+ * add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
+ if (status == -ENOSPC)
+ break;
+ if (status) {
+			/* This is simply a ptype/PTG with no attribute */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- __set_bit(ptg, ptgs_used);
- /* Check to see there are any attributes for
- * this PTYPE, and add them if found.
- */
- status = ice_add_prof_attrib(prof, ptg, ptype,
- attr, attr_cnt);
- if (status == -ENOSPC)
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
- if (status) {
- /* This is simple a PTYPE/PTG with no
- * attribute
- */
- prof->ptg[prof->ptg_cnt] = ptg;
- prof->attr[prof->ptg_cnt].flags = 0;
- prof->attr[prof->ptg_cnt].mask = 0;
-
- if (++prof->ptg_cnt >=
- ICE_MAX_PTG_PER_PROFILE)
- break;
- }
}
-
- bytes--;
- byte++;
}
list_add(&prof->list, &hw->blk[blk].es.prof_map);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 28b0897adf32..ee5d9f9c9d53 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -39,9 +39,10 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 278e57686274..6d5c939dc8a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1421,7 +1421,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ status = ice_add_prof(hw, blk, prof_id, params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask, symm, true);
if (status) {
@@ -1617,7 +1617,7 @@ ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
break;
}
- status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ status = ice_add_prof(hw, blk, id, prof->ptypes,
params->attr, params->attr_cnt,
params->es, params->mask, false, false);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index d132eb477551..c8b4fa3efbd4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -823,6 +823,48 @@ cp_free:
}
/**
+ * ice_lag_prepare_vf_reset - helper to adjust vf lag for reset
+ * @lag: lag struct for interface that owns VF
+ *
+ * Context: must be called with the lag_mutex lock held.
+ *
+ * Return: active lport value or ICE_LAG_INVALID_PORT if nothing moved.
+ */
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
+{
+ u8 pri_prt, act_prt;
+
+ if (lag && lag->bonded && lag->primary && lag->upper_netdev) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT) {
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ return act_prt;
+ }
+ }
+
+ return ICE_LAG_INVALID_PORT;
+}
+
+/**
+ * ice_lag_complete_vf_reset - helper for lag after reset
+ * @lag: lag struct for primary interface
+ * @act_prt: which port should be active for lag
+ *
+ * Context: must be called while holding the lag_mutex.
+ */
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
+{
+ u8 pri_prt;
+
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ }
+}
+
+/**
* ice_lag_info_event - handle NETDEV_BONDING_INFO event
* @lag: LAG info struct
* @ptr: opaque data pointer
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index bab2c83142a1..69347d9f986b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -70,4 +70,6 @@ void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag);
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index af68869693ed..204e906af591 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5897,6 +5897,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
/* required last entry */
{}
};
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c639ce716d32..5ee74f3e82dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -859,16 +859,13 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
+ u8 act_prt;
bool rsd;
dev = ice_pf_to_dev(pf);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -884,16 +881,8 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
else
lockdep_assert_held(&vf->cfg_lock);
- lag = pf->lag;
mutex_lock(&pf->lag_mutex);
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
@@ -979,9 +968,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_reset_vf_mbx_cnt(vf);
out_unlock:
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
if (flags & ICE_VF_RESET_LOCK)
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 9460b6561b69..05511157c571 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1996,24 +1996,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
int i = -1, q_idx;
bool ena_ts;
+ u8 act_prt;
- lag = pf->lag;
mutex_lock(&pf->lag_mutex);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -2141,9 +2130,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
/* send the response to the VF */
@@ -2160,9 +2147,7 @@ error_param:
vf->vf_id, i);
}
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
ice_lag_move_new_vf_nodes(vf);
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 0cf9120d1f97..f4c0eaf9bde3 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -269,6 +269,12 @@ struct idpf_port_stats {
struct virtchnl2_vport_stats vport_stats;
};
+struct idpf_fsteer_fltr {
+ struct list_head list;
+ u32 loc;
+ u32 q_index;
+};
+
/**
* struct idpf_vport - Handle for netdevices and queue resources
* @num_txq: Number of allocated TX queues
@@ -400,9 +406,27 @@ struct idpf_rss_data {
};
/**
+ * struct idpf_q_coalesce - User defined coalescing configuration values for
+ * a single queue.
+ * @tx_intr_mode: Dynamic TX ITR or not
+ * @rx_intr_mode: Dynamic RX ITR or not
+ * @tx_coalesce_usecs: TX interrupt throttling rate
+ * @rx_coalesce_usecs: RX interrupt throttling rate
+ *
+ * Used to restore user coalescing configuration after a reset.
+ */
+struct idpf_q_coalesce {
+ u32 tx_intr_mode;
+ u32 rx_intr_mode;
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+};
+
+/**
* struct idpf_vport_user_config_data - User defined configuration values for
* each vport.
* @rss_data: See struct idpf_rss_data
+ * @q_coalesce: Array of per queue coalescing data
* @num_req_tx_qs: Number of user requested TX queues through ethtool
* @num_req_rx_qs: Number of user requested RX queues through ethtool
* @num_req_txq_desc: Number of user requested TX queue descriptors through
@@ -411,17 +435,22 @@ struct idpf_rss_data {
* ethtool
* @user_flags: User toggled config flags
* @mac_filter_list: List of MAC filters
+ * @num_fsteer_fltrs: Number of flow steering filters
+ * @flow_steer_list: List of flow steering filters
*
* Used to restore configuration after a reset as the vport will get wiped.
*/
struct idpf_vport_user_config_data {
struct idpf_rss_data rss_data;
+ struct idpf_q_coalesce *q_coalesce;
u16 num_req_tx_qs;
u16 num_req_rx_qs;
u32 num_req_txq_desc;
u32 num_req_rxq_desc;
DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
struct list_head mac_filter_list;
+ u32 num_fsteer_fltrs;
+ struct list_head flow_steer_list;
};
/**
@@ -667,16 +696,16 @@ static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter)
}
#define IDPF_CAP_RSS (\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_TCP |\
- VIRTCHNL2_CAP_RSS_IPV4_UDP |\
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_TCP |\
- VIRTCHNL2_CAP_RSS_IPV6_UDP |\
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |\
- VIRTCHNL2_CAP_RSS_IPV6_OTHER)
+ VIRTCHNL2_FLOW_IPV4_TCP |\
+ VIRTCHNL2_FLOW_IPV4_TCP |\
+ VIRTCHNL2_FLOW_IPV4_UDP |\
+ VIRTCHNL2_FLOW_IPV4_SCTP |\
+ VIRTCHNL2_FLOW_IPV4_OTHER |\
+ VIRTCHNL2_FLOW_IPV6_TCP |\
+ VIRTCHNL2_FLOW_IPV6_TCP |\
+ VIRTCHNL2_FLOW_IPV6_UDP |\
+ VIRTCHNL2_FLOW_IPV6_SCTP |\
+ VIRTCHNL2_FLOW_IPV6_OTHER)
#define IDPF_CAP_RSC (\
VIRTCHNL2_CAP_RSC_IPV4_TCP |\
@@ -960,4 +989,7 @@ void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info);
void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info,
enum iidc_rdma_event_type event_type);
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode);
#endif /* !_IDPF_H_ */
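The new struct idpf_q_coalesce array persists per-queue ITR settings in the vport config so that user settings survive a reset. A minimal sketch of the restore direction, mirroring the idpf_vport_intr_alloc() hunk in idpf_txrx.c further down (the helper name is illustrative only):

/* Illustrative only: copy persisted settings back into freshly allocated
 * queue vectors, one entry per vector index.
 */
static void example_restore_q_coalesce(struct idpf_vport *vport,
				       struct idpf_vport_user_config_data *uc)
{
	for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
		struct idpf_q_vector *qv = &vport->q_vectors[v_idx];
		struct idpf_q_coalesce *qc = &uc->q_coalesce[v_idx];

		qv->tx_itr_value = qc->tx_coalesce_usecs;
		qv->tx_intr_mode = qc->tx_intr_mode;
		qv->rx_itr_value = qc->rx_coalesce_usecs;
		qv->rx_intr_mode = qc->rx_intr_mode;
	}
}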
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index eaf7a2606faa..0eb812ac19c2 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_ptp.h"
+#include "idpf_virtchnl.h"
/**
* idpf_get_rxnfc - command to get RX flow classification rules
@@ -13,26 +14,312 @@
* Returns Success if the command is supported.
*/
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+ u32 *rule_locs)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_fsteer_fltr *f;
struct idpf_vport *vport;
+ unsigned int cnt = 0;
+ int err = 0;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vport->num_rxq;
- idpf_vport_ctrl_unlock(netdev);
-
- return 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ cmd->data = idpf_fsteer_max_rules(vport);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = -EINVAL;
+ list_for_each_entry(f, &user_config->flow_steer_list, list)
+ if (f->loc == cmd->fs.location) {
+ cmd->fs.ring_cookie = f->q_index;
+ err = 0;
+ break;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = idpf_fsteer_max_rules(vport);
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (cnt == cmd->rule_cnt) {
+ err = -EMSGSIZE;
+ break;
+ }
+ rule_locs[cnt] = f->loc;
+ cnt++;
+ }
+ if (!err)
+ cmd->rule_cnt = user_config->num_fsteer_fltrs;
+ break;
default:
break;
}
idpf_vport_ctrl_unlock(netdev);
- return -EOPNOTSUPP;
+ return err;
+}
+
+static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct iphdr *iph;
+
+ hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4);
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec;
+ iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask;
+ iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src;
+ iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst;
+}
+
+static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct udphdr *udph, *udpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP);
+
+ udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec;
+ udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ udph->source = fsp->h_u.udp_ip4_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip4_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip4_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip4_spec.pdst;
+ } else {
+ udph->source = fsp->h_u.udp_ip6_spec.psrc;
+ udph->dest = fsp->h_u.udp_ip6_spec.pdst;
+ udpm->source = fsp->m_u.udp_ip6_spec.psrc;
+ udpm->dest = fsp->m_u.udp_ip6_spec.pdst;
+ }
+}
+
+static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs,
+ struct ethtool_rx_flow_spec *fsp,
+ bool v4)
+{
+ struct tcphdr *tcph, *tcpm;
+
+ hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP);
+
+ tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec;
+ tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask;
+
+ if (v4) {
+ tcph->source = fsp->h_u.tcp_ip4_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip4_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip4_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst;
+ } else {
+ tcph->source = fsp->h_u.tcp_ip6_spec.psrc;
+ tcph->dest = fsp->h_u.tcp_ip6_spec.pdst;
+ tcpm->source = fsp->m_u.tcp_ip6_spec.psrc;
+ tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst;
+ }
+}
+
+/**
+ * idpf_add_flow_steer - add a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to add Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_add_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_fsteer_fltr *fltr, *parent = NULL, *f;
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_rule_action_set *acts;
+ struct virtchnl2_flow_rule_info *info;
+ struct virtchnl2_proto_hdrs *hdrs;
+ struct idpf_vport *vport;
+ u32 flow_type, q_index;
+ u16 num_rxq;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+ num_rxq = user_config->num_req_rx_qs;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ if (flow_type != fsp->flow_type)
+ return -EINVAL;
+
+ if (!idpf_sideband_action_ena(vport, fsp) ||
+ !idpf_sideband_flow_type_ena(vport, flow_type))
+ return -EOPNOTSUPP;
+
+ if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport))
+ return -ENOSPC;
+
+ q_index = fsp->ring_cookie;
+ if (q_index >= num_rxq)
+ return -EINVAL;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ hdrs = &info->rule_cfg.proto_hdrs;
+ hdrs->tunnel_level = 0;
+ hdrs->count = cpu_to_le32(2);
+
+ acts = &info->rule_cfg.action_set;
+ acts->count = cpu_to_le32(1);
+ acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
+ acts->actions[0].act_conf.q_id = cpu_to_le32(q_index);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_udp(hdrs, fsp, true);
+ break;
+ case TCP_V4_FLOW:
+ idpf_fsteer_fill_ipv4(hdrs, fsp);
+ idpf_fsteer_fill_tcp(hdrs, fsp, true);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_ADD_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
+ list_for_each_entry(f, &user_config->flow_steer_list, list) {
+ if (f->loc >= fltr->loc)
+ break;
+ parent = f;
+ }
+
+ if (parent)
+ list_add(&fltr->list, &parent->list);
+ else
+ list_add(&fltr->list, &user_config->flow_steer_list);
+
+ user_config->num_fsteer_fltrs++;
+
+out:
+ kfree(rule);
+ return err;
+}
+
+/**
+ * idpf_del_flow_steer - delete a Flow Steering filter
+ * @netdev: network interface device structure
+ * @cmd: command to delete Flow Steering filter
+ *
+ * Return: 0 on success and negative values for failure
+ */
+static int idpf_del_flow_steer(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct virtchnl2_flow_rule_add_del *rule;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_flow_rule_info *info;
+ struct idpf_fsteer_fltr *f, *iter;
+ struct idpf_vport *vport;
+ int err;
+
+ vport = idpf_netdev_to_vport(netdev);
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+
+ if (!idpf_sideband_action_ena(vport, fsp))
+ return -EOPNOTSUPP;
+
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ rule->vport_id = cpu_to_le32(vport->vport_id);
+ rule->count = cpu_to_le32(1);
+ info = &rule->rule_info[0];
+ info->rule_id = cpu_to_le32(fsp->location);
+
+ err = idpf_add_del_fsteer_filters(vport->adapter, rule,
+ VIRTCHNL2_OP_DEL_FLOW_RULE);
+ if (err)
+ goto out;
+
+ if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) {
+ err = -EIO;
+ goto out;
+ }
+
+ list_for_each_entry_safe(f, iter,
+ &user_config->flow_steer_list, list) {
+ if (f->loc == fsp->location) {
+ list_del(&f->list);
+ kfree(f);
+ user_config->num_fsteer_fltrs--;
+ goto out;
+ }
+ }
+ err = -EINVAL;
+
+out:
+ kfree(rule);
+ return err;
+}
+
+static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ idpf_vport_ctrl_lock(netdev);
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = idpf_add_flow_steer(netdev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = idpf_del_flow_steer(netdev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+ return ret;
}
/**
@@ -1090,12 +1377,14 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
+ * @q_coal: per queue coalesce settings
* @qv: queue vector for which itr values has to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ struct idpf_q_coalesce *q_coal,
struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
@@ -1139,20 +1428,25 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
if (is_rxq) {
qv->rx_itr_value = coalesce_usecs;
+ q_coal->rx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
- false);
+ q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, false);
}
} else {
qv->tx_itr_value = coalesce_usecs;
+ q_coal->tx_coalesce_usecs = coalesce_usecs;
if (use_adaptive_coalesce) {
qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC;
} else {
qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
- idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
+ q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, coalesce_usecs, true);
}
}
@@ -1165,6 +1459,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
/**
* idpf_set_q_coalesce - set ITR values for specific queue
* @vport: vport associated to the queue that need updating
+ * @q_coal: per queue coalesce settings
* @ec: coalesce settings to program the device with
* @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
* @is_rxq: is queue type rx
@@ -1172,6 +1467,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
* Return 0 on success, and negative on failure
*/
static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ struct idpf_q_coalesce *q_coal,
const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
@@ -1180,7 +1476,7 @@ static int idpf_set_q_coalesce(const struct idpf_vport *vport,
qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
idpf_find_txq_vec(vport, q_num);
- if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
+ if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq))
return -EINVAL;
return 0;
@@ -1201,9 +1497,13 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int i, err = 0;
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
@@ -1211,13 +1511,15 @@ static int idpf_set_coalesce(struct net_device *netdev,
goto unlock_mutex;
for (i = 0; i < vport->num_txq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, false);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, false);
if (err)
goto unlock_mutex;
}
for (i = 0; i < vport->num_rxq; i++) {
- err = idpf_set_q_coalesce(vport, ec, i, true);
+ q_coal = &user_config->q_coalesce[i];
+ err = idpf_set_q_coalesce(vport, q_coal, ec, i, true);
if (err)
goto unlock_mutex;
}
@@ -1239,20 +1541,25 @@ unlock_mutex:
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+ struct idpf_q_coalesce *q_coal;
struct idpf_vport *vport;
int err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+ q_coal = &user_config->q_coalesce[q_num];
- err = idpf_set_q_coalesce(vport, ec, q_num, false);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false);
if (err) {
idpf_vport_ctrl_unlock(netdev);
return err;
}
- err = idpf_set_q_coalesce(vport, ec, q_num, true);
+ err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true);
idpf_vport_ctrl_unlock(netdev);
@@ -1394,6 +1701,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.get_sset_count = idpf_get_sset_count,
.get_channels = idpf_get_channels,
.get_rxnfc = idpf_get_rxnfc,
+ .set_rxnfc = idpf_set_rxnfc,
.get_rxfh_key_size = idpf_get_rxfh_key_size,
.get_rxfh_indir_size = idpf_get_rxfh_indir_size,
.get_rxfh = idpf_get_rxfh,
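The new .get_rxnfc/.set_rxnfc pair exposes sideband flow steering through the standard ethtool ntuple interface. A self-contained userspace illustration, not part of the patch (interface name, port, queue and location are example values):

/* Insert a TCP/IPv4 rule steering dst-port 8000 to RX queue 2. */
#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = {};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(8000);	/* match dst port 8000 */
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;		/* full mask on the port */
	nfc.fs.ring_cookie = 2;				/* steer to RX queue 2 */
	nfc.fs.location = 0;				/* becomes the virtchnl rule_id */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}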
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 338aa1bab71e..2c2a3e85d693 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -804,6 +804,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
dflt_features |= NETIF_F_RXHASH;
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_FLOW_STEER) &&
+ idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
+ dflt_features |= NETIF_F_NTUPLE;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
csum_offloads |= NETIF_F_IP_CSUM;
if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
@@ -1130,8 +1134,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
+ num_max_q = max(max_q->max_txq, max_q->max_rxq);
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
+ struct idpf_q_coalesce *q_coal;
vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
if (!vport_config) {
@@ -1140,6 +1146,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
return NULL;
}
+ q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
+ if (!q_coal) {
+ kfree(vport_config);
+ kfree(vport);
+
+ return NULL;
+ }
+ for (int i = 0; i < num_max_q; i++) {
+ q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
+ q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
+ }
+ vport_config->user_config.q_coalesce = q_coal;
+
adapter->vport_config[idx] = vport_config;
}
@@ -1149,7 +1170,6 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- num_max_q = max(max_q->max_txq, max_q->max_rxq);
vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
if (!vport->q_vector_idxs)
goto free_vport;
@@ -1532,6 +1552,7 @@ void idpf_init_task(struct work_struct *work)
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+ INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
err = idpf_check_supported_desc_ids(vport);
if (err) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index b7422be3e967..dfe9126f1f4a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -62,6 +62,7 @@ destroy_wqs:
destroy_workqueue(adapter->vc_event_wq);
for (i = 0; i < adapter->max_vports; i++) {
+ kfree(adapter->vport_config[i]->user_config.q_coalesce);
kfree(adapter->vport_config[i]);
adapter->vport_config[i] = NULL;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index 4f8725c85332..ee21f2ff0cad 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -42,6 +42,13 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter)
direct,
mailbox);
+ /* Get the cross timestamp */
+ direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME;
+ mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB;
+ ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter,
+ direct,
+ mailbox);
+
/* Set the device clock time */
direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME;
@@ -171,6 +178,127 @@ static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk,
return 0;
}
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86)
+/**
+ * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values
+ * directly
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value
+ * @sys_time: 64bit system time value
+ */
+static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi;
+ struct idpf_ptp *ptp = adapter->ptp;
+
+ spin_lock(&ptp->read_dev_clk_lock);
+
+ idpf_ptp_enable_shtime(adapter);
+
+ dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l);
+ dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h);
+
+ sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l);
+ sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h);
+
+ spin_unlock(&ptp->read_dev_clk_lock);
+
+ *dev_time = (u64)dev_time_hi << 32 | dev_time_lo;
+ *sys_time = (u64)sys_time_hi << 32 | sys_time_lo;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values
+ * through mailbox
+ * @adapter: Driver specific private structure
+ * @dev_time: 64bit main timer value expressed in nanoseconds
+ * @sys_time: 64bit system time value expressed in nanoseconds
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter,
+ u64 *dev_time, u64 *sys_time)
+{
+ struct idpf_ptp_dev_timers cross_time;
+ int err;
+
+ err = idpf_ptp_get_cross_time(adapter, &cross_time);
+ if (err)
+ return err;
+
+ *dev_time = cross_time.dev_clk_time_ns;
+ *sys_time = cross_time.sys_time_ns;
+
+ return err;
+}
+
+/**
+ * idpf_ptp_get_sync_device_time - Get the cross time stamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * The device clock and the system clock are read simultaneously.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_get_sync_device_time(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct idpf_adapter *adapter = ctx;
+ u64 ns_time_dev, ns_time_sys;
+ int err;
+
+ switch (adapter->ptp->get_cross_tstamp_access) {
+ case IDPF_PTP_NONE:
+ return -EOPNOTSUPP;
+ case IDPF_PTP_DIRECT:
+ idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev,
+ &ns_time_sys);
+ break;
+ case IDPF_PTP_MAILBOX:
+ err = idpf_ptp_get_sync_device_time_mailbox(adapter,
+ &ns_time_dev,
+ &ns_time_sys);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *device = ns_to_ktime(ns_time_dev);
+
+ system->cs_id = IS_ENABLED(CONFIG_X86) ? CSID_X86_ART
+ : CSID_ARM_ARCH_COUNTER;
+ system->cycles = ns_time_sys;
+ system->use_nsecs = true;
+
+ return 0;
+}
+
+/**
+ * idpf_ptp_get_crosststamp - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill the cross timestamp info
+ *
+ * Capture a cross timestamp between the system time and the device PTP hardware
+ * clock.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+ return get_device_system_crosststamp(idpf_ptp_get_sync_device_time,
+ adapter, NULL, cts);
+}
+#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */
+
/**
* idpf_ptp_gettimex64 - Get the time of the clock
* @info: the driver's PTP info structure
@@ -661,6 +789,14 @@ static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
info->verify = idpf_ptp_verify_pin;
info->enable = idpf_ptp_gpio_enable;
info->do_aux_work = idpf_ptp_do_aux_work;
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER)
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#elif IS_ENABLED(CONFIG_X86)
+ if (pcie_ptm_enabled(adapter->pdev) &&
+ boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = idpf_ptp_get_crosststamp;
+#endif /* CONFIG_ARM_ARCH_TIMER */
}
/**
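Once info->getcrosststamp is populated, userspace can request matched device/system timestamps through the PTP character device. A generic illustration, not idpf-specific (the /dev/ptp0 node is an assumption):

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off = {};
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off) < 0) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}

	printf("device   %lld.%09u\n", (long long)off.device.sec,
	       off.device.nsec);
	printf("realtime %lld.%09u\n", (long long)off.sys_realtime.sec,
	       off.sys_realtime.nsec);
	return 0;
}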
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
index a876749d6116..785da03e4cf5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -21,6 +21,8 @@ struct idpf_ptp_cmd {
* @dev_clk_ns_h: high part of the device clock register
* @phy_clk_ns_l: low part of the PHY clock register
* @phy_clk_ns_h: high part of the PHY clock register
+ * @sys_time_ns_l: low part of the system time register
+ * @sys_time_ns_h: high part of the system time register
* @incval_l: low part of the increment value register
* @incval_h: high part of the increment value register
* @shadj_l: low part of the shadow adjust register
@@ -42,6 +44,10 @@ struct idpf_ptp_dev_clk_regs {
void __iomem *phy_clk_ns_l;
void __iomem *phy_clk_ns_h;
+ /* System time */
+ void __iomem *sys_time_ns_l;
+ void __iomem *sys_time_ns_h;
+
/* Main timer adjustments */
void __iomem *incval_l;
void __iomem *incval_h;
@@ -162,6 +168,7 @@ struct idpf_ptp_vport_tx_tstamp_caps {
* @dev_clk_regs: the set of registers to access the device clock
* @caps: PTP capabilities negotiated with the Control Plane
* @get_dev_clk_time_access: access type for getting the device clock time
+ * @get_cross_tstamp_access: access type for the cross timestamping
* @set_dev_clk_time_access: access type for setting the device clock time
* @adj_dev_clk_time_access: access type for adjusting the device clock
* @tx_tstamp_access: access type for the Tx timestamp value read
@@ -182,6 +189,7 @@ struct idpf_ptp {
struct idpf_ptp_dev_clk_regs dev_clk_regs;
u32 caps;
enum idpf_ptp_access get_dev_clk_time_access:2;
+ enum idpf_ptp_access get_cross_tstamp_access:2;
enum idpf_ptp_access set_dev_clk_time_access:2;
enum idpf_ptp_access adj_dev_clk_time_access:2;
enum idpf_ptp_access tx_tstamp_access:2;
@@ -264,6 +272,8 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time);
int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
@@ -305,6 +315,13 @@ idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
return -EOPNOTSUPP;
}
+static inline int
+idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
u64 time)
{
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index cef9dfb877e8..66a1b040639d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3276,8 +3276,10 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
hdr_page = __netmem_to_page(hdr->netmem);
buf_page = __netmem_to_page(buf->netmem);
- dst = page_address(hdr_page) + hdr->offset + hdr_page->pp->p.offset;
- src = page_address(buf_page) + buf->offset + buf_page->pp->p.offset;
+ dst = page_address(hdr_page) + hdr->offset +
+ pp_page_to_nmdesc(hdr_page)->pp->p.offset;
+ src = page_address(buf_page) + buf->offset +
+ pp_page_to_nmdesc(buf_page)->pp->p.offset;
memcpy(dst, src, LARGEST_ALIGN(copy));
buf->offset += copy;
@@ -3296,7 +3298,7 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
struct page *buf_page = __netmem_to_page(buf->netmem);
- u32 hr = buf_page->pp->p.offset;
+ u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
struct sk_buff *skb;
void *va;
@@ -4355,9 +4357,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
+ struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
+ struct idpf_q_coalesce *q_coal;
u32 complqs_per_vector, v_idx;
+ u16 idx = vport->idx;
+ user_config = &vport->adapter->vport_config[idx]->user_config;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
@@ -4375,14 +4381,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
+ q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
- q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
- q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->tx_itr_value = q_coal->tx_coalesce_usecs;
+ q_vector->tx_intr_mode = q_coal->tx_intr_mode;
q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
- q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
- q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->rx_itr_value = q_coal->rx_coalesce_usecs;
+ q_vector->rx_intr_mode = q_coal->rx_intr_mode;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 0d2199ac5c3e..a028c69f7fdc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -850,14 +850,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
caps.rss_caps =
- cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
- VIRTCHNL2_CAP_RSS_IPV4_UDP |
- VIRTCHNL2_CAP_RSS_IPV4_SCTP |
- VIRTCHNL2_CAP_RSS_IPV4_OTHER |
- VIRTCHNL2_CAP_RSS_IPV6_TCP |
- VIRTCHNL2_CAP_RSS_IPV6_UDP |
- VIRTCHNL2_CAP_RSS_IPV6_SCTP |
- VIRTCHNL2_CAP_RSS_IPV6_OTHER);
+ cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
+ VIRTCHNL2_FLOW_IPV4_UDP |
+ VIRTCHNL2_FLOW_IPV4_SCTP |
+ VIRTCHNL2_FLOW_IPV4_OTHER |
+ VIRTCHNL2_FLOW_IPV6_TCP |
+ VIRTCHNL2_FLOW_IPV6_UDP |
+ VIRTCHNL2_FLOW_IPV6_SCTP |
+ VIRTCHNL2_FLOW_IPV6_OTHER);
caps.hsplit_caps =
cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
@@ -1016,6 +1016,41 @@ static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
}
/**
+ * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
+ * @adapter: adapter info struct
+ * @rule: Flow steering rule to add/delete
+ * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or
+ * VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid.
+ *
+ * Send ADD/DELETE flow steering virtchnl message and receive the result.
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+ struct virtchnl2_flow_rule_add_del *rule,
+ enum virtchnl2_op opcode)
+{
+ int rule_count = le32_to_cpu(rule->count);
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
+ if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
+ opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
+ return -EINVAL;
+
+ xn_params.vc_op = opcode;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = false;
+ xn_params.send_buf.iov_base = rule;
+ xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
+ xn_params.recv_buf.iov_base = rule;
+ xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ return reply_sz < 0 ? reply_sz : 0;
+}
+
+/**
* idpf_vport_alloc_max_qs - Allocate max queues for a vport
* @adapter: Driver specific private structure
* @max_q: vport max queue structure
@@ -3643,6 +3678,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
}
/**
+ * idpf_vport_is_cap_ena - Check if vport capability is enabled
+ * @vport: Private data struct
+ * @flag: flag(s) to check
+ *
+ * Return: true if the capability is supported, false otherwise
+ */
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+
+ return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
+}
+
+/**
+ * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
+ * @vport: Private data struct
+ * @flow_type: flow type to check (from ethtool.h)
+ *
+ * Return: true if sideband filters are allowed for @flow_type, false otherwise
+ */
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ __le64 caps;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ caps = vport_msg->sideband_flow_caps;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
+ case UDP_V4_FLOW:
+ return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
+ default:
+ return false;
+ }
+}
+
+/**
+ * idpf_sideband_action_ena - Check if steering is enabled for action
+ * @vport: Private data struct
+ * @fsp: flow spec
+ *
+ * Return: true if sideband filters are allowed for @fsp, false otherwise
+ */
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ unsigned int supp_actions;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
+
+ /* Actions Drop/Wake are not supported */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie == RX_CLS_FLOW_WAKE)
+ return false;
+
+ return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
+}
+
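+/**
+ * idpf_fsteer_max_rules - Get the maximum number of flow steering rules
+ * @vport: Private data struct
+ *
+ * Return: max rules allowed for inline and sideband flow steering combined
+ */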
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+ return le32_to_cpu(vport_msg->flow_steer_max_rules);
+}
+
+/**
* idpf_get_vport_id: Get vport id
* @vport: virtual port structure
*
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 7bae09483aed..86f30f0db07a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -105,6 +105,12 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
int idpf_queue_reg_init(struct idpf_vport *vport);
int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag);
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type);
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+ struct ethtool_rx_flow_spec *fsp);
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport);
+
int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
index bdcc54a5fb56..4f1fb0cefe51 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -30,6 +30,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
.send_buf.iov_len = sizeof(send_ptp_caps_msg),
.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
};
+ struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
struct idpf_ptp_secondary_mbx *scnd_mbx;
@@ -71,7 +72,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
access_type = ptp->get_dev_clk_time_access;
if (access_type != IDPF_PTP_DIRECT)
- goto discipline_clock;
+ goto cross_tstamp;
clock_offsets = recv_ptp_caps_msg->clk_offsets;
@@ -90,6 +91,22 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter)
temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger);
ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+cross_tstamp:
+ access_type = ptp->get_cross_tstamp_access;
+ if (access_type != IDPF_PTP_DIRECT)
+ goto discipline_clock;
+
+ cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets;
+
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l);
+ ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h);
+ ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter,
+ temp_offset);
+ temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger);
+ ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset);
+
discipline_clock:
access_type = ptp->adj_dev_clk_time_access;
if (access_type != IDPF_PTP_DIRECT)
@@ -163,6 +180,42 @@ int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
}
/**
+ * idpf_ptp_get_cross_time - Send virtchnl get cross time message
+ * @adapter: Driver specific private structure
+ * @cross_time: Pointer to the device clock structure where the value is set
+ *
+ * Send virtchnl get cross time message to get the time of the clock and the
+ * system time.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+ struct idpf_ptp_dev_timers *cross_time)
+{
+ struct virtchnl2_ptp_get_cross_time cross_time_msg;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME,
+ .send_buf.iov_base = &cross_time_msg,
+ .send_buf.iov_len = sizeof(cross_time_msg),
+ .recv_buf.iov_base = &cross_time_msg,
+ .recv_buf.iov_len = sizeof(cross_time_msg),
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int reply_sz;
+
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz != sizeof(cross_time_msg))
+ return -EIO;
+
+ cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns);
+ cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns);
+
+ return 0;
+}
+
+/**
* idpf_ptp_set_dev_clk_time - Send virtchnl set device time message
* @adapter: Driver specific private structure
* @time: New time value
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 48d3cc9236a4..02ae447cc24a 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -80,6 +80,10 @@ enum virtchnl2_op {
VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549,
+ /* Opcode 550 is reserved */
+ VIRTCHNL2_OP_ADD_FLOW_RULE = 551,
+ VIRTCHNL2_OP_GET_FLOW_RULE = 552,
+ VIRTCHNL2_OP_DEL_FLOW_RULE = 553,
};
/**
@@ -153,22 +157,22 @@ enum virtchnl2_cap_seg {
VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
};
-/* Receive Side Scaling Flow type capability flags */
-enum virtchnl2_cap_rss {
- VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
- VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
- VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
- VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
- VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
- VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
- VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
- VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
- VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
- VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
- VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
- VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
- VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
- VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
+/* Receive Side Scaling and Flow Steering Flow type capability flags */
+enum virtchnl2_flow_types {
+ VIRTCHNL2_FLOW_IPV4_TCP = BIT(0),
+ VIRTCHNL2_FLOW_IPV4_UDP = BIT(1),
+ VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3),
+ VIRTCHNL2_FLOW_IPV6_TCP = BIT(4),
+ VIRTCHNL2_FLOW_IPV6_UDP = BIT(5),
+ VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7),
+ VIRTCHNL2_FLOW_IPV4_AH = BIT(8),
+ VIRTCHNL2_FLOW_IPV4_ESP = BIT(9),
+ VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10),
+ VIRTCHNL2_FLOW_IPV6_AH = BIT(11),
+ VIRTCHNL2_FLOW_IPV6_ESP = BIT(12),
+ VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13),
};
/* Header split capability flags */
@@ -194,8 +198,9 @@ enum virtchnl2_cap_other {
VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
- VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
- /* Queue based scheduling using split queue model */
+ /* Other capability 3 is available
+ * Queue based scheduling using split queue model
+ */
VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
VIRTCHNL2_CAP_CRC = BIT_ULL(5),
VIRTCHNL2_CAP_ADQ = BIT_ULL(6),
@@ -209,17 +214,37 @@ enum virtchnl2_cap_other {
/* EDT: Earliest Departure Time capability used for Timing Wheel */
VIRTCHNL2_CAP_EDT = BIT_ULL(14),
VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
- VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
+ /* Other capability 16 is available */
VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
- /* Other capability 20-21 is reserved */
+ /* Other capability 20 is reserved */
+ VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(21),
VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22),
/* this must be the last capability */
VIRTCHNL2_CAP_OEM = BIT_ULL(63),
};
+/**
+ * enum virtchnl2_action_types - Available actions for sideband flow steering
+ * @VIRTCHNL2_ACTION_DROP: Drop the packet
+ * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage
+ * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue
+ * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group
+ * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value
+ * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter
+ */
+enum virtchnl2_action_types {
+ VIRTCHNL2_ACTION_DROP = BIT(0),
+ VIRTCHNL2_ACTION_PASSTHRU = BIT(1),
+ VIRTCHNL2_ACTION_QUEUE = BIT(2),
+ VIRTCHNL2_ACTION_Q_GROUP = BIT(3),
+ VIRTCHNL2_ACTION_MARK = BIT(4),
+ VIRTCHNL2_ACTION_COUNT = BIT(5),
+};
+
/* underlying device type */
enum virtchl2_device_type {
VIRTCHNL2_MEV_DEVICE = 0,
@@ -461,7 +486,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
* @seg_caps: See enum virtchnl2_cap_seg.
* @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
* @rsc_caps: See enum virtchnl2_cap_rsc.
- * @rss_caps: See enum virtchnl2_cap_rss.
+ * @rss_caps: See enum virtchnl2_flow_types.
* @other_caps: See enum virtchnl2_cap_other.
* @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
* provided by CP.
@@ -578,11 +603,17 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
/**
* enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities.
* @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled
+ * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled
+ * with explicit Rx queue action
+ * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled
* @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport
*/
enum virtchnl2_vport_flags {
- VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
- /* VIRTCHNL2_VPORT_* bits [1:3] rsvd */
+ VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1),
+ VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2),
+ VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3),
VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4),
};
@@ -608,6 +639,14 @@ enum virtchnl2_vport_flags {
* @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
* @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
* @pad1: Padding.
+ * @inline_flow_caps: Bit mask of supported inline-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_caps: Bit mask of supported sideband-flow-steering
+ * flow types (See enum virtchnl2_flow_types)
+ * @sideband_flow_actions: Bit mask of supported action types
+ * for sideband flow steering (See enum virtchnl2_action_types)
+ * @flow_steer_max_rules: Max rules allowed for inline and sideband
+ * flow steering combined
* @rss_algorithm: RSS algorithm.
* @rss_key_size: RSS key size.
* @rss_lut_size: RSS LUT size.
@@ -640,7 +679,11 @@ struct virtchnl2_create_vport {
__le16 vport_flags;
__le64 rx_desc_ids;
__le64 tx_desc_ids;
- u8 pad1[72];
+ u8 pad1[48];
+ __le64 inline_flow_caps;
+ __le64 sideband_flow_caps;
+ __le32 sideband_flow_actions;
+ __le32 flow_steer_max_rules;
__le32 rss_algorithm;
__le16 rss_key_size;
__le16 rss_lut_size;
@@ -1615,4 +1658,156 @@ struct virtchnl2_get_lan_memory_regions {
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
+#define VIRTCHNL2_MAX_NUM_PROTO_HDRS 4
+#define VIRTCHNL2_MAX_SIZE_RAW_PACKET 256
+#define VIRTCHNL2_MAX_NUM_ACTIONS 8
+
+/**
+ * struct virtchnl2_proto_hdr - represent one protocol header
+ * @hdr_type: See enum virtchnl2_proto_hdr_type
+ * @pad: padding
+ * @buffer_spec: binary buffer based on header type.
+ * @buffer_mask: mask applied on buffer_spec.
+ *
+ * Structure to hold protocol headers based on hdr_type
+ */
+struct virtchnl2_proto_hdr {
+ __le32 hdr_type;
+ u8 pad[4];
+ u8 buffer_spec[64];
+ u8 buffer_mask[64];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr);
+
+/**
+ * struct virtchnl2_proto_hdrs - struct to represent match criteria
+ * @tunnel_level: specify where the protocol header(s) start from;
+ * must be 0 when sending a raw packet request.
+ * 0 - from the outer layer
+ * 1 - from the first inner layer
+ * 2 - from the second inner layer
+ * @pad: Padding bytes
+ * @count: total number of protocol headers in proto_hdr. 0 for raw packet.
+ * @proto_hdr: Array of protocol headers
+ * @raw: struct holding raw packet buffer when count is 0
+ */
+struct virtchnl2_proto_hdrs {
+ u8 tunnel_level;
+ u8 pad[3];
+ __le32 count;
+ union {
+ struct virtchnl2_proto_hdr
+ proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS];
+ struct {
+ __le16 pkt_len;
+ u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs);
+
+/**
+ * struct virtchnl2_rule_action - struct representing single action for a flow
+ * @action_type: see enum virtchnl2_action_types
+ * @act_conf: union representing action depending on action_type.
+ * @act_conf.q_id: queue id to redirect the packets to.
+ * @act_conf.q_grp_id: queue group id to redirect the packets to.
+ * @act_conf.ctr_id: used for the count action. If the input value is
+ * 0xFFFFFFFF, the control plane assigns a new counter and returns the
+ * counter ID to the driver. Otherwise it must be an existing counter ID
+ * previously given to the driver for an earlier flow, and this flow will
+ * share that counter.
+ * @act_conf.mark_id: Value used to mark the packets. Used for mark action.
+ * @act_conf.reserved: Reserved for future use.
+ */
+struct virtchnl2_rule_action {
+ __le32 action_type;
+ union {
+ __le32 q_id;
+ __le32 q_grp_id;
+ __le32 ctr_id;
+ __le32 mark_id;
+ u8 reserved[8];
+ } act_conf;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action);
+
+/**
+ * struct virtchnl2_rule_action_set - struct representing multiple actions
+ * @count: number of valid actions in the action set of a rule
+ * @actions: array of struct virtchnl2_rule_action
+ */
+struct virtchnl2_rule_action_set {
+ /* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */
+ __le32 count;
+ struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set);
+
+/**
+ * struct virtchnl2_flow_rule - represent one flow steering rule
+ * @proto_hdrs: array of protocol header buffers representing match criteria
+ * @action_set: series of actions to be applied for given rule
+ * @priority: rule priority.
+ * @pad: padding for future extensions.
+ */
+struct virtchnl2_flow_rule {
+ struct virtchnl2_proto_hdrs proto_hdrs;
+ struct virtchnl2_rule_action_set action_set;
+ __le32 priority;
+ u8 pad[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule);
+
+enum virtchnl2_flow_rule_status {
+ VIRTCHNL2_FLOW_RULE_SUCCESS = 1,
+ VIRTCHNL2_FLOW_RULE_NORESOURCE = 2,
+ VIRTCHNL2_FLOW_RULE_EXIST = 3,
+ VIRTCHNL2_FLOW_RULE_TIMEOUT = 4,
+ VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED = 5,
+ VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED = 6,
+ VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED = 7,
+ VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID = 8,
+ VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID = 9,
+ VIRTCHNL2_FLOW_RULE_NOT_ADDED = 10,
+};
+
+/**
+ * struct virtchnl2_flow_rule_info - structure representing a single flow rule
+ * @rule_id: rule_id associated with the flow_rule.
+ * @rule_cfg: structure representing rule.
+ * @status: status of rule programming. See enum virtchnl2_flow_rule_status.
+ */
+struct virtchnl2_flow_rule_info {
+ __le32 rule_id;
+ struct virtchnl2_flow_rule rule_cfg;
+ __le32 status;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info);
+
+/**
+ * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule
+ * @vport_id: vport id for which the rule is to be added or deleted.
+ * @count: Indicates number of rules to be added or deleted.
+ * @rule_info: Array of flow rules to be added or deleted.
+ *
+ * For VIRTCHNL2_OP_ADD_FLOW_RULE, rule_info contains a list of rules to be
+ * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached.
+ *
+ * For VIRTCHNL2_OP_DEL_FLOW_RULE, there are two possibilities: the structure
+ * can contain either an array of rule_ids or an array of match keys to be
+ * deleted. When match keys are used, the corresponding rule_ids must be
+ * 0xFFFFFFFF.
+ *
+ * The status member of each rule indicates the result. A maximum of 6 rules
+ * can be added or deleted per message. The driver has to retry on any ADD or
+ * DEL failure; the CP does not retry on failure.
+ */
+struct virtchnl2_flow_rule_add_del {
+ __le32 vport_id;
+ __le32 count;
+ struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del);
+
#endif /* _VIRTCHNL_2_H_ */
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index ba9c3fee6da7..da8e1fd47301 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -154,7 +154,6 @@ struct igbvf_ring {
/* board specific private data structure */
struct igbvf_adapter {
struct timer_list watchdog_timer;
- struct timer_list blink_timer;
struct work_struct reset_task;
struct work_struct watchdog_task;
@@ -162,10 +161,7 @@ struct igbvf_adapter {
const struct igbvf_info *ei;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 bd_number;
u32 rx_buffer_len;
- u32 polling_interval;
- u16 mng_vlan_id;
u16 link_speed;
u16 link_duplex;
@@ -183,9 +179,6 @@ struct igbvf_adapter {
unsigned int restart_queue;
u32 txd_cmd;
- u32 tx_int_delay;
- u32 tx_abs_int_delay;
-
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
@@ -193,23 +186,15 @@ struct igbvf_adapter {
/* Tx stats */
u32 tx_timeout_count;
- u32 tx_fifo_head;
- u32 tx_head_addr;
- u32 tx_fifo_size;
- u32 tx_dma_failed;
/* Rx */
struct igbvf_ring *rx_ring;
- u32 rx_int_delay;
- u32 rx_abs_int_delay;
-
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
- u32 rx_dma_failed;
unsigned int rx_ps_hdr_size;
u32 max_frame_size;
@@ -229,24 +214,14 @@ struct igbvf_adapter {
struct e1000_vf_stats stats;
u64 zero_base;
- struct igbvf_ring test_tx_ring;
- struct igbvf_ring test_rx_ring;
- u32 test_icr;
-
u32 msg_enable;
struct msix_entry *msix_entries;
- int int_mode;
u32 eims_enable_mask;
u32 eims_other;
- u32 eeprom_wol;
u32 wol;
u32 pba;
- bool fc_autoneg;
-
- unsigned long led_status;
-
unsigned int flags;
unsigned long last_reset;
};
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index aed9162afd38..61dfcd8cb370 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1629,10 +1629,6 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- adapter->tx_int_delay = 8;
- adapter->tx_abs_int_delay = 32;
- adapter->rx_int_delay = 0;
- adapter->rx_abs_int_delay = 8;
adapter->requested_itr = 3;
adapter->current_itr = IGBVF_START_ITR;
@@ -2708,7 +2704,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
- static int cards_found;
int err;
err = pci_enable_device_mem(pdev);
@@ -2780,8 +2775,6 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
- adapter->bd_number = cards_found++;
-
netdev->hw_features = NETIF_F_SG |
NETIF_F_TSO |
NETIF_F_TSO6 |
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 97b1a2c820ee..266bfcf2a28f 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -406,10 +406,6 @@ extern char igc_driver_name[];
#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
-#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
-#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
-
/* RX-desc Write-Back format RSS Type's */
enum igc_rss_type_num {
IGC_RSS_TYPE_NO_HASH = 0,
@@ -635,6 +631,7 @@ enum igc_filter_match_flags {
IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3),
IGC_FILTER_FLAG_USER_DATA = BIT(4),
IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5),
+ IGC_FILTER_FLAG_DEFAULT_QUEUE = BIT(6),
};
struct igc_nfc_filter {
@@ -662,10 +659,14 @@ struct igc_nfc_rule {
bool flex;
};
-/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority
- * based, 8 ethertype based and 32 Flex filter based rules.
+/* IGC supports a total of 65 NFC rules, listed below in order of priority:
+ * - 16 MAC address based filtering rules (highest priority)
+ * - 8 ethertype based filtering rules
+ * - 32 Flex filter based filtering rules
+ * - 8 VLAN priority based filtering rules
+ * - 1 default queue rule (lowest priority)
*/
-#define IGC_MAX_RXNFC_RULES 64
+#define IGC_MAX_RXNFC_RULES 65
struct igc_flex_filter {
u8 index;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 86b346687196..498ba1522ca4 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -383,11 +383,15 @@
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
+#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define IGC_MRQC_DEFAULT_QUEUE_MASK GENMASK(5, 3)
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index a7f397b58cd6..ecb35b693ce5 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1283,6 +1283,24 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
rule->flex = true;
else
rule->flex = false;
+
+ /* The wildcard rule is only applied if:
+ * a) None of the other filtering rules match (match_flags is zero)
+ * b) The flow type is ETHER_FLOW only (no additional fields set)
+ * c) Mask for Source MAC address is not specified (all zeros)
+ * d) Mask for Destination MAC address is not specified (all zeros)
+ * e) Mask for L2 EtherType is not specified (zero)
+ *
+ * If all these conditions are met, the rule is treated as a wildcard
+ * rule. The default queue feature is then used, so that all packets that
+ * do not match any other rule are routed to the default queue.
+ */
+ if (!rule->filter.match_flags &&
+ fsp->flow_type == ETHER_FLOW &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_source) &&
+ is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) &&
+ !fsp->m_u.ether_spec.h_proto)
+ rule->filter.match_flags = IGC_FILTER_FLAG_DEFAULT_QUEUE;
}
/**
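An ntuple rule whose ETHER_FLOW spec and masks are entirely zero is reinterpreted by the hunk above as the wildcard rule, and igc_enable_nfc_rule() then programs the MRQC default queue from it. A compact userspace illustration using the same SIOCETHTOOL plumbing as the idpf example earlier (interface, queue and location are example values):

#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = {};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	nfc.fs.flow_type = ETHER_FLOW;	/* no fields set, all masks zero */
	nfc.fs.ring_cookie = 3;		/* unmatched packets go to queue 3 */
	nfc.fs.location = 64;		/* example slot; 65 rules now fit */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}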
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 32ea4fdd3e2b..458e5eaa92e5 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3874,6 +3874,22 @@ static void igc_del_flex_filter(struct igc_adapter *adapter,
wr32(IGC_WUFC, wufc);
}
+static void igc_set_default_queue_filter(struct igc_adapter *adapter, u32 queue)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ mrqc &= ~IGC_MRQC_DEFAULT_QUEUE_MASK;
+ mrqc |= FIELD_PREP(IGC_MRQC_DEFAULT_QUEUE_MASK, queue);
+ wr32(IGC_MRQC, mrqc);
+}
+
+static void igc_reset_default_queue_filter(struct igc_adapter *adapter)
+{
+ /* Reset the default queue to its default value which is Queue 0 */
+ igc_set_default_queue_filter(adapter, 0);
+}
+
static int igc_enable_nfc_rule(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
@@ -3912,6 +3928,9 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter,
return err;
}
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_set_default_queue_filter(adapter, rule->action);
+
return 0;
}
@@ -3939,6 +3958,9 @@ static void igc_disable_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
rule->filter.dst_addr);
+
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DEFAULT_QUEUE)
+ igc_reset_default_queue_filter(adapter);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4384e892f967..3a379e6a3a2a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -346,7 +346,6 @@ struct ixgbevf_adapter {
int num_rx_queues;
struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
int num_msix_vectors;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
@@ -363,8 +362,6 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- /* Interrupt Throttle Rate */
- u32 eitr_param;
struct ixgbevf_hw_stats stats;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 846ee2b9edf1..ab5838865c3f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1704,9 +1704,11 @@ unsigned long cgx_get_lmac_bmap(void *cgxd)
static int cgx_lmac_init(struct cgx *cgx)
{
+ u8 max_dmac_filters;
struct lmac *lmac;
u64 lmac_list;
int i, err;
+ int filter;
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
@@ -1745,6 +1747,8 @@ static int cgx_lmac_init(struct cgx *cgx)
cgx->mac_ops->dmac_filter_count /
cgx->lmac_count;
+ max_dmac_filters = lmac->mac_to_index_bmap.max;
+
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
goto err_name_free;
@@ -1774,6 +1778,15 @@ static int cgx_lmac_init(struct cgx *cgx)
set_bit(lmac->lmac_id, &cgx->lmac_bmap);
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
+
+ /* Disable stale DMAC filters for sane state */
+ for (filter = 0; filter < max_dmac_filters; filter++)
+ cgx_lmac_addr_del(cgx->cgx_id, lmac->lmac_id, filter);
+
+ /* cgx_lmac_addr_del() does not clear the entry at index 0,
+ * so it needs to be done explicitly
+ */
+ cgx_lmac_addr_reset(cgx->cgx_id, lmac->lmac_id);
}
/* Start X2P reset on given MAC block */
@@ -1951,6 +1964,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_disable_device;
}
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "DMA mask config failed, abort\n");
+ goto err_release_regions;
+ }
+
/* MAP configuration registers */
cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
if (!cgx->reg_base) {
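For clarity, a small sketch (with a local re-definition of DMA_BIT_MASK(), assumed to match the kernel's) showing the 48-bit mask the new dma_set_mask_and_coherent() call above requests:

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-in for the kernel's DMA_BIT_MASK() */
    #define DMA_BIT_MASK_LOCAL(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
    	/* 48 bits of addressable DMA space: 0x0000ffffffffffff (256 TiB) */
    	printf("DMA_BIT_MASK(48) = 0x%016llx\n",
    	       (unsigned long long)DMA_BIT_MASK_LOCAL(48));
    	return 0;
    }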
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 0bc0dc79868b..933073cd2280 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -664,7 +664,12 @@ struct cgx_lmac_fwdata_s {
/* Only applicable if SFP/QSFP slot is present */
struct sfp_eeprom_s sfp_eeprom;
struct phy_s phy;
-#define LMAC_FWDATA_RESERVED_MEM 1021
+ u32 lmac_type;
+ u32 portm_idx;
+ u64 mgmt_port:1;
+ u64 advertised_an:1;
+ u64 port;
+#define LMAC_FWDATA_RESERVED_MEM 1018
u64 reserved[LMAC_FWDATA_RESERVED_MEM];
};
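The reserved array shrinks by exactly three u64 words because the new members occupy 24 bytes (two u32s, one u64 bitfield word, one u64); a small sketch (hypothetical tail-only layout, not the real struct) of that accounting:

    #include <stdint.h>

    /* Simplified stand-in for the tail of cgx_lmac_fwdata_s: the three new
     * 64-bit words (lmac_type+portm_idx, the bitfield word, and port) are
     * paid for by shrinking the reserved area from 1021 to 1018 entries.
     */
    struct fwdata_tail_old {
    	uint64_t reserved[1021];
    };

    struct fwdata_tail_new {
    	uint32_t lmac_type;
    	uint32_t portm_idx;
    	uint64_t flags;		/* mgmt_port:1, advertised_an:1 in the patch */
    	uint64_t port;
    	uint64_t reserved[1018];
    };

    _Static_assert(sizeof(struct fwdata_tail_old) ==
    	       sizeof(struct fwdata_tail_new),
    	       "firmware-shared layout size must not change");

    int main(void)
    {
    	return 0;
    }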
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 890a1a5df2de..3303c475414a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -682,16 +682,19 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *rsp)
{
int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
if (rvu_npc_exact_has_match_table(rvu))
return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = &rvu->pf[pf];
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
return 0;
@@ -769,20 +772,12 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
- u8 cgx_id, lmac_id;
- int rc = 0;
- u64 cfg;
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
- return -EPERM;
-
- rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
- rsp->hdr.rc = rc;
- cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
- /* copy 48 bit mac address to req->mac_addr */
- u64_to_ether_addr(cfg, rsp->mac_addr);
+ ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 0c20642f81b9..8375f18c8e07 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -867,6 +867,71 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+static int rvu_dbg_rvu_fwdata_display(struct seq_file *s, void *unused)
+{
+ struct rvu *rvu = s->private;
+ struct rvu_fwdata *fwdata;
+ u8 mac[ETH_ALEN];
+ int count = 0, i;
+
+ if (!rvu->fwdata)
+ return -EAGAIN;
+
+ fwdata = rvu->fwdata;
+ seq_puts(s, "\nRVU Firmware Data:\n");
+ seq_puts(s, "\n\t\tPTP INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\texternal clockrate \t :%x\n",
+ fwdata->ptp_ext_clk_rate);
+ seq_printf(s, "\t\texternal timestamp \t :%x\n",
+ fwdata->ptp_ext_tstamp);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tSDP CHANNEL INFORMATION\n");
+ seq_puts(s, "\t\t=======================\n");
+ seq_printf(s, "\t\tValid \t\t\t :%x\n", fwdata->channel_data.valid);
+ seq_printf(s, "\t\tNode ID \t\t :%x\n",
+ fwdata->channel_data.info.node_id);
+ seq_printf(s, "\t\tNumber of VFs \t\t :%x\n",
+ fwdata->channel_data.info.max_vfs);
+ seq_printf(s, "\t\tNumber of PF-Rings \t :%x\n",
+ fwdata->channel_data.info.num_pf_rings);
+ seq_printf(s, "\t\tPF SRN \t\t\t :%x\n",
+ fwdata->channel_data.info.pf_srn);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tPF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ for (i = 0; i < PF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->pf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ seq_puts(s, "\n\t\tVF-INDEX MACADDRESS\n");
+ seq_puts(s, "\t\t====================\n");
+ count = 0;
+ for (i = 0; i < VF_MACNUM_MAX; i++) {
+ u64_to_ether_addr(fwdata->vf_macs[i], mac);
+ if (!is_zero_ether_addr(mac)) {
+ seq_printf(s, "\t\t %d %pM\n", i, mac);
+ count++;
+ }
+ }
+
+ if (!count)
+ seq_puts(s, "\t\tNo valid address found\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(rvu_fwdata, rvu_fwdata_display, NULL);
+
static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
@@ -2923,6 +2988,97 @@ static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused)
RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+static int cgx_print_fwdata(struct seq_file *s, int lmac_id)
+{
+ struct cgx_lmac_fwdata_s *fwdata;
+ void *cgxd = s->private;
+ struct phy_s *phy;
+ struct rvu *rvu;
+ int cgx_id, i;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ if (!rvu->fwdata)
+ return -EAGAIN;
+
+ cgx_id = cgx_get_cgxid(cgxd);
+
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ fwdata = &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id];
+ else
+ fwdata = &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id];
+
+ seq_puts(s, "\nFIRMWARE SHARED:\n");
+ seq_puts(s, "\t\tSUPPORTED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ fwdata->supported_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%llx\n", fwdata->supported_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->supported_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tADVERTISED LINK INFORMATION\t\t\n");
+ seq_puts(s, "\t\t==========================\n");
+ seq_printf(s, "\t\t Link modes \t\t :%llx\n",
+ (u64)fwdata->advertised_link_modes);
+ seq_printf(s, "\t\t Autoneg \t\t :%x\n", fwdata->advertised_an);
+ seq_printf(s, "\t\t FEC \t\t\t :%llx\n", fwdata->advertised_fec);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t\tLMAC CONFIG\t\t\n");
+ seq_puts(s, "\t\t============\n");
+ seq_printf(s, "\t\t rw_valid \t\t :%x\n", fwdata->rw_valid);
+ seq_printf(s, "\t\t lmac_type \t\t :%x\n", fwdata->lmac_type);
+ seq_printf(s, "\t\t portm_idx \t\t :%x\n", fwdata->portm_idx);
+ seq_printf(s, "\t\t mgmt_port \t\t :%x\n", fwdata->mgmt_port);
+ seq_printf(s, "\t\t Link modes own \t :%llx\n",
+ (u64)fwdata->advertised_link_modes_own);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n\t\tEEPROM DATA\n");
+ seq_puts(s, "\t\t===========\n");
+ seq_printf(s, "\t\t sff_id \t\t :%x\n", fwdata->sfp_eeprom.sff_id);
+ seq_puts(s, "\t\t data \t\t\t :\n");
+ seq_puts(s, "\t\t");
+ for (i = 0; i < SFP_EEPROM_SIZE; i++) {
+ seq_printf(s, "%x", fwdata->sfp_eeprom.buf[i]);
+ if ((i + 1) % 16 == 0) {
+ seq_puts(s, "\n");
+ seq_puts(s, "\t\t");
+ }
+ }
+ seq_puts(s, "\n");
+
+ phy = &fwdata->phy;
+ seq_puts(s, "\n\t\tPHY INFORMATION\n");
+ seq_puts(s, "\t\t===============\n");
+ seq_printf(s, "\t\t Mod type configurable \t\t :%x\n",
+ phy->misc.can_change_mod_type);
+ seq_printf(s, "\t\t Mod type \t\t\t :%x\n", phy->misc.mod_type);
+ seq_printf(s, "\t\t Support FEC \t\t\t :%x\n", phy->misc.has_fec_stats);
+ seq_printf(s, "\t\t RSFEC corrected words \t\t :%x\n",
+ phy->fec_stats.rsfec_corr_cws);
+ seq_printf(s, "\t\t RSFEC uncorrected words \t :%x\n",
+ phy->fec_stats.rsfec_uncorr_cws);
+ seq_printf(s, "\t\t BRFEC corrected words \t\t :%x\n",
+ phy->fec_stats.brfec_corr_blks);
+ seq_printf(s, "\t\t BRFEC uncorrected words \t :%x\n",
+ phy->fec_stats.brfec_uncorr_blks);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_fwdata_display(struct seq_file *s, void *unused)
+{
+ return cgx_print_fwdata(s, rvu_dbg_derive_lmacid(s));
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_fwdata, cgx_fwdata_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@@ -2962,6 +3118,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file_aux_num("mac_filter", 0600,
rvu->rvu_dbg.lmac, cgx, lmac_id,
&rvu_dbg_cgx_dmac_flt_fops);
+ debugfs_create_file("fwdata", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_fwdata_fops);
}
}
}
@@ -3808,6 +3967,9 @@ void rvu_dbg_init(struct rvu *rvu)
debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
rvu, &rvu_dbg_lmtst_map_table_fops);
+ debugfs_create_file("rvu_fwdata", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rvu_fwdata_fops);
+
if (!cgx_get_cgxcnt_max())
goto create;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 99ace381cc78..625bb5a05344 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1571,7 +1571,7 @@ handle_xdp_verdict:
cq->pool_ptrs++;
if (xsk_buff) {
xsk_buff_free(xsk_buff);
- } else if (page->pp) {
+ } else if (pp_page_to_nmdesc(page)->pp) {
page_pool_recycle_direct(pool->page_pool, page);
} else {
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b33285d755b9..92a16ddb7d86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -460,9 +460,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
truesize += frag_info->frag_stride;
if (frag_info->frag_stride == PAGE_SIZE / 2) {
+ struct netmem_desc *desc = pp_page_to_nmdesc(page);
+
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
- atomic_long_read(&page->pp_ref_count) != 1 ||
+ atomic_long_read(&desc->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index b1aeea7c4a91..e395ef5f356e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1947,8 +1947,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
pages_queue, token, force_polling);
- if (callback)
- return err;
+ if (callback && !err)
+ return 0;
if (err > 0) /* Failed in FW, command didn't execute */
err = deliv_status_to_err(err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b6340e9453c0..99295eaf2f02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -84,9 +84,10 @@ struct page_pool;
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
-#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
-#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
-#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
+#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT (6)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT (12)
+#define MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE (16)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE BIT(MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -629,14 +630,13 @@ struct mlx5e_dma_info {
};
struct mlx5e_shampo_hd {
- u32 mkey;
struct mlx5e_frag_page *pages;
u32 hd_per_wq;
u16 hd_per_wqe;
unsigned long *bitmap;
u16 pi;
u16 ci;
- __be32 key;
+ __be32 mkey_be;
};
struct mlx5e_hw_gro_data {
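The shift-based defines above preserve the values the removed helpers used to compute; a worked check (user-space sketch, kernel macro semantics assumed) that log_reservation_size is still 4 for a 64 KiB reservation over a 4 KiB device base unit:

    #include <stdio.h>

    #define SHAMPO_WQ_LOG_RESRV_SIZE	16	/* 64 KiB reservation */
    #define SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT	12	/* 4 KiB device base unit */

    int main(void)
    {
    	unsigned int resrv_size = 1u << SHAMPO_WQ_LOG_RESRV_SIZE;
    	unsigned int log_resrv = SHAMPO_WQ_LOG_RESRV_SIZE -
    				 SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT;

    	/* 65536 / 4096 = 16 = 2^4, matching the old
    	 * order_base_2(RESRV_SIZE / BASE_RESRV_SIZE) computation.
    	 */
    	printf("resrv_size=%u log_reservation_size=%u\n",
    	       resrv_size, log_resrv);
    	return 0;
    }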
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index fc945bce933a..3cca06a74cf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -414,25 +414,10 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+static u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5e_params *params)
{
- return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
-}
-
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
-}
-
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
-
- return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+ return order_base_2(DIV_ROUND_UP(MLX5E_SHAMPO_WQ_RESRV_SIZE,
+ params->sw_mtu));
}
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
@@ -834,13 +819,12 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
- int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
/* +1 is for the case that the pkt_per_rsrv dont consume the reservation
* so we get a filler cqe for the rest of the reservation.
@@ -932,12 +916,14 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, shampo_enable, true);
MLX5_SET(wq, wq, log_reservation_size,
- mlx5e_shampo_get_log_rsrv_size(mdev, params));
+ MLX5E_SHAMPO_WQ_LOG_RESRV_SIZE -
+ MLX5E_SHAMPO_WQ_RESRV_SIZE_BASE_SHIFT);
MLX5_SET(wq, wq,
log_max_num_of_packets_per_reservation,
- mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ mlx5e_shampo_get_log_pkt_per_rsrv(params));
MLX5_SET(wq, wq, log_headers_entry_size,
- mlx5e_shampo_get_log_hd_entry_size(mdev, params));
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE -
+ MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE_SHIFT);
lro_timeout =
mlx5e_choose_lro_timeout(mdev,
MLX5E_DEFAULT_SHAMPO_TIMEOUT);
@@ -1048,18 +1034,17 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
- MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE;
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
- int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(params));
int wqe_size = BIT(log_stride_sz) * num_strides;
+ int rsrv_size = MLX5E_SHAMPO_WQ_RESRV_SIZE;
u32 hd_per_wqe;
/* Assumption: hd_per_wqe % 8 == 0. */
- hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
- mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
- __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
+ hd_per_wqe = (wqe_size / rsrv_size) * pkt_per_rsrv;
+ mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_rsrv = %d\n",
+ __func__, hd_per_wqe, rsrv_size, wqe_size, pkt_per_rsrv);
return hd_per_wqe;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index bd5877acc5b1..488ccdbc1e2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -95,12 +95,6 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
-u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params);
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 5ce1b463b7a8..5d51600935a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -710,7 +710,8 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
/* No need to check page_pool_page_is_pp() as we
* know this is a page_pool page.
*/
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp,
+ page);
} while (++n < num);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bd481f3384d0..33bdb7f1e03f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -546,18 +546,26 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- u16 hd_per_wq, u32 *umr_mkey)
+ u16 hd_per_wq, __be32 *umr_mkey)
{
u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 mkey;
+ int err;
if (max_ksm_size < hd_per_wq) {
mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
max_ksm_size, hd_per_wq);
return -EINVAL;
}
- return mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
- MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
- umr_mkey);
+
+ err = mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
+ &mkey);
+ if (err)
+ return err;
+
+ *umr_mkey = cpu_to_be32(mkey);
+ return 0;
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -783,11 +791,10 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
goto err_shampo_hd_info_alloc;
err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
- &rq->mpwqe.shampo->mkey);
+ &rq->mpwqe.shampo->mkey_be);
if (err)
goto err_umr_mkey;
- rq->mpwqe.shampo->key = cpu_to_be32(rq->mpwqe.shampo->mkey);
rq->mpwqe.shampo->hd_per_wqe =
mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
@@ -832,7 +839,7 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
err_hw_gro_data:
page_pool_destroy(rq->hd_page_pool);
err_hds_page_pool:
- mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
+ mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.shampo->mkey_be));
err_umr_mkey:
mlx5e_rq_shampo_hd_info_free(rq);
err_shampo_hd_info_alloc:
@@ -849,7 +856,8 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
if (rq->hd_page_pool != rq->page_pool)
page_pool_destroy(rq->hd_page_pool);
mlx5e_rq_shampo_hd_info_free(rq);
- mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
+ mlx5_core_destroy_mkey(rq->mdev,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
kvfree(rq->mpwqe.shampo);
}
@@ -1122,7 +1130,8 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_cou
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
MLX5_SET(wq, wq, log_headers_buffer_entry_num,
order_base_2(rq->mpwqe.shampo->hd_per_wq));
- MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
+ MLX5_SET(wq, wq, headers_mkey,
+ be32_to_cpu(rq->mpwqe.shampo->mkey_be));
}
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
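Since the mkey is now stored once in big-endian form (mkey_be), every register or command write converts back with be32_to_cpu(); a small endianness sketch (user-space htonl()/ntohl() used as stand-ins for the kernel helpers) of that round trip:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>	/* htonl()/ntohl() as be32 helpers */

    int main(void)
    {
    	uint32_t mkey = 0x00112233;	/* value returned by mkey creation */
    	uint32_t mkey_be = htonl(mkey);	/* stored once, consumed by UMR WQEs */
    	uint32_t back = ntohl(mkey_be);	/* converted back for destroy/MLX5_SET */

    	printf("mkey=0x%08x mkey_be(raw)=0x%08x back=0x%08x\n",
    	       mkey, mkey_be, back);
    	return 0;
    }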
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index a4896e89fa35..218b1a09534c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -676,7 +676,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_ksm_umr(sq, umr_wqe, shampo->key, index, ksm_entries);
+ build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
while (i < ksm_entries) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0e3a977d5332..bee906661282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1182,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
- /* total vports is the same for both e-switches */
- int nvports = esw->total_vports;
struct mlx5_flow_handle *flow;
+ struct mlx5_vport *peer_vport;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
int err, pfindex;
unsigned long i;
void *misc;
- if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ if (!MLX5_VPORT_MANAGER(peer_dev) &&
+ !mlx5_core_is_ecpf_esw_manager(peer_dev))
return 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1203,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
@@ -1213,10 +1213,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, MLX5_VPORT_PF);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
+ MLX5_VPORT_PF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1224,11 +1224,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1236,13 +1236,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
esw_set_peer_miss_rule_source_port(esw,
- peer_dev->priv.eswitch,
- spec, vport->vport);
+ peer_esw,
+ spec, peer_vport->vport);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1250,22 +1251,22 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (i >= mlx5_core_max_ec_vfs(peer_dev))
- break;
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, vport->vport);
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_ec_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
}
@@ -1282,25 +1283,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_ec_vf_flow_err:
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_vf_flow_err:
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
@@ -1313,37 +1316,34 @@ alloc_flows_err:
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
u16 peer_index = mlx5_get_dev_index(peer_dev);
struct mlx5_flow_handle **flows;
- struct mlx5_vport *vport;
+ struct mlx5_vport *peer_vport;
unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
if (!flows)
return;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- /* The flow for a particular vport could be NULL if the other ECPF
- * has fewer or no VFs enabled
- */
- if (!flows[vport->index])
- continue;
- mlx5_del_flow_rules(flows[vport->index]);
- }
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
kvfree(flows);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index d45e1145d197..c6436c3a7a83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -727,8 +727,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type, 0x2) ||
- HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c2, 0xe) ||
- HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c4, 0x4)) {
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, outer_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported outer parameters set\n");
return -EINVAL;
}
@@ -903,8 +904,9 @@ hws_definer_conv_inner(struct mlx5hws_definer_conv_data *cd,
u32 *s_ipv6, *d_ipv6;
if (HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type, 0x2) ||
- HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c2, 0xe) ||
- HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c4, 0x4)) {
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.l4_type_ext, 0x4) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_c6, 0xa) ||
+ HWS_IS_FLD_SET_SZ(match_param, inner_headers.reserved_at_d4, 0x4)) {
mlx5hws_err(cd->ctx, "Unsupported inner parameters set\n");
return -EINVAL;
}
@@ -1279,7 +1281,8 @@ hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
struct mlx5hws_definer_fc *curr_fc;
if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
- HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1b8, 0x8) ||
+ HWS_IS_FLD_SET_SZ(match_param,
+ misc_parameters_2.ipsec_next_header, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.macsec_syndrome) ||
HWS_IS_FLD_SET(match_param, misc_parameters_2.ipsec_syndrome)) {
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index d6c0699bc8cf..43f034e180c4 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -8,6 +8,7 @@
#include <linux/version.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
+#include <linux/export.h>
#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a7973651ae51..550843e2164b 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -10,6 +10,7 @@
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index 9a774046455b..df4ca897a60c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -136,6 +136,7 @@ static struct platform_driver renesas_gbeth_driver = {
.probe = renesas_gbeth_probe,
.driver = {
.name = "renesas-gbeth",
+ .pm = &stmmac_pltfr_pm_ops,
.of_match_table = renesas_gbeth_match,
},
};
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index ddfd1c02a885..da53eb04b0a4 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -288,8 +288,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
int i;
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -297,43 +301,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initialized */
- for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
+
+ /* Configure buffer pools for forwarding buffers
+ * - used by firmware to store packets to be forwarded to the other port
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
-
- for (i = PRUETH_NUM_BUF_POOLS;
- i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
- i++) {
- /* The driver only uses first 4 queues per PRU so only initialize them */
- if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
- addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from the host core
+ * - 16 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* The driver only uses the first 4 queues per PRU,
+ * so only initialize buffers for them
+ */
+ if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
+ < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_SW_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_SW_LI_BUF_POOL_SIZE;
} else {
- writel(0, &bpool_cfg[i].addr);
- writel(0, &bpool_cfg[i].len);
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
}
}
- if (!slice)
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to the host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to the host core
+ */
rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
- writel(addr - SZ_2K, &rxq_ctx->end);
+ addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store a packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
return 0;
}
@@ -347,13 +374,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
u32 addr;
int i;
- /* Layout to have 64KB aligned buffer pool
- * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
- */
-
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -361,39 +388,66 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initilalized */
- writel(addr, &bpool_cfg[0].addr);
- writel(0, &bpool_cfg[0].len);
- for (i = PRUETH_EMAC_BUF_POOL_START;
- i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
- i++) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ /* Configure buffer pools for forwarding buffers
+ * - in MAC mode there is no forwarding, so initialize all pools to 0
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
+ writel(0, &bpool_cfg[i].addr);
+ writel(0, &bpool_cfg[i].len);
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from the host core
+ * - 16 total pools per slice
+ */
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* In EMAC mode, only the first 4 buffers are used,
+ * as one slice handles only one port
+ */
+ if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
+ } else {
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
+ }
+ }
- /* Pre-emptible RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to the host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
- /* Express RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to the host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store a packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
+
return 0;
}
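Both setup paths above carve a single contiguous OC-SRAM region by repeatedly advancing addr; a compact sketch (illustrative only, EMAC-mode carving order) of how the offsets accumulate:

    #include <stdio.h>

    #define SZ_2K	0x800
    #define SZ_8K	0x2000
    #define SZ_16K	0x4000

    int main(void)
    {
    	unsigned int addr = 0;	/* offset into the per-slice buffer region */
    	int i;

    	/* 4 used local-injection pools of 8 KiB each */
    	for (i = 0; i < 4; i++)
    		addr += SZ_8K;
    	addr += SZ_16K;		/* express RX queue */
    	addr += SZ_16K - SZ_2K;	/* preemptible RX queue */
    	addr += SZ_2K;		/* dropped-packet scratch accounted at the end */

    	printf("per-slice EMAC buffer footprint = %u KiB\n", addr / 1024);
    	return 0;
    }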
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index c884e9fa099e..60d69744ffae 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -26,21 +26,71 @@ struct icssg_flow_cfg {
#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */
#define PRUETH_RX_FLOW_DATA 0
-#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K
-#define PRUETH_EMAC_POOLS_PER_SLICE 24
-#define PRUETH_EMAC_BUF_POOL_START 8
-#define PRUETH_NUM_BUF_POOLS 8
-#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */
-#define MSMC_RAM_SIZE \
- (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
- PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
-
-#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K
-#define PRUETH_SW_NUM_BUF_POOLS_HOST 8
-#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4
-#define MSMC_RAM_SIZE_SWITCH_MODE \
- (MSMC_RAM_SIZE + \
- (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
+/* Defines for forwarding path buffer pools:
+ * - used by firmware to store packets to be forwarded to the other port
+ * - 8 total pools per slice
+ * - only used in switch mode (no forwarding in MAC mode)
+ */
+#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K)
+
+/* Defines for local injection path buffer pools:
+ * - used by firmware to store packets received from the host core
+ * - 16 total pools per slice
+ * - 8 pools per port per slice and each slice handles both ports
+ * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
+ * - switch mode: 8 total pools used
+ * - mac mode: 4 total pools used
+ */
+#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16
+#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8
+#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+
+/* Defines for host egress path - express and preemptible buffers
+ * - used by firmware to store express and preemptible packets
+ * to be transmitted to host core
+ * - used by both mac/switch modes
+ */
+#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K
+#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K)
+#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE
+#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE
+
+/* Buffer used by firmware to temporarily store a packet to be dropped */
+#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K
+#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE
+
+/* Total switch mode memory usage for buffers per slice */
+#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_SW_DROP_PKT_BUF_SIZE)
+
+/* Total switch mode memory usage for all buffers */
+#define PRUETH_SW_TOTAL_BUF_SIZE \
+ (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Total mac mode memory usage for buffers per slice */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_EMAC_LI_BUF_POOL_SIZE * \
+ PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_EMAC_DROP_PKT_BUF_SIZE)
+
+/* Total mac mode memory usage for all buffers */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE \
+ (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Size of 1 bank of MSMC/OC_SRAM memory */
+#define MSMC_RAM_BANK_SIZE SZ_256K
#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
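A quick sanity check (user-space sketch mirroring the new defines) of the per-slice totals these macros encode: 128 KiB per slice in switch mode, 64 KiB in MAC mode, both fitting inside one 256 KiB SRAM bank:

    #include <stdio.h>

    #define SZ_2K	0x800
    #define SZ_4K	0x1000
    #define SZ_8K	0x2000
    #define SZ_16K	0x4000
    #define SZ_256K	0x40000

    int main(void)
    {
    	unsigned int sw = 8 * SZ_8K +		/* forwarding pools */
    			  8 * SZ_4K +		/* used LI pools */
    			  SZ_16K +		/* express RX */
    			  (SZ_16K - SZ_2K) +	/* preemptible RX */
    			  SZ_2K;		/* drop buffer */
    	unsigned int emac = 4 * SZ_8K + SZ_16K + (SZ_16K - SZ_2K) + SZ_2K;

    	printf("switch: %u KiB/slice, emac: %u KiB/slice, bank: %u KiB\n",
    	       sw / 1024, emac / 1024, SZ_256K / 1024);
    	return 0;
    }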
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 2aa812cbab92..2b973d6e2341 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1814,10 +1814,15 @@ static int prueth_probe(struct platform_device *pdev)
goto put_mem;
}
- msmc_ram_size = MSMC_RAM_SIZE;
prueth->is_switchmode_supported = prueth->pdata.switch_mode;
- if (prueth->is_switchmode_supported)
- msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
+ if (prueth->pdata.banked_ms_ram) {
+ /* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
+ msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
+ } else {
+ msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
+ if (prueth->is_switchmode_supported)
+ msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
+ }
/* NOTE: FW bug needs buffer base to be 64KB aligned */
prueth->msmcram.va =
@@ -1985,7 +1990,8 @@ put_iep0:
free_pool:
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va, msmc_ram_size);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
put_mem:
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -2037,8 +2043,8 @@ static void prueth_remove(struct platform_device *pdev)
icss_iep_put(prueth->iep0);
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va,
- MSMC_RAM_SIZE);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -2055,12 +2061,14 @@ static const struct prueth_pdata am654_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 0,
};
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 1,
};
static const struct of_device_id prueth_dt_match[] = {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 9ca2e7fdefbd..ca8a22a4a5da 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -252,11 +252,13 @@ struct prueth_emac {
* @fdqring_mode: Free desc queue mode
* @quirk_10m_link_issue: 10M link detect errata
* @switch_mode: switch firmware support
+ * @banked_ms_ram: banked memory support
*/
struct prueth_pdata {
enum k3_ring_mode fdqring_mode;
u32 quirk_10m_link_issue:1;
u32 switch_mode:1;
+ u32 banked_ms_ram:1;
};
struct icssg_firmwares {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index ff5f41bf499e..5e225310c9de 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -367,7 +367,7 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
return IRQ_NONE;
prueth_tx_ts_sr1(emac, (void *)page_address(page));
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
return IRQ_HANDLED;
}
@@ -392,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
complete(&emac->cmd_complete);
}
- page_pool_recycle_direct(page->pp, page);
+ page_pool_recycle_direct(pp_page_to_nmdesc(page)->pp, page);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
index 490a9cc06fb0..7e053b8af3ec 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -180,6 +180,9 @@
/* Used to notify the FW of the current link speed */
#define PORT_LINK_SPEED_OFFSET 0x00A8
+/* 2k memory pointer reserved for default writes by PRU0 */
+#define DEFAULT_MSMC_Q_OFFSET 0x00AC
+
/* TAS gate mask for windows list0 */
#define TAS_GATE_MASK_LIST0 0x0100
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 01c7edb28d96..2672d071b325 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -314,6 +314,8 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
&nsim_dev->fw_update_status);
debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_overwrite_mask);
+ debugfs_create_u32("fw_update_flash_chunk_time_ms", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_flash_chunk_time_ms);
debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
@@ -1015,9 +1017,9 @@ static int nsim_dev_info_get(struct devlink *devlink,
DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
-#define NSIM_DEV_FLASH_SIZE 500000
+#define NSIM_DEV_FLASH_SIZE 50000
#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
-#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
+#define NSIM_DEV_FLASH_CHUNK_TIME_MS_DEFAULT 100
static int nsim_dev_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -1041,7 +1043,7 @@ static int nsim_dev_flash_update(struct devlink *devlink,
params->component,
i * NSIM_DEV_FLASH_CHUNK_SIZE,
NSIM_DEV_FLASH_SIZE);
- msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
+ msleep(nsim_dev->fw_update_flash_chunk_time_ms ?: 1);
}
if (nsim_dev->fw_update_status) {
@@ -1585,6 +1587,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
INIT_LIST_HEAD(&nsim_dev->port_list);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
+ nsim_dev->fw_update_flash_chunk_time_ms = NSIM_DEV_FLASH_CHUNK_TIME_MS_DEFAULT;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
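With the smaller image and the new tunable per-chunk delay, the simulated flash now takes roughly 50 chunks x 100 ms = 5 s by default; a tiny sketch of that arithmetic:

    #include <stdio.h>

    #define NSIM_DEV_FLASH_SIZE			50000
    #define NSIM_DEV_FLASH_CHUNK_SIZE		1000
    #define NSIM_DEV_FLASH_CHUNK_TIME_MS_DEFAULT	100

    int main(void)
    {
    	unsigned int chunks = NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE;
    	unsigned int total_ms = chunks * NSIM_DEV_FLASH_CHUNK_TIME_MS_DEFAULT;

    	/* 50 chunks * 100 ms = 5000 ms with the default chunk delay */
    	printf("chunks=%u total=%u ms\n", chunks, total_ms);
    	return 0;
    }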
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 611e7f65291c..39fe28af48b9 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -387,15 +387,34 @@ static int nsim_get_iflink(const struct net_device *dev)
static int nsim_rcv(struct nsim_rq *rq, int budget)
{
struct net_device *dev = rq->napi.dev;
+ struct bpf_prog *xdp_prog;
+ struct netdevsim *ns;
struct sk_buff *skb;
unsigned int skblen;
int i, ret;
+ ns = netdev_priv(dev);
+ xdp_prog = READ_ONCE(ns->xdp.prog);
+
for (i = 0; i < budget; i++) {
if (skb_queue_empty(&rq->skb_queue))
break;
skb = skb_dequeue(&rq->skb_queue);
+
+ if (xdp_prog) {
+ /* skb might be freed directly by XDP, save the len */
+ skblen = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ skb_checksum_help(skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
+ if (ret != XDP_PASS) {
+ dev_dstats_rx_add(dev, skblen);
+ continue;
+ }
+ }
+
/* skb might be discard at netif_receive_skb, save the len */
skblen = skb->len;
skb_mark_napi_id(skb, &rq->napi);
@@ -898,7 +917,8 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
if (!ns->page)
ret = -ENOMEM;
} else {
- page_pool_put_full_page(ns->page->pp, ns->page, false);
+ page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
+ ns->page, false);
ns->page = NULL;
}
@@ -936,7 +956,7 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_TSO;
dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
dev->max_mtu = ETH_MAX_MTU;
- dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_HW_OFFLOAD;
}
static int nsim_queue_init(struct netdevsim *ns)
@@ -1126,7 +1146,8 @@ void nsim_destroy(struct netdevsim *ns)
/* Put this intentionally late to exercise the orphaning path */
if (ns->page) {
- page_pool_put_full_page(ns->page->pp, ns->page, false);
+ page_pool_put_full_page(pp_page_to_nmdesc(ns->page)->pp,
+ ns->page, false);
ns->page = NULL;
}
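With NETDEV_XDP_ACT_BASIC now advertised, a generic XDP program can be attached to netdevsim; a minimal program (standard libbpf-style skeleton, shown only as one way to exercise the new path) that passes every frame:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Every packet returning XDP_PASS continues into the normal
     * netif_receive_skb() path added to nsim_rcv() above.
     */
    SEC("xdp")
    int xdp_pass_all(struct xdp_md *ctx)
    {
    	return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";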
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 8eeeb9256077..bddd24c1389d 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -317,6 +317,7 @@ struct nsim_dev {
struct list_head port_list;
bool fw_update_status;
u32 fw_update_overwrite_mask;
+ u32 fw_update_flash_chunk_time_ms;
u32 max_macs;
bool test1;
bool dont_allow_reload;
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 291f052ea53c..04e84ebb646c 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -823,6 +823,8 @@ static struct phy_driver qca807x_drivers[] = {
.cable_test_get_status = qca808x_cable_test_get_status,
.update_stats = qca807x_update_stats,
.get_phy_stats = qca807x_get_phy_stats,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
@@ -848,6 +850,8 @@ static struct phy_driver qca807x_drivers[] = {
.led_hw_control_get = qca807x_led_hw_control_get,
.update_stats = qca807x_update_stats,
.get_phy_stats = qca807x_get_phy_stats,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
},
};
module_phy_driver(qca807x_drivers);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 34e82f1e37d9..ea0e5e276cd6 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -892,6 +892,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
}
}
+ if (ctx->func_desc)
+ ctx->filtering_supported = !!(ctx->func_desc->bmNetworkCapabilities
+ & USB_CDC_NCM_NCAP_ETH_FILTER);
+
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
/* Device-specific flags */
@@ -1898,6 +1902,14 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
}
}
+static void cdc_ncm_update_filter(struct usbnet *dev)
+{
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+ if (ctx->filtering_supported)
+ usbnet_cdc_update_filter(dev);
+}
+
static const struct driver_info cdc_ncm_info = {
.description = "CDC NCM (NO ZLP)",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
@@ -1908,7 +1920,7 @@ static const struct driver_info cdc_ncm_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
@@ -1922,7 +1934,7 @@ static const struct driver_info cdc_ncm_zlp_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
@@ -1964,7 +1976,7 @@ static const struct driver_info wwan_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
/* Same as wwan_info, but with FLAG_NOARP */
@@ -1978,7 +1990,7 @@ static const struct driver_info wwan_noarp_info = {
.status = cdc_ncm_status,
.rx_fixup = cdc_ncm_rx_fixup,
.tx_fixup = cdc_ncm_tx_fixup,
- .set_rx_mode = usbnet_cdc_update_filter,
+ .set_rx_mode = cdc_ncm_update_filter,
};
static const struct usb_device_id cdc_devs[] = {
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8e82184be5e7..de733e0488bf 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -63,6 +63,9 @@ struct smsc95xx_priv {
u32 hash_hi;
u32 hash_lo;
u32 wolopts;
+ bool pause_rx;
+ bool pause_tx;
+ bool pause_autoneg;
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
@@ -537,16 +540,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev)
{
- u32 flow = 0, afc_cfg;
struct smsc95xx_priv *pdata = dev->driver_priv;
- bool tx_pause, rx_pause;
+ u32 flow = 0, afc_cfg;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
if (ret < 0)
return ret;
if (pdata->phydev->duplex == DUPLEX_FULL) {
- phy_get_pause(pdata->phydev, &tx_pause, &rx_pause);
+ bool tx_pause, rx_pause;
+
+ if (pdata->phydev->autoneg == AUTONEG_ENABLE &&
+ pdata->pause_autoneg) {
+ phy_get_pause(pdata->phydev, &tx_pause, &rx_pause);
+ } else {
+ tx_pause = pdata->pause_tx;
+ rx_pause = pdata->pause_rx;
+ }
if (rx_pause)
flow = 0xFFFF0002;
@@ -772,6 +782,55 @@ static int smsc95xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
}
}
+static void smsc95xx_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct smsc95xx_priv *pdata;
+ struct usbnet *dev;
+
+ dev = netdev_priv(ndev);
+ pdata = dev->driver_priv;
+
+ pause->autoneg = pdata->pause_autoneg;
+ pause->rx_pause = pdata->pause_rx;
+ pause->tx_pause = pdata->pause_tx;
+}
+
+static int smsc95xx_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ bool pause_autoneg_rx, pause_autoneg_tx;
+ struct smsc95xx_priv *pdata;
+ struct phy_device *phydev;
+ struct usbnet *dev;
+
+ dev = netdev_priv(ndev);
+ pdata = dev->driver_priv;
+ phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ pdata->pause_rx = pause->rx_pause;
+ pdata->pause_tx = pause->tx_pause;
+ pdata->pause_autoneg = pause->autoneg;
+
+ if (pause->autoneg) {
+ pause_autoneg_rx = pause->rx_pause;
+ pause_autoneg_tx = pause->tx_pause;
+ } else {
+ pause_autoneg_rx = false;
+ pause_autoneg_tx = false;
+ }
+
+ phy_set_asym_pause(ndev->phydev, pause_autoneg_rx, pause_autoneg_tx);
+ if (phydev->link && (!pause->autoneg ||
+ phydev->autoneg == AUTONEG_DISABLE))
+ smsc95xx_mac_update_fullduplex(dev);
+
+ return 0;
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = smsc95xx_get_link,
.nway_reset = phy_ethtool_nway_reset,
@@ -791,6 +850,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.self_test = net_selftest,
.get_strings = smsc95xx_ethtool_get_strings,
.get_sset_count = smsc95xx_ethtool_get_sset_count,
+ .get_pauseparam = smsc95xx_get_pauseparam,
+ .set_pauseparam = smsc95xx_set_pauseparam,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1227,6 +1288,11 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ pdata->pause_tx = true;
+ pdata->pause_rx = true;
+ pdata->pause_autoneg = true;
+ phy_support_asym_pause(pdata->phydev);
+
ret = phy_connect_direct(dev->net, pdata->phydev,
&smsc95xx_handle_link_change,
PHY_INTERFACE_MODE_MII);
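The smsc95xx additions wire ethtool get/set_pauseparam into the existing flow-control update: negotiated pause from the PHY is only used when both link autonegotiation and pause autonegotiation are enabled, otherwise the user's forced rx/tx pause settings win. A small stand-alone sketch of that decision, with made-up names and outside the kernel:

#include <stdbool.h>
#include <stdio.h>

struct pause_cfg {
	bool autoneg;   /* user asked for pause autonegotiation */
	bool rx_pause;  /* forced rx pause */
	bool tx_pause;  /* forced tx pause */
};

/* Resolve the pause setting the MAC should program, mirroring the choice in
 * smsc95xx_phy_update_flowcontrol(): negotiated values apply only when both
 * link autoneg and pause autoneg are enabled.
 */
static void resolve_pause(const struct pause_cfg *cfg, bool link_autoneg,
			  bool negotiated_rx, bool negotiated_tx,
			  bool *rx, bool *tx)
{
	if (link_autoneg && cfg->autoneg) {
		*rx = negotiated_rx;
		*tx = negotiated_tx;
	} else {
		*rx = cfg->rx_pause;
		*tx = cfg->tx_pause;
	}
}

int main(void)
{
	struct pause_cfg cfg = { .autoneg = false, .rx_pause = true };
	bool rx, tx;

	resolve_pause(&cfg, true, false, false, &rx, &tx);
	printf("rx=%d tx=%d\n", rx, tx); /* forced values win: rx=1 tx=0 */
	return 0;
}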
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index bcde95cb2a2e..f32be2e301f2 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -4041,7 +4041,7 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], "Cannot change VNI");
return -EOPNOTSUPP;
}
- conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
+ conf->vni = vni;
}
if (data[IFLA_VXLAN_GROUP]) {
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index d31708eca3c8..6f78f1752cd6 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1565,7 +1565,7 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
bool with_chip_id)
{
/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
- char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
+ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = {};
if (with_variant && ar->id.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
@@ -2493,12 +2493,50 @@ static int ath10k_init_hw_params(struct ath10k *ar)
return 0;
}
+static bool ath10k_core_needs_recovery(struct ath10k *ar)
+{
+ long time_left;
+
+ /* Sometimes a recovery fails and then every subsequent recovery
+ * attempt fails as well, so avoid recovering forever.
+ */
+ if (atomic_read(&ar->fail_cont_count) >= ATH10K_RECOVERY_MAX_FAIL_COUNT) {
+ ath10k_err(ar, "consecutive fail %d times, will shutdown driver!",
+ atomic_read(&ar->fail_cont_count));
+ ar->state = ATH10K_STATE_WEDGED;
+ return false;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "total recovery count: %d", ++ar->recovery_count);
+
+ if (atomic_read(&ar->pending_recovery)) {
+ /* Sometimes another recovery work is scheduled before the previous
+ * one has completed; the second work would then destroy the first,
+ * so wait here to avoid that.
+ */
+ time_left = wait_for_completion_timeout(&ar->driver_recovery,
+ ATH10K_RECOVERY_TIMEOUT_HZ);
+ if (time_left) {
+ ath10k_warn(ar, "previous recovery succeeded, skip this!\n");
+ return false;
+ }
+
+ /* The previous recovery failed; record another consecutive failure. */
+ atomic_inc(&ar->fail_cont_count);
+
+ /* Avoid having multiple recoveries at the same time. */
+ return false;
+ }
+
+ atomic_inc(&ar->pending_recovery);
+
+ return true;
+}
+
void ath10k_core_start_recovery(struct ath10k *ar)
{
- if (test_and_set_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags)) {
- ath10k_warn(ar, "already restarting\n");
+ if (!ath10k_core_needs_recovery(ar))
return;
- }
queue_work(ar->workqueue, &ar->restart_work);
}
@@ -2534,6 +2572,8 @@ static void ath10k_core_restart(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
int ret;
+ reinit_completion(&ar->driver_recovery);
+
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
/* Place a barrier to make sure the compiler doesn't reorder
@@ -2598,8 +2638,6 @@ static void ath10k_core_restart(struct work_struct *work)
if (ret)
ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
ret);
-
- complete(&ar->driver_recovery);
}
static void ath10k_core_set_coverage_class_work(struct work_struct *work)
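The ath10k recovery rework above replaces the single ATH10K_FLAG_RESTARTING bit with a pending-recovery flag, a consecutive-failure counter and a terminal wedged state. A simplified sketch of the gating logic follows; it leaves out the completion wait and timeout the driver uses, and all names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define MAX_FAIL_COUNT 4 /* mirrors ATH10K_RECOVERY_MAX_FAIL_COUNT */

struct recovery_state {
	int fail_cont_count;   /* consecutive failed recoveries */
	bool pending;          /* a recovery is already in flight */
	bool wedged;           /* too many failures, give up */
};

/* Decide whether to queue another recovery, following the same rules as the
 * ath10k_core_needs_recovery() hunk: refuse once wedged, count a failure and
 * refuse while one is already pending, otherwise claim the pending slot.
 */
static bool needs_recovery(struct recovery_state *st)
{
	if (st->fail_cont_count >= MAX_FAIL_COUNT) {
		st->wedged = true;
		return false;
	}
	if (st->pending) {
		st->fail_cont_count++;
		return false;
	}
	st->pending = true;
	return true;
}

int main(void)
{
	struct recovery_state st = { 0 };

	printf("%d\n", needs_recovery(&st)); /* 1: first recovery queued */
	printf("%d\n", needs_recovery(&st)); /* 0: still pending */
	return 0;
}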
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 446dca74f06a..8c72ed386edb 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _CORE_H_
@@ -87,6 +88,8 @@
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
#define ATH10K_ITER_RESUME_FLAGS (IEEE80211_IFACE_ITER_RESUME_ALL |\
IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
+#define ATH10K_RECOVERY_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_RECOVERY_MAX_FAIL_COUNT 4
struct ath10k;
@@ -779,7 +782,7 @@ enum ath10k_fw_features {
/* Firmware supports bypassing PLL setting on init. */
ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
- /* Raw mode support. If supported, FW supports receiving and trasmitting
+ /* Raw mode support. If supported, FW supports receiving and transmitting
* frames in raw mode.
*/
ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
@@ -865,9 +868,6 @@ enum ath10k_dev_flags {
/* Per Station statistics service */
ATH10K_FLAG_PEER_STATS,
- /* Indicates that ath10k device is during recovery process and not complete */
- ATH10K_FLAG_RESTARTING,
-
/* protected by conf_mutex */
ATH10K_FLAG_NAPI_ENABLED,
};
@@ -1211,6 +1211,11 @@ struct ath10k {
struct work_struct bundle_tx_work;
struct work_struct tx_complete_work;
+ atomic_t pending_recovery;
+ unsigned int recovery_count;
+ /* consecutive recovery failure count */
+ atomic_t fail_cont_count;
+
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock
*/
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 6410d3961e76..b7520220465a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -547,7 +547,7 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32] = {0};
+ char buf[32] = {};
ssize_t rc;
int ret;
@@ -983,7 +983,7 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
{
struct ath10k *ar = file->private_data;
int res;
- char buf[64] = {0};
+ char buf[64] = {};
unsigned int amsdu, ampdu;
res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
@@ -1039,7 +1039,7 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
int ret;
- char buf[96] = {0};
+ char buf[96] = {};
unsigned int log_level;
u64 mask;
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 0f6de862c3a9..b9fb192e0b48 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -244,7 +245,7 @@ static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
struct ath10k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -295,7 +296,7 @@ static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
struct ath10k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -345,7 +346,7 @@ static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
struct ath10k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index f12243d6bee1..d7e429041065 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1884,7 +1884,7 @@ static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_peer *peer;
- union htt_rx_pn_t *last_pn, new_pn = {0};
+ union htt_rx_pn_t *last_pn, new_pn = {};
struct ieee80211_hdr *hdr;
u8 tid, frag_number;
u32 seq;
@@ -2402,7 +2402,7 @@ static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
bool last_pn_valid, pn_invalid = false;
enum htt_txrx_sec_cast_type sec_index;
enum htt_security_types sec_type;
- union htt_rx_pn_t new_pn = {0};
+ union htt_rx_pn_t new_pn = {};
struct htt_hl_rx_desc *rx_desc;
union htt_rx_pn_t *last_pn;
u32 rx_desc_info, tid;
@@ -2465,7 +2465,7 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
struct fw_rx_desc_hl *fw_desc;
enum htt_txrx_sec_cast_type sec_index;
enum htt_security_types sec_type;
- union htt_rx_pn_t new_pn = {0};
+ union htt_rx_pn_t new_pn = {};
struct htt_hl_rx_desc *rx_desc;
struct ieee80211_hdr *hdr;
struct ieee80211_rx_status *rx_status;
@@ -2767,7 +2767,7 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
struct htt_rx_indication_hl *rx_hl;
enum htt_security_types sec_type;
u32 tid, frag, seq, rx_desc_info;
- union htt_rx_pn_t new_pn = {0};
+ union htt_rx_pn_t new_pn = {};
struct htt_hl_rx_desc *rx_desc;
u16 peer_id, sc, hdr_space;
union htt_rx_pn_t *last_pn;
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index c1ddd761af3e..d6f1d85ba871 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -510,7 +510,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
struct ath10k *ar = ctx;
struct ath10k_htt *htt = &ar->htt;
- struct htt_tx_done tx_done = {0};
+ struct htt_tx_done tx_done = {};
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);
@@ -560,7 +560,7 @@ void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
- struct htt_tx_done tx_done = {0};
+ struct htt_tx_done tx_done = {};
struct htt_cmd_hdr *htt_hdr;
struct htt_data_tx_desc *desc_hdr = NULL;
u16 flags1 = 0;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index fec56b916497..da71dce9babf 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -473,8 +473,8 @@ enum ath10k_hw_cc_wraparound_type {
*/
ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
- /* Each hw counter wrapsaround independently. When the
- * counter overflows the repestive counter is right shifted
+ /* Each hw counter wraps around independently. When the
+ * counter overflows the respective counter is right shifted
* by 1, i.e reset to 0x7fffffff, and other counters will be
* running unaffected. In this type of wraparound, it should
* be possible to report accurate Rx busy time unlike the
@@ -837,7 +837,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1
#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1
-/* Maximum number of Copy Engine's supported */
+/* Maximum number of Copy Engines supported */
#define CE_COUNT_MAX 12
/* Number of Copy Engines supported */
@@ -1134,7 +1134,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
/* Register definitions for first generation ath10k cards. These cards include
- * a mac thich has a register allocation similar to ath9k and at least some
+ * a mac which has a register allocation similar to ath9k and at least some
* registers including the ones relevant for modifying the coverage class are
* identical to the ath9k definitions.
* These registers are usually managed by the ath10k firmware. However by
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 40843974d6f8..24dd794e31ea 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3385,7 +3385,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
struct ieee80211_supported_band **bands;
enum nl80211_band band;
struct ieee80211_channel *channel;
- struct wmi_scan_chan_list_arg arg = {0};
+ struct wmi_scan_chan_list_arg arg = {};
struct wmi_channel_arg *ch;
bool passive;
int len;
@@ -4885,7 +4885,7 @@ static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
- struct ieee80211_sta_vht_cap vht_cap = {0};
+ struct ieee80211_sta_vht_cap vht_cap = {};
struct ath10k_hw_params *hw = &ar->hw_params;
u16 mcs_map;
u32 val;
@@ -4943,7 +4943,7 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
{
int i;
- struct ieee80211_sta_ht_cap ht_cap = {0};
+ struct ieee80211_sta_ht_cap ht_cap = {};
if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
return ht_cap;
@@ -5175,7 +5175,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
struct ath10k *ar = hw->priv;
u32 param;
int ret = 0;
- struct wmi_bb_timing_cfg_arg bb_timing = {0};
+ struct wmi_bb_timing_cfg_arg bb_timing = {};
/*
* This makes sense only when restarting hw. It is harmless to call
@@ -8162,7 +8162,12 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
ieee80211_wake_queues(ar->hw);
- clear_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags);
+
+ /* Clear recovery state. */
+ complete(&ar->driver_recovery);
+ atomic_set(&ar->fail_cont_count, 0);
+ atomic_set(&ar->pending_recovery, 0);
+
if (ar->hw_params.hw_restart_disconnect) {
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 1e6d43285138..97b49bf4ad80 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/pci.h>
@@ -63,7 +64,7 @@ static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
- {0}
+ {}
};
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index df6a24f8f8d5..cb8ae751eb31 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/skbuff.h>
@@ -1941,6 +1942,11 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
}
wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ if (ar->state == ATH10K_STATE_WEDGED) {
+ ret = -ESHUTDOWN;
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "drop wmi command %d, hardware is wedged\n", cmd_id);
+ }
/* try to send pending beacons first. they take priority */
ath10k_wmi_tx_beacons_nowait(ar);
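The wmi.c hunk above makes the tx-credit wait bail out once the device has been marked wedged, so pending WMI commands are dropped with -ESHUTDOWN instead of blocking. A rough user-space stand-in for that predicate (the kernel uses wait_event_timeout(); this loop only illustrates the early exit):

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

static int wait_for_credits(const bool *have_credits, const bool *wedged)
{
	for (int tries = 0; tries < 3; tries++) {
		if (*wedged)
			return -ESHUTDOWN; /* drop the command */
		if (*have_credits)
			return 0;
	}
	return -EAGAIN;
}

int main(void)
{
	bool credits = false, wedged = true;

	printf("%d\n", wait_for_credits(&credits, &wedged)); /* -ESHUTDOWN */
	return 0;
}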
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index fde1ce43c499..50809cc1dad4 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -988,7 +988,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *host_dev = ab->dev;
- struct platform_device_info info = {0};
+ struct platform_device_info info = {};
struct iommu_domain *iommu_dom;
struct platform_device *pdev;
struct device_node *node;
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index be9395f2ed8b..c65fc9fb539e 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -395,9 +395,6 @@ static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
goto err;
}
- /* Make sure descriptor is read after the head pointer. */
- dma_rmb();
-
*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
*skb = pipe->dest_ring->skb[sw_index];
@@ -557,7 +554,7 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab,
struct ath11k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int ret;
params.ring_base_paddr = ce_ring->base_addr_ce_space;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 48d81b82f895..d49353b6b2e7 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1393,7 +1393,7 @@ static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
enum ath11k_bdf_name_type name_type)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
- char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = {};
if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
@@ -2583,10 +2583,15 @@ int ath11k_core_init(struct ath11k_base *ab)
ret = ath11k_core_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create soc core: %d\n", ret);
- return ret;
+ goto err_unregister_pm_notifier;
}
return 0;
+
+err_unregister_pm_notifier:
+ ath11k_core_pm_notifier_unregister(ab);
+
+ return ret;
}
EXPORT_SYMBOL(ath11k_core_init);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 6b2f207975e3..220d69a7a429 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -17,6 +17,7 @@
#include <linux/average.h>
#include <linux/firmware.h>
#include <linux/suspend.h>
+#include <linux/of.h>
#include "qmi.h"
#include "htc.h"
@@ -1322,8 +1323,16 @@ static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab,
const char *filename,
void *buf, size_t buf_len)
{
- snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
- ab->hw_params.fw.dir, filename);
+ const char *fw_name = NULL;
+
+ of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
+
+ if (fw_name && strncmp(filename, "board", 5))
+ snprintf(buf, buf_len, "%s/%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, fw_name, filename);
+ else
+ snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, filename);
}
static inline const char *ath11k_bus_str(enum ath11k_bus bus)
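The ath11k_core_create_firmware_path() change lets a "firmware-name" device-tree property insert an extra directory level for everything except board files. A hedged stand-alone sketch of the same path construction, with example paths and names only:

#include <stdio.h>
#include <string.h>

/* Build the firmware path the way the hunk above does: when a firmware-name
 * override is present and the requested file is not a board file, add the
 * extra directory component.
 */
static void build_fw_path(char *buf, size_t len, const char *fw_dir,
			  const char *hw_dir, const char *fw_name,
			  const char *filename)
{
	if (fw_name && strncmp(filename, "board", 5))
		snprintf(buf, len, "%s/%s/%s/%s", fw_dir, hw_dir, fw_name,
			 filename);
	else
		snprintf(buf, len, "%s/%s/%s", fw_dir, hw_dir, filename);
}

int main(void)
{
	char path[128];

	build_fw_path(path, sizeof(path), "ath11k", "WCN6855/hw2.0",
		      "variant-a", "amss.bin");
	puts(path); /* ath11k/WCN6855/hw2.0/variant-a/amss.bin */

	build_fw_path(path, sizeof(path), "ath11k", "WCN6855/hw2.0",
		      "variant-a", "board-2.bin");
	puts(path); /* board files keep the default location */
	return 0;
}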
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index fbb6e8d8a476..520d8b8662a2 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -153,7 +154,7 @@ int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
struct ath11k_dbring *ring,
enum wmi_direct_buffer_module id)
{
- struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {};
int ret;
if (id >= WMI_DIRECT_BUF_MAX)
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 906df3b13f4f..977f945b6e66 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -375,7 +375,7 @@ static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
struct ath11k_base *ab = file->private_data;
struct ath11k_pdev *pdev;
struct ath11k *ar = ab->pdevs[0].ar;
- char buf[32] = {0};
+ char buf[32] = {};
ssize_t rc;
int i, ret, radioup = 0;
@@ -473,7 +473,7 @@ static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
size_t count, loff_t *ppos)
{
- char buf[32] = {0};
+ char buf[32] = {};
struct ath11k *ar = file->private_data;
int len = 0;
@@ -497,7 +497,7 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
{
struct ath11k *ar = file->private_data;
struct ath11k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
u32 enable, rx_filter = 0, ring_id;
int i;
int ret;
@@ -737,7 +737,7 @@ static ssize_t ath11k_write_fw_dbglog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
- char buf[128] = {0};
+ char buf[128] = {};
struct ath11k_fw_dbglog dbglog;
unsigned int param, mod_id_index, is_end;
u64 value;
@@ -950,9 +950,9 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
{
struct ath11k *ar = file->private_data;
struct ath11k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
u32 rx_filter = 0, ring_id, filter, mode;
- u8 buf[128] = {0};
+ u8 buf[128] = {};
int i, ret, rx_buf_sz = 0;
ssize_t rc;
@@ -1081,7 +1081,7 @@ static ssize_t ath11k_read_pktlog_filter(struct file *file,
size_t count, loff_t *ppos)
{
- char buf[32] = {0};
+ char buf[32] = {};
struct ath11k *ar = file->private_data;
int len = 0;
@@ -1235,7 +1235,7 @@ static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k *ar = file->private_data;
- char buf[32] = {0};
+ char buf[32] = {};
u32 dbr_id, enable;
int ret;
@@ -1473,7 +1473,7 @@ int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
char pdev_name[10];
- char buf[100] = {0};
+ char buf[100] = {};
snprintf(pdev_name, sizeof(pdev_name), "%s%u", "mac", ar->pdev_idx);
@@ -1556,10 +1556,10 @@ static ssize_t ath11k_write_twt_add_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_add_dialog_params params = { 0 };
- struct wmi_twt_enable_params twt_params = {0};
+ struct wmi_twt_add_dialog_params params = {};
+ struct wmi_twt_enable_params twt_params = {};
struct ath11k *ar = arvif->ar;
- u8 buf[128] = {0};
+ u8 buf[128] = {};
int ret;
if (ar->twt_enabled == 0) {
@@ -1632,10 +1632,10 @@ static ssize_t ath11k_write_twt_del_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_del_dialog_params params = { 0 };
- struct wmi_twt_enable_params twt_params = {0};
+ struct wmi_twt_del_dialog_params params = {};
+ struct wmi_twt_enable_params twt_params = {};
struct ath11k *ar = arvif->ar;
- u8 buf[64] = {0};
+ u8 buf[64] = {};
int ret;
if (ar->twt_enabled == 0) {
@@ -1679,8 +1679,8 @@ static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_pause_dialog_params params = { 0 };
- u8 buf[64] = {0};
+ struct wmi_twt_pause_dialog_params params = {};
+ u8 buf[64] = {};
int ret;
if (arvif->ar->twt_enabled == 0) {
@@ -1718,8 +1718,8 @@ static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
size_t count, loff_t *ppos)
{
struct ath11k_vif *arvif = file->private_data;
- struct wmi_twt_resume_dialog_params params = { 0 };
- u8 buf[64] = {0};
+ struct wmi_twt_resume_dialog_params params = {};
+ u8 buf[64] = {};
int ret;
if (arvif->ar->twt_enabled == 0) {
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 870e86a31bf8..11d28c42227e 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/vmalloc.h>
@@ -375,7 +376,7 @@ static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
+ char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
@@ -402,7 +403,7 @@ htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
+ char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
@@ -514,7 +515,7 @@ static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
@@ -567,7 +568,7 @@ static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
@@ -624,7 +625,7 @@ static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
@@ -4712,7 +4713,7 @@ int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
u8 type = stats_req->type;
u64 cookie = 0;
int ret, pdev_id = ar->pdev->pdev_id;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
init_completion(&stats_req->cmpln);
@@ -4852,7 +4853,7 @@ static ssize_t ath11k_write_htt_stats_reset(struct file *file,
{
struct ath11k *ar = file->private_data;
u8 type;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
int ret;
ret = kstrtou8_from_user(user_buf, count, 0, &type);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index f56a24b6c8da..d89d0f28d890 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/vmalloc.h>
@@ -456,7 +457,7 @@ static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
struct ath11k *ar = arsta->arvif->ar;
- char buf[32] = {0};
+ char buf[32] = {};
int len;
mutex_lock(&ar->conf_mutex);
@@ -485,7 +486,7 @@ static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
struct ath11k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -536,7 +537,7 @@ static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
struct ath11k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -586,7 +587,7 @@ static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
struct ath11k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
- char buf[64] = {0};
+ char buf[64] = {};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
@@ -700,7 +701,7 @@ ath11k_write_htt_peer_stats_reset(struct file *file,
struct ieee80211_sta *sta = file->private_data;
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
struct ath11k *ar = arsta->arvif->ar;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
int ret;
u8 type;
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 4661e0d64dd9..56b1a657e0b0 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -225,7 +225,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
int ret;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 9230a965f6f0..ffc7482c77b6 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -719,7 +719,7 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
struct dp_rx_tid *rx_tid)
{
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
unsigned long tot_desc_sz, desc_sz;
int ret;
@@ -811,7 +811,7 @@ free_desc:
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid)
{
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
int ret;
@@ -938,7 +938,7 @@ static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
u32 ba_win_sz, u16 ssn,
bool update_ssn)
{
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
int ret;
cmd.addr_lo = lower_32_bits(rx_tid->paddr);
@@ -1157,7 +1157,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
- struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_hal_reo_cmd cmd = {};
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
u8 tid;
@@ -2591,7 +2591,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
{
struct sk_buff *msdu;
struct ath11k *ar;
- struct ieee80211_rx_status rx_status = {0};
+ struct ieee80211_rx_status rx_status = {};
int ret;
if (skb_queue_empty(msdu_list))
@@ -2626,7 +2626,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
{
struct ath11k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
- int num_buffs_reaped[MAX_RADIOS] = {0};
+ int num_buffs_reaped[MAX_RADIOS] = {};
struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath11k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
@@ -2637,7 +2637,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct ath11k *ar;
struct hal_reo_dest_ring *desc;
enum hal_reo_dest_ring_push_reason push_reason;
- u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0;
+ u32 cookie;
int i;
for (i = 0; i < MAX_RADIOS; i++)
@@ -2650,14 +2650,11 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
try_again:
ath11k_hal_srng_access_begin(ab, srng);
- /* Make sure descriptor is read after the head pointer. */
- dma_rmb();
-
while (likely(desc =
(struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
- READ_ONCE(desc->buf_addr_info.info1));
+ desc->buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2686,9 +2683,8 @@ try_again:
num_buffs_reaped[mac_id]++;
- info0 = READ_ONCE(desc->info0);
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
- info0);
+ desc->info0);
if (unlikely(push_reason !=
HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
@@ -2696,21 +2692,18 @@ try_again:
continue;
}
- rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0);
- rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0);
-
- rxcb->is_first_msdu = !!(rx_msdu_info0 &
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(rx_msdu_info0 &
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(rx_msdu_info0 &
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
- READ_ONCE(desc->rx_mpdu_info.meta_data));
+ desc->rx_mpdu_info.meta_data);
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
- rx_mpdu_info0);
+ desc->rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
- info0);
+ desc->info0);
rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list[mac_id], msdu);
@@ -3231,7 +3224,7 @@ static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
- u8 mic_hdr[16] = {0};
+ u8 mic_hdr[16] = {};
u8 tid = 0;
int ret;
@@ -3825,7 +3818,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
struct dp_link_desc_bank *link_desc_banks;
enum hal_rx_buf_return_buf_manager rbm;
int tot_n_bufs_reaped, quota, ret, i;
- int n_bufs_reaped[MAX_RADIOS] = {0};
+ int n_bufs_reaped[MAX_RADIOS] = {};
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
u32 desc_bank, num_msdus;
@@ -4106,7 +4099,7 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
struct sk_buff_head *msdu_list)
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
- struct ieee80211_rx_status rxs = {0};
+ struct ieee80211_rx_status rxs = {};
bool drop = true;
switch (rxcb->err_rel_src) {
@@ -4142,7 +4135,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
struct ath11k_skb_rxcb *rxcb;
u32 *rx_desc;
int buf_id, mac_id;
- int num_buffs_reaped[MAX_RADIOS] = {0};
+ int num_buffs_reaped[MAX_RADIOS] = {};
int total_num_buffs_reaped = 0;
int ret, i;
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 8522c67baabf..562aba66582f 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -84,7 +85,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
- struct hal_tx_info ti = {0};
+ struct hal_tx_info ti = {};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct hal_srng *tcl_ring;
@@ -316,7 +317,7 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
struct dp_tx_ring *tx_ring,
struct ath11k_dp_htt_wbm_tx_status *ts)
{
- struct ieee80211_tx_status status = { 0 };
+ struct ieee80211_tx_status status = {};
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
@@ -391,7 +392,7 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
u32 msdu_id, struct dp_tx_ring *tx_ring)
{
struct htt_tx_wbm_completion *status_desc;
- struct ath11k_dp_htt_wbm_tx_status ts = {0};
+ struct ath11k_dp_htt_wbm_tx_status ts = {};
enum hal_wbm_htt_tx_comp_status wbm_status;
status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
@@ -551,8 +552,8 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
{
- struct ieee80211_tx_status status = { 0 };
- struct ieee80211_rate_status status_rate = { 0 };
+ struct ieee80211_tx_status status = {};
+ struct ieee80211_rate_status status_rate = {};
struct ath11k_base *ab = ar->ab;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
@@ -690,7 +691,7 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct sk_buff *msdu;
- struct hal_tx_status ts = { 0 };
+ struct hal_tx_status ts = {};
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
u32 *desc;
u32 msdu_id;
@@ -1187,7 +1188,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
int ret = 0, ring_id = 0, i;
if (ab->hw_params.full_monitor_mode) {
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index f1d76839a87b..0c3ce7509ab8 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -601,7 +601,7 @@ u32 ath11k_hal_ce_dst_status_get_length(void *buf)
struct hal_ce_srng_dst_status_desc *desc = buf;
u32 len;
- len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags));
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
return len;
@@ -825,13 +825,23 @@ u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
+ u32 hp;
+
lockdep_assert_held(&srng->lock);
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
} else {
- srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+ hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+
+ if (hp != srng->u.dst_ring.cached_hp) {
+ srng->u.dst_ring.cached_hp = hp;
+ /* Make sure descriptor is read after the head
+ * pointer.
+ */
+ dma_rmb();
+ }
/* Try to prefetch the next descriptor in the ring */
if (srng->flags & HAL_SRNG_FLAGS_CACHED)
@@ -846,7 +856,6 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
- /* TODO: See if we need a write memory barrier here */
if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
@@ -854,21 +863,37 @@ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
- *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ /* Make sure descriptor is written before updating the
+ * head pointer.
+ */
+ dma_wmb();
+ WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
- *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ dma_mb();
+ WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
}
} else {
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
+ /* Assume the implementation uses an MMIO write accessor
+ * which has the required wmb() so that the descriptor
+ * is written before updating the head pointer.
+ */
ath11k_hif_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ mb();
ath11k_hif_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
@@ -1348,6 +1373,10 @@ EXPORT_SYMBOL(ath11k_hal_srng_init);
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
+ int i;
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++)
+ ab->hal.srng_list[i].initialized = 0;
ath11k_hal_unregister_srng_key(ab);
ath11k_hal_free_cont_rdp(ab);
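The hal.c hunks above move the barriers to where they are actually needed: dma_rmb() only after a newer head pointer has been observed, and dma_wmb()/mb() before publishing a ring pointer update. The sketch below models that producer/consumer ordering with C11 fences in place of the kernel barrier macros; it is an illustration of the pattern, not the driver code:

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	uint32_t desc[64];
	_Atomic uint32_t head; /* written by producer, read by consumer */
};

static void producer_publish(struct ring *r, uint32_t idx, uint32_t val)
{
	r->desc[idx % 64] = val;
	/* dma_wmb() analogue: descriptor visible before the head update */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&r->head, idx + 1, memory_order_relaxed);
}

static int consumer_poll(struct ring *r, uint32_t *cached_head, uint32_t tail,
			 uint32_t *val)
{
	uint32_t head = atomic_load_explicit(&r->head, memory_order_relaxed);

	if (head != *cached_head) {
		*cached_head = head;
		/* dma_rmb() analogue: read descriptors only after a newer
		 * head pointer has been observed.
		 */
		atomic_thread_fence(memory_order_acquire);
	}
	if (tail == *cached_head)
		return 0;
	*val = r->desc[tail % 64];
	return 1;
}

int main(void)
{
	struct ring r = { .head = 0 };
	uint32_t cached = 0, val;

	producer_publish(&r, 0, 42);
	if (consumer_poll(&r, &cached, 0, &val))
		return (int)(val != 42);
	return 1;
}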
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index 23054ab29a5e..4571d01cc33d 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -497,7 +497,7 @@ static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
{
struct ath11k_htc_svc_tx_credits *serv_entry;
- u32 svc_id[] = {
+ static const u32 svc_id[] = {
ATH11K_HTC_SVC_ID_WMI_CONTROL,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 758ef6f26432..1fadf5faafb8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1037,7 +1037,7 @@ static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
struct ath11k_pdev *pdev = ar->pdev;
struct vdev_create_params param = {};
int bit, ret;
- u8 tmp_addr[6] = {0};
+ u8 tmp_addr[6] = {};
u16 nss;
lockdep_assert_held(&ar->conf_mutex);
@@ -3026,7 +3026,7 @@ static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar,
struct ieee80211_sta_he_cap *he_cap)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ieee80211_he_cap_elem he_cap_elem = {0};
+ struct ieee80211_he_cap_elem he_cap_elem = {};
struct ieee80211_sta_he_cap *cap_band = NULL;
struct cfg80211_chan_def def;
u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
@@ -3763,7 +3763,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_recalculate_mgmt_rate(ar, vif, &def);
if (changed & BSS_CHANGED_TWT) {
- struct wmi_twt_enable_params twt_params = {0};
+ struct wmi_twt_enable_params twt_params = {};
if (info->twt_requester || info->twt_responder) {
ath11k_wmi_fill_default_twt_params(&twt_params);
@@ -5323,7 +5323,7 @@ static struct ieee80211_sta_ht_cap
ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
{
int i;
- struct ieee80211_sta_ht_cap ht_cap = {0};
+ struct ieee80211_sta_ht_cap ht_cap = {};
u32 ar_vht_cap = ar->pdev->cap.vht_cap;
if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
@@ -5490,7 +5490,7 @@ static struct ieee80211_sta_vht_cap
ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
u32 rate_cap_rx_chainmask)
{
- struct ieee80211_sta_vht_cap vht_cap = {0};
+ struct ieee80211_sta_vht_cap vht_cap = {};
u16 txmcs_map, rxmcs_map;
int i;
@@ -6159,7 +6159,7 @@ void ath11k_mac_drain_tx(struct ath11k *ar)
static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
{
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
struct ath11k_base *ab = ar->ab;
int i, ret = 0;
u32 ring_id;
@@ -6678,7 +6678,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct vdev_create_params vdev_param = {0};
+ struct vdev_create_params vdev_param = {};
struct peer_create_params peer_param;
u32 param_id, param_value;
u16 nss;
@@ -8744,9 +8744,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
return ret;
}
- ieee80211_iterate_stations_atomic(ar->hw,
- ath11k_mac_disable_peer_fixed_rate,
- arvif);
+ ieee80211_iterate_stations_mtx(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
} else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
@@ -8813,9 +8813,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
}
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_stations_atomic(ar->hw,
- ath11k_mac_disable_peer_fixed_rate,
- arvif);
+ ieee80211_iterate_stations_mtx(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
@@ -10421,7 +10421,7 @@ int ath11k_mac_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i;
int ret;
- u8 mac_addr[ETH_ALEN] = {0};
+ u8 mac_addr[ETH_ALEN] = {};
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 78444f8ea153..d8655badd96d 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -37,7 +37,7 @@ static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
- {0}
+ {}
};
MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
@@ -692,7 +692,7 @@ static void ath11k_pci_coredump_download(struct ath11k_base *ab)
struct ath11k_tlv_dump_data *dump_tlv;
size_t hdr_len = sizeof(*file_data);
void *buf;
- u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
+ u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};
ath11k_mhi_coredump(mhi_ctrl, false);
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 79e091134515..b6b0516819a6 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/relay.h>
@@ -205,7 +206,7 @@ static int ath11k_spectral_scan_trigger(struct ath11k *ar)
static int ath11k_spectral_scan_config(struct ath11k *ar,
enum ath11k_spectral_mode mode)
{
- struct ath11k_wmi_vdev_spectral_conf_param param = { 0 };
+ struct ath11k_wmi_vdev_spectral_conf_param param = {};
struct ath11k_vif *arvif;
int ret, count;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 56af2e9634f4..0491e3fd6b5e 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -7542,7 +7542,7 @@ static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *sk
static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct mgmt_rx_event_params rx_ev = {0};
+ struct mgmt_rx_event_params rx_ev = {};
struct ath11k *ar;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
@@ -7657,7 +7657,7 @@ exit:
static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {};
struct ath11k *ar;
if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
@@ -7712,7 +7712,7 @@ static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
- struct wmi_scan_event scan_ev = {0};
+ struct wmi_scan_event scan_ev = {};
if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
ath11k_warn(ab, "failed to extract scan event");
@@ -7884,7 +7884,7 @@ static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct wmi_chan_info_event ch_info_ev = {0};
+ struct wmi_chan_info_event ch_info_ev = {};
struct ath11k *ar;
struct survey_info *survey;
int idx;
@@ -8031,7 +8031,7 @@ exit:
static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
- struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {};
struct ath11k *ar;
if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
@@ -8129,7 +8129,7 @@ static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buf
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {};
struct ath11k *ar;
if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
index 8d1a86e420a4..3b983f4e3268 100644
--- a/drivers/net/wireless/ath/ath12k/ahb.c
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -1022,6 +1022,7 @@ static int ath12k_ahb_probe(struct platform_device *pdev)
ab->hif.ops = hif_ops;
ab->pdev = pdev;
ab->hw_rev = hw_rev;
+ ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
platform_set_drvdata(pdev, ab);
ab_ahb = ath12k_ab_to_ahb(ab);
ab_ahb->ab = ab;
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index 3f3439262cf4..f93a419abf65 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -433,9 +433,6 @@ static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
goto err;
}
- /* Make sure descriptor is read after the head pointer. */
- dma_rmb();
-
*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
*skb = pipe->dest_ring->skb[sw_index];
@@ -581,7 +578,7 @@ static int ath12k_ce_init_ring(struct ath12k_base *ab,
struct ath12k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int ret;
params.ring_base_paddr = ce_ring->base_addr_ce_space;
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 83caba3104d6..5d494c5cdc0d 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -37,6 +37,36 @@ static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_li
static DEFINE_MUTEX(ath12k_hw_group_mutex);
+static const struct
+ath12k_mem_profile_based_param ath12k_mem_profile_based_param[] = {
+[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
+ .num_vdevs = 17,
+ .max_client_single = 512,
+ .max_client_dbs = 128,
+ .max_client_dbs_sbs = 128,
+ .dp_params = {
+ .tx_comp_ring_size = 32768,
+ .rxdma_monitor_buf_ring_size = 4096,
+ .rxdma_monitor_dst_ring_size = 8092,
+ .num_pool_tx_desc = 32768,
+ .rx_desc_count = 12288,
+ },
+ },
+[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
+ .num_vdevs = 9,
+ .max_client_single = 128,
+ .max_client_dbs = 64,
+ .max_client_dbs_sbs = 64,
+ .dp_params = {
+ .tx_comp_ring_size = 16384,
+ .rxdma_monitor_buf_ring_size = 256,
+ .rxdma_monitor_dst_ring_size = 512,
+ .num_pool_tx_desc = 16384,
+ .rx_desc_count = 6144,
+ },
+ },
+};
+
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
{
struct ath12k *ar;
@@ -188,7 +218,7 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
bool bus_type_mode, bool with_default)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
- char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+ char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = {};
if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
@@ -593,28 +623,15 @@ exit:
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
{
if (ab->num_radios == 2)
- return TARGET_NUM_STATIONS_DBS;
- else if (ab->num_radios == 3)
- return TARGET_NUM_PEERS_PDEV_DBS_SBS;
- return TARGET_NUM_STATIONS_SINGLE;
+ return TARGET_NUM_STATIONS(ab, DBS);
+ if (ab->num_radios == 3)
+ return TARGET_NUM_STATIONS(ab, DBS_SBS);
+ return TARGET_NUM_STATIONS(ab, SINGLE);
}
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
{
- if (ab->num_radios == 2)
- return TARGET_NUM_PEERS_PDEV_DBS;
- else if (ab->num_radios == 3)
- return TARGET_NUM_PEERS_PDEV_DBS_SBS;
- return TARGET_NUM_PEERS_PDEV_SINGLE;
-}
-
-u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
-{
- if (ab->num_radios == 2)
- return TARGET_NUM_TIDS(DBS);
- else if (ab->num_radios == 3)
- return TARGET_NUM_TIDS(DBS_SBS);
- return TARGET_NUM_TIDS(SINGLE);
+ return ath12k_core_get_max_station_per_radio(ab) + TARGET_NUM_VDEVS(ab);
}
struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
@@ -1332,7 +1349,7 @@ exit:
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
- int ret;
+ int ret, total_vdev;
mutex_lock(&ab->core_lock);
ath12k_dp_pdev_free(ab);
@@ -1343,8 +1360,8 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
ath12k_dp_free(ab);
ath12k_hal_srng_deinit(ab);
-
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
+ ab->free_vdev_map = (1LL << total_vdev) - 1;
ret = ath12k_hal_srng_init(ab);
if (ret)
@@ -1475,7 +1492,7 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
complete(&ar->vdev_setup_done);
complete(&ar->vdev_delete_done);
complete(&ar->bss_survey_done);
- complete(&ar->regd_update_completed);
+ complete_all(&ar->regd_update_completed);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
@@ -1711,8 +1728,23 @@ static void ath12k_core_reset(struct work_struct *work)
mutex_unlock(&ag->mutex);
}
+enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab)
+{
+ unsigned long total_ram;
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ total_ram = si.totalram * si.mem_unit;
+
+ if (total_ram < SZ_512M)
+ return ATH12K_QMI_MEMORY_MODE_LOW_512_M;
+
+ return ATH12K_QMI_MEMORY_MODE_DEFAULT;
+}
+
int ath12k_core_pre_init(struct ath12k_base *ab)
{
+ const struct ath12k_mem_profile_based_param *param;
int ret;
ret = ath12k_hw_init(ab);
@@ -1721,6 +1753,8 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return ret;
}
+ param = &ath12k_mem_profile_based_param[ab->target_mem_mode];
+ ab->profile_param = param;
ath12k_fw_map(ab);
return 0;
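ath12k now sizes its DP rings and client limits from a memory profile chosen by total system RAM (si_meminfo() in the patch), stored in ab->target_mem_mode and looked up in ath12k_mem_profile_based_param[]. A tiny sketch of the mode selection, with the 512 MiB threshold mirrored from the hunk and otherwise hypothetical names:

#include <stdio.h>

#define SZ_512M (512UL * 1024 * 1024)

enum mem_mode { MEM_MODE_DEFAULT, MEM_MODE_LOW_512M };

/* Pick the QMI memory mode from total system RAM, as the hunk above does. */
static enum mem_mode pick_mem_mode(unsigned long totalram_bytes)
{
	if (totalram_bytes < SZ_512M)
		return MEM_MODE_LOW_512M;
	return MEM_MODE_DEFAULT;
}

int main(void)
{
	printf("%d\n", pick_mem_mode(256UL * 1024 * 1024)); /* 1: low mode */
	printf("%d\n", pick_mem_mode(1UL << 31));           /* 0: default */
	return 0;
}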
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 0c1a6df7a02e..519f826f56c8 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -116,6 +116,7 @@ static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo)
enum ath12k_skb_flags {
ATH12K_SKB_HW_80211_ENCAP = BIT(0),
ATH12K_SKB_CIPHER_SET = BIT(1),
+ ATH12K_SKB_MLO_STA = BIT(2),
};
struct ath12k_skb_cb {
@@ -316,6 +317,7 @@ struct ath12k_link_vif {
int bank_id;
u8 vdev_id_check_en;
+ bool beacon_prot;
struct wmi_wmm_params_all_arg wmm_params;
struct list_head list;
@@ -349,6 +351,7 @@ struct ath12k_link_vif {
bool group_key_valid;
struct wmi_vdev_install_key_arg group_key;
bool pairwise_key_done;
+ u16 num_stations;
};
struct ath12k_vif {
@@ -563,6 +566,8 @@ struct ath12k_link_sta {
/* for firmware use only */
u8 link_idx;
+ u32 tx_retry_failed;
+ u32 tx_retry_count;
};
struct ath12k_reoq_buf {
@@ -674,6 +679,15 @@ struct ath12k_per_peer_tx_stats {
bool is_ampdu;
};
+struct ath12k_pdev_rssi_offsets {
+ s32 temp_offset;
+ s8 min_nf_dbm;
+ /* Cache the sum here to avoid calculating it every time in hot path
+ * noise_floor = min_nf_dbm + temp_offset
+ */
+ s32 noise_floor;
+};
+
#define ATH12K_FLUSH_TIMEOUT (5 * HZ)
#define ATH12K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
@@ -827,6 +841,7 @@ struct ath12k {
unsigned long last_tx_power_update;
s8 max_allowed_tx_power;
+ struct ath12k_pdev_rssi_offsets rssi_info;
};
struct ath12k_hw {
@@ -884,6 +899,8 @@ struct ath12k_pdev_cap {
struct ath12k_band_cap band[NUM_NL80211_BANDS];
u32 eml_cap;
u32 mld_cap;
+ bool nss_ratio_enabled;
+ u8 nss_ratio_info;
};
struct mlo_timestamp {
@@ -995,6 +1012,22 @@ struct ath12k_wsi_info {
u32 hw_link_id_base;
};
+struct ath12k_dp_profile_params {
+ u32 tx_comp_ring_size;
+ u32 rxdma_monitor_buf_ring_size;
+ u32 rxdma_monitor_dst_ring_size;
+ u32 num_pool_tx_desc;
+ u32 rx_desc_count;
+};
+
+struct ath12k_mem_profile_based_param {
+ u32 num_vdevs;
+ u32 max_client_single;
+ u32 max_client_dbs;
+ u32 max_client_dbs_sbs;
+ struct ath12k_dp_profile_params dp_params;
+};
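
ab->target_mem_mode indexes an array of these entries (core.c above dereferences ath12k_mem_profile_based_param[ab->target_mem_mode]). A hedged illustration of the table shape: the DEFAULT dp_params reuse the former fixed defines that the dp.h hunk below removes, while every low-memory number and the client/vdev counts are purely hypothetical:

	/* sketch only: possible shape of the profile table; values marked
	 * "assumed"/"hypothetical" are not taken from this patch
	 */
	static const struct ath12k_mem_profile_based_param
	ath12k_mem_profile_based_param[] = {
		[ATH12K_QMI_MEMORY_MODE_DEFAULT] = {
			.num_vdevs = 17,			/* assumed */
			.max_client_single = 512,		/* assumed */
			.max_client_dbs = 128,			/* assumed */
			.max_client_dbs_sbs = 128,		/* assumed */
			.dp_params = {
				.tx_comp_ring_size = 32768,
				.rxdma_monitor_buf_ring_size = 4096,
				.rxdma_monitor_dst_ring_size = 8092,
				.num_pool_tx_desc = 32768,
				.rx_desc_count = 12288,
			},
		},
		[ATH12K_QMI_MEMORY_MODE_LOW_512_M] = {
			.num_vdevs = 9,				/* hypothetical */
			.max_client_single = 128,		/* hypothetical */
			.max_client_dbs = 64,			/* hypothetical */
			.max_client_dbs_sbs = 64,		/* hypothetical */
			.dp_params = {
				.tx_comp_ring_size = 16384,	/* hypothetical */
				.rxdma_monitor_buf_ring_size = 256,
				.rxdma_monitor_dst_ring_size = 512,
				.num_pool_tx_desc = 16384,
				.rx_desc_count = 6144,
			},
		},
	};
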
+
/* Master structure to hold the hw data which may be used in core module */
struct ath12k_base {
enum ath12k_hw_rev hw_rev;
@@ -1198,6 +1231,8 @@ struct ath12k_base {
struct ath12k_reg_freq reg_freq_2ghz;
struct ath12k_reg_freq reg_freq_5ghz;
struct ath12k_reg_freq reg_freq_6ghz;
+ const struct ath12k_mem_profile_based_param *profile_param;
+ enum ath12k_qmi_mem_mode target_mem_mode;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -1324,7 +1359,6 @@ const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
-u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
void ath12k_fw_stats_init(struct ath12k *ar);
@@ -1333,6 +1367,7 @@ void ath12k_fw_stats_free(struct ath12k_fw_stats *stats);
void ath12k_fw_stats_reset(struct ath12k *ar);
struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
int index);
+enum ath12k_qmi_mem_mode ath12k_core_get_memory_mode(struct ath12k_base *ab);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -1467,4 +1502,11 @@ static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
return ag->ab[device_id];
}
+static inline s32 ath12k_pdev_get_noise_floor(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ return ar->rssi_info.noise_floor;
+}
+
#endif /* _CORE_H_ */
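
The rssi_info cache added above keeps noise_floor = min_nf_dbm + temp_offset precomputed, so hot-path readers such as the dp_mon.c change later in this series only take ar->data_lock and read a single field. A sketch of the matching update side, assuming a hypothetical helper name and that the caller already holds the lock:

	/* hypothetical helper (not part of this patch): refresh the cached sum
	 * whenever the temperature offset or the minimum noise floor changes
	 */
	static void ath12k_pdev_update_noise_floor(struct ath12k *ar)
	{
		lockdep_assert_held(&ar->data_lock);

		ar->rssi_info.noise_floor = ar->rssi_info.min_nf_dbm +
					    ar->rssi_info.temp_offset;
	}
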
diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c
index 788160c84c68..6604dacea2ae 100644
--- a/drivers/net/wireless/ath/ath12k/dbring.c
+++ b/drivers/net/wireless/ath/ath12k/dbring.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "core.h"
@@ -117,7 +118,7 @@ int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
struct ath12k_dbring *ring,
enum wmi_direct_buffer_module id)
{
- struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0};
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {};
int ret;
if (id >= WMI_DIRECT_BUF_MAX)
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index 23da93afaa5c..16601a8c3644 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -52,7 +52,7 @@ ath12k_write_simulate_fw_crash(struct file *file,
struct ath12k_base *ab = file->private_data;
struct ath12k_pdev *pdev;
struct ath12k *ar = NULL;
- char buf[32] = {0};
+ char buf[32] = {};
int i, ret;
ssize_t rc;
@@ -816,7 +816,7 @@ static ssize_t ath12k_write_extd_rx_stats(struct file *file,
size_t count, loff_t *ppos)
{
struct ath12k *ar = file->private_data;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
u32 ring_id, rx_filter = 0;
bool enable;
int ret, i;
@@ -1217,7 +1217,7 @@ void ath12k_debugfs_pdev_create(struct ath12k_base *ab)
void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
bool dput_needed;
- char soc_name[64] = { 0 };
+ char soc_name[64] = {};
struct dentry *debugfs_ath12k;
debugfs_ath12k = debugfs_lookup("ath12k", NULL);
@@ -1470,7 +1470,7 @@ void ath12k_debugfs_register(struct ath12k *ar)
struct ath12k_base *ab = ar->ab;
struct ieee80211_hw *hw = ar->ah->hw;
char pdev_name[5];
- char buf[100] = {0};
+ char buf[100] = {};
scnprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index aeaf970339d4..48b010a1b756 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -2024,7 +2024,7 @@ ath12k_htt_print_stats_string_tlv(const void *tag_buf, u16 tag_len,
u8 i;
u16 index = 0;
u32 datum;
- char data[ATH12K_HTT_MAX_STRING_LEN] = {0};
+ char data[ATH12K_HTT_MAX_STRING_LEN] = {};
tag_len = tag_len >> 2;
@@ -3081,7 +3081,7 @@ ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
const struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv *htt_stats_buf = tag_buf;
- char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {};
u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
u32 len = stats_req->buf_len;
u8 *buf = stats_req->buf;
@@ -3642,7 +3642,7 @@ ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
u8 *buf = stats_req->buf;
u8 pg_locked;
u8 pg_unlock;
- char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {};
if (tag_len < sizeof(*stat_buf))
return;
@@ -4720,7 +4720,38 @@ ath12k_htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
len += print_array_to_buf(buf, len, "tx_pream", htt_stats_buf->tx_pream,
ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
len += print_array_to_buf(buf, len, "tx_dcm", htt_stats_buf->tx_dcm,
- ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_histogram_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_histogram_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "low_latency_rate_cnt = %u\n",
+ le32_to_cpu(stats_buf->low_latency_rate_cnt));
+ len += scnprintf(buf + len, buf_len - len, "su_burst_rate_drop_cnt = %u\n",
+ le32_to_cpu(stats_buf->su_burst_rate_drop_cnt));
+ len += scnprintf(buf + len, buf_len - len, "su_burst_rate_drop_fail_cnt = %u\n",
+ le32_to_cpu(stats_buf->su_burst_rate_drop_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rate_retry_mcs_drop_cnt = %u\n",
+ le32_to_cpu(stats_buf->rate_retry_mcs_drop_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "\nPER_HISTOGRAM_STATS\n");
+ len += print_array_to_buf(buf, len, "mcs_drop_rate", stats_buf->mcs_drop_rate,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_DROP_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "per_histogram_count",
+ stats_buf->per_histogram_cnt,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_PER_COUNTERS, "\n\n");
stats_req->buf_len = len;
}
@@ -5015,6 +5046,497 @@ ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(const void *tag_buf, u16 tag_len,
stats_req->buf_len = len;
}
+static void
+ath12k_htt_print_pdev_tdma_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_tdma_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_TDMA_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "num_tdma_active_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tdma_active_schedules));
+ len += scnprintf(buf + len, buf_len - len, "num_tdma_reserved_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tdma_reserved_schedules));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_tdma_restricted_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tdma_restricted_schedules));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_tdma_unconfigured_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tdma_unconfigured_schedules));
+ len += scnprintf(buf + len, buf_len - len, "num_tdma_slot_switches = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tdma_slot_switches));
+ len += scnprintf(buf + len, buf_len - len, "num_tdma_edca_switches = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_tdma_edca_switches));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_mlo_sched_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_mlo_sched_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_MLO_SCHED_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_sec_link_sched = %u\n",
+ le32_to_cpu(stats_buf->pref_link_num_sec_link_sched));
+ len += scnprintf(buf + len, buf_len - len, "num_pref_link_timeout = %u\n",
+ le32_to_cpu(stats_buf->pref_link_num_pref_link_timeout));
+ len += scnprintf(buf + len, buf_len - len, "num_pref_link_sch_delay_ipc = %u\n",
+ le32_to_cpu(stats_buf->pref_link_num_pref_link_sch_delay_ipc));
+ len += scnprintf(buf + len, buf_len - len, "num_pref_link_timeout_ipc = %u\n\n",
+ le32_to_cpu(stats_buf->pref_link_num_pref_link_timeout_ipc));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_mlo_ipc_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_mlo_ipc_stats_tlv *stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u8 i, j;
+
+ if (tag_len < sizeof(*stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_MLO_IPC_STATS:\n");
+ for (i = 0; i < ATH12K_HTT_HWMLO_MAX_LINKS; i++) {
+ len += scnprintf(buf + len, buf_len - len, "src_link: %u\n", i);
+ for (j = 0; j < ATH12K_HTT_MLO_MAX_IPC_RINGS; j++)
+ len += scnprintf(buf + len, buf_len - len,
+ "mlo_ipc_ring_full_cnt[%u]: %u\n", j,
+ le32_to_cpu(stats_buf->mlo_ipc_ring_cnt[i][j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_rtt_resp_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_pdev_rtt_resp_stats_tlv *sbuf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*sbuf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_RTT_RESP_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(sbuf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "tx_11mc_ftm_suc = %u\n",
+ le32_to_cpu(sbuf->tx_11mc_ftm_suc));
+ len += scnprintf(buf + len, buf_len - len, "tx_11mc_ftm_suc_retry = %u\n",
+ le32_to_cpu(sbuf->tx_11mc_ftm_suc_retry));
+ len += scnprintf(buf + len, buf_len - len, "tx_11mc_ftm_fail = %u\n",
+ le32_to_cpu(sbuf->tx_11mc_ftm_fail));
+ len += scnprintf(buf + len, buf_len - len, "rx_11mc_ftmr_cnt = %u\n",
+ le32_to_cpu(sbuf->rx_11mc_ftmr_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_11mc_ftmr_dup_cnt = %u\n",
+ le32_to_cpu(sbuf->rx_11mc_ftmr_dup_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_11mc_iftmr_cnt = %u\n",
+ le32_to_cpu(sbuf->rx_11mc_iftmr_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_11mc_iftmr_dup_cnt = %u\n",
+ le32_to_cpu(sbuf->rx_11mc_iftmr_dup_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "ftmr_drop_11mc_resp_role_not_enabled_cnt = %u\n",
+ le32_to_cpu(sbuf->ftmr_drop_11mc_resp_role_not_enabled_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tx_11az_ftm_successful = %u\n",
+ le32_to_cpu(sbuf->tx_11az_ftm_successful));
+ len += scnprintf(buf + len, buf_len - len, "tx_11az_ftm_failed = %u\n",
+ le32_to_cpu(sbuf->tx_11az_ftm_failed));
+ len += scnprintf(buf + len, buf_len - len, "rx_11az_ftmr_cnt = %u\n",
+ le32_to_cpu(sbuf->rx_11az_ftmr_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_11az_ftmr_dup_cnt = %u\n",
+ le32_to_cpu(sbuf->rx_11az_ftmr_dup_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_11az_iftmr_dup_cnt = %u\n",
+ le32_to_cpu(sbuf->rx_11az_iftmr_dup_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "initiator_active_responder_rejected_cnt = %u\n",
+ le32_to_cpu(sbuf->initiator_active_responder_rejected_cnt));
+ len += scnprintf(buf + len, buf_len - len, "malformed_ftmr = %u\n",
+ le32_to_cpu(sbuf->malformed_ftmr));
+ len += scnprintf(buf + len, buf_len - len,
+ "ftmr_drop_ntb_resp_role_not_enabled_cnt = %u\n",
+ le32_to_cpu(sbuf->ftmr_drop_ntb_resp_role_not_enabled_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "ftmr_drop_tb_resp_role_not_enabled_cnt = %u\n",
+ le32_to_cpu(sbuf->ftmr_drop_tb_resp_role_not_enabled_cnt));
+ len += scnprintf(buf + len, buf_len - len, "responder_alloc_cnt = %u\n",
+ le32_to_cpu(sbuf->responder_alloc_cnt));
+ len += scnprintf(buf + len, buf_len - len, "responder_alloc_failure = %u\n",
+ le32_to_cpu(sbuf->responder_alloc_failure));
+ len += scnprintf(buf + len, buf_len - len, "responder_terminate_cnt = %u\n",
+ le32_to_cpu(sbuf->responder_terminate_cnt));
+ len += scnprintf(buf + len, buf_len - len, "active_rsta_open = %u\n",
+ le32_to_cpu(sbuf->active_rsta_open));
+ len += scnprintf(buf + len, buf_len - len, "active_rsta_mac = %u\n",
+ le32_to_cpu(sbuf->active_rsta_mac));
+ len += scnprintf(buf + len, buf_len - len, "active_rsta_mac_phy = %u\n",
+ le32_to_cpu(sbuf->active_rsta_mac_phy));
+ len += scnprintf(buf + len, buf_len - len, "pn_check_failure_cnt = %u\n",
+ le32_to_cpu(sbuf->pn_check_failure_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_assoc_ranging_peers = %u\n",
+ le32_to_cpu(sbuf->num_assoc_ranging_peers));
+ len += scnprintf(buf + len, buf_len - len, "num_unassoc_ranging_peers = %u\n",
+ le32_to_cpu(sbuf->num_unassoc_ranging_peers));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m1_auth_recv_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_m1_auth_recv_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m1_auth_drop_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_m1_auth_drop_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m2_auth_recv_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_m2_auth_recv_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m2_auth_tx_fail_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_m2_auth_tx_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m3_auth_recv_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_m3_auth_recv_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m3_auth_drop_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_m3_auth_drop_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_peer_create_request_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_peer_create_request_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_peer_created_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_peer_created_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_peer_create_timeout_cnt = %u\n",
+ le32_to_cpu(sbuf->pasn_peer_create_timeout_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "sec_ranging_not_supported_mfp_not_setup = %u\n",
+ le32_to_cpu(sbuf->sec_ranging_not_supported_mfp_not_setup));
+ len += scnprintf(buf + len, buf_len - len,
+ "non_sec_ranging_discarded_for_assoc_peer_with_mfpr_set = %u\n",
+ le32_to_cpu(sbuf->non_sec_ranging_discarded_for_assoc_peer));
+ len += scnprintf(buf + len, buf_len - len,
+ "open_ranging_discarded_with_URNM_MFPR_set_for_pasn_peer = %u\n",
+ le32_to_cpu(sbuf->open_ranging_discarded_set_for_pasn_peer));
+ len += scnprintf(buf + len, buf_len - len,
+ "unassoc_non_pasn_ranging_not_supported_with_URNM_MFPR = %u\n",
+ le32_to_cpu(sbuf->unassoc_non_pasn_ranging_not_supported));
+ len += scnprintf(buf + len, buf_len - len, "invalid_ftm_request_params = %u\n",
+ le32_to_cpu(sbuf->invalid_ftm_request_params));
+ len += scnprintf(buf + len, buf_len - len,
+ "requested_bw_format_not_supported = %u\n",
+ le32_to_cpu(sbuf->requested_bw_format_not_supported));
+ len += scnprintf(buf + len, buf_len - len,
+ "ntb_unsec_unassoc_mode_ranging_peer_alloc_failed = %u\n",
+ le32_to_cpu(sbuf->ntb_unsec_unassoc_ranging_peer_alloc_failed));
+ len += scnprintf(buf + len, buf_len - len,
+ "tb_unassoc_unsec_mode_pasn_peer_creation_failed = %u\n",
+ le32_to_cpu(sbuf->tb_unassoc_unsec_pasn_peer_creation_failed));
+ len += scnprintf(buf + len, buf_len - len,
+ "num_ranging_sequences_processed = %u\n",
+ le32_to_cpu(sbuf->num_ranging_sequences_processed));
+ len += scnprintf(buf + len, buf_len - len, "ndp_rx_cnt = %u\n",
+ le32_to_cpu(sbuf->ndp_rx_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_req_bw_20_MHz = %u\n",
+ le32_to_cpu(sbuf->num_req_bw_20_mhz));
+ len += scnprintf(buf + len, buf_len - len, "num_req_bw_40_MHz = %u\n",
+ le32_to_cpu(sbuf->num_req_bw_40_mhz));
+ len += scnprintf(buf + len, buf_len - len, "num_req_bw_80_MHz = %u\n",
+ le32_to_cpu(sbuf->num_req_bw_80_mhz));
+ len += scnprintf(buf + len, buf_len - len, "num_req_bw_160_MHz = %u\n",
+ le32_to_cpu(sbuf->num_req_bw_160_mhz));
+ len += scnprintf(buf + len, buf_len - len, "ntb_tx_ndp = %u\n",
+ le32_to_cpu(sbuf->ntb_tx_ndp));
+ len += scnprintf(buf + len, buf_len - len, "num_ntb_ranging_NDPAs_recv = %u\n",
+ le32_to_cpu(sbuf->num_ntb_ranging_ndpas_recv));
+ len += scnprintf(buf + len, buf_len - len, "recv_lmr = %u\n",
+ le32_to_cpu(sbuf->recv_lmr));
+ len += scnprintf(buf + len, buf_len - len, "invalid_ftmr_cnt = %u\n",
+ le32_to_cpu(sbuf->invalid_ftmr_cnt));
+ len += scnprintf(buf + len, buf_len - len, "max_time_bw_meas_exp_cnt = %u\n\n",
+ le32_to_cpu(sbuf->max_time_bw_meas_exp_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_rtt_init_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_pdev_rtt_init_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf, i;
+ __le32 sch_fail;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_RTT_INIT_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_id));
+ len += scnprintf(buf + len, buf_len - len, "tx_11mc_ftmr_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_11mc_ftmr_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tx_11mc_ftmr_fail = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_11mc_ftmr_fail));
+ len += scnprintf(buf + len, buf_len - len, "tx_11mc_ftmr_suc_retry = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_11mc_ftmr_suc_retry));
+ len += scnprintf(buf + len, buf_len - len, "rx_11mc_ftm_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11mc_ftm_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_11az_ftm_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11az_ftm_cnt));
+ len += scnprintf(buf + len, buf_len - len, "initiator_terminate_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->initiator_terminate_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tx_meas_req_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_meas_req_count));
+ len += scnprintf(buf + len, buf_len - len, "tx_11az_ftmr_start = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_11az_ftmr_start));
+ len += scnprintf(buf + len, buf_len - len, "tx_11az_ftmr_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_11az_ftmr_stop));
+ len += scnprintf(buf + len, buf_len - len, "tx_11az_ftmr_fail = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_11az_ftmr_fail));
+ len += scnprintf(buf + len, buf_len - len,
+ "ftmr_tx_failed_null_11az_peer = %u\n",
+ le32_to_cpu(htt_stats_buf->ftmr_tx_failed_null_11az_peer));
+ len += scnprintf(buf + len, buf_len - len, "ftmr_retry_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->ftmr_retry_timeout));
+ len += scnprintf(buf + len, buf_len - len, "ftm_parse_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->ftm_parse_failure));
+ len += scnprintf(buf + len, buf_len - len, "incompatible_ftm_params = %u\n",
+ le32_to_cpu(htt_stats_buf->incompatible_ftm_params));
+ len += scnprintf(buf + len, buf_len - len,
+ "ranging_negotiation_successful_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->ranging_negotiation_successful_cnt));
+ len += scnprintf(buf + len, buf_len - len, "active_ista = %u\n",
+ le32_to_cpu(htt_stats_buf->active_ista));
+ len += scnprintf(buf + len, buf_len - len, "init_role_not_enabled = %u\n",
+ le32_to_cpu(htt_stats_buf->init_role_not_enabled));
+ len += scnprintf(buf + len, buf_len - len, "invalid_preamble = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_preamble));
+ len += scnprintf(buf + len, buf_len - len, "invalid_chan_bw_format = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_chan_bw_format));
+ len += scnprintf(buf + len, buf_len - len, "mgmt_buff_alloc_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->mgmt_buff_alloc_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "sec_ranging_req_in_open_mode = %u\n",
+ le32_to_cpu(htt_stats_buf->sec_ranging_req_in_open_mode));
+ len += scnprintf(buf + len, buf_len - len, "max_time_bw_meas_exp_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->max_time_bw_meas_exp_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_tb_ranging_requests = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tb_ranging_requests));
+ len += scnprintf(buf + len, buf_len - len, "tb_meas_duration_expiry_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tb_meas_duration_expiry_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ntbr_triggered_successfully = %u\n",
+ le32_to_cpu(htt_stats_buf->ntbr_triggered_successfully));
+ len += scnprintf(buf + len, buf_len - len, "ntbr_trigger_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->ntbr_trigger_failed));
+ len += scnprintf(buf + len, buf_len - len, "invalid_or_no_vreg_idx = %u\n",
+ le32_to_cpu(htt_stats_buf->invalid_or_no_vreg_idx));
+ len += scnprintf(buf + len, buf_len - len, "set_vreg_params_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->set_vreg_params_failed));
+ len += scnprintf(buf + len, buf_len - len, "sac_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->sac_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m1_auth_recv_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_m1_auth_recv_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m1_auth_tx_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_m1_auth_tx_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m2_auth_recv_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_m2_auth_recv_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m2_auth_drop_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_m2_auth_drop_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m3_auth_recv_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_m3_auth_recv_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_m3_auth_tx_fail_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_m3_auth_tx_fail_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_peer_create_request_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_peer_create_request_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_peer_created_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_peer_created_cnt));
+ len += scnprintf(buf + len, buf_len - len, "pasn_peer_create_timeout_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->pasn_peer_create_timeout_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ntbr_ndpa_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->ntbr_ndpa_failed));
+ len += scnprintf(buf + len, buf_len - len, "ntbr_sequence_successful = %u\n",
+ le32_to_cpu(htt_stats_buf->ntbr_sequence_successful));
+ len += scnprintf(buf + len, buf_len - len, "ntbr_ndp_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->ntbr_ndp_failed));
+ len += scnprintf(buf + len, buf_len - len, "num_tb_ranging_NDPAs_recv = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tb_ranging_ndpas_recv));
+ len += scnprintf(buf + len, buf_len - len, "ndp_rx_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->ndp_rx_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_trigger_frames_received = %u\n",
+ le32_to_cpu(htt_stats_buf->num_trigger_frames_received));
+ for (i = 0; i < (ATH12K_HTT_SCH_CMD_STATUS_CNT - 1); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "num_sch_cmd_status_%d = %u\n", i,
+ le32_to_cpu(htt_stats_buf->sch_cmd_status_cnts[i]));
+ sch_fail = htt_stats_buf->sch_cmd_status_cnts[ATH12K_HTT_SCH_CMD_STATUS_CNT - 1];
+ len += scnprintf(buf + len, buf_len - len,
+ "num_sch_cmd_status_other_failure = %u\n",
+ le32_to_cpu(sch_fail));
+ len += scnprintf(buf + len, buf_len - len, "lmr_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->lmr_timeout));
+ len += scnprintf(buf + len, buf_len - len, "lmr_recv = %u\n\n",
+ le32_to_cpu(htt_stats_buf->lmr_recv));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_rtt_hw_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_pdev_rtt_hw_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_PDEV_RTT_HW_STATS_TAG:\n");
+ len += scnprintf(buf + len, buf_len - len, "ista_ranging_ndpa_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->ista_ranging_ndpa_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ista_ranging_ndp_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->ista_ranging_ndp_cnt));
+ len += scnprintf(buf + len, buf_len - len, "ista_ranging_i2r_lmr_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->ista_ranging_i2r_lmr_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rtsa_ranging_resp_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rtsa_ranging_resp_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rtsa_ranging_ndp_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rtsa_ranging_ndp_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rsta_ranging_lmr_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rsta_ranging_lmr_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tb_ranging_cts2s_rcvd_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tb_ranging_cts2s_rcvd_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tb_ranging_ndp_rcvd_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tb_ranging_ndp_rcvd_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tb_ranging_lmr_rcvd_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tb_ranging_lmr_rcvd_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tb_ranging_tf_poll_resp_sent_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tb_ranging_tf_poll_resp_sent_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tb_ranging_tf_sound_resp_sent_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tb_ranging_tf_sound_resp_sent_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tb_ranging_tf_report_resp_sent_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tb_ranging_tf_report_resp_sent_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_rtt_tbr_selfgen_queued_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *req)
+{
+ const struct ath12k_htt_stats_pdev_rtt_tbr_tlv *sbuf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = req->buf_len;
+ u8 *buf = req->buf;
+
+ if (tag_len < sizeof(*sbuf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_RTT_TBR_SELFGEN_QUEUED_STATS_TAG:\n");
+ len += scnprintf(buf + len, buf_len - len, "SU poll = %u\n",
+ le32_to_cpu(sbuf->su_ftype[ATH12K_HTT_FTYPE_TF_POLL]));
+ len += scnprintf(buf + len, buf_len - len, "SU sound = %u\n",
+ le32_to_cpu(sbuf->su_ftype[ATH12K_HTT_FTYPE_TF_SOUND]));
+ len += scnprintf(buf + len, buf_len - len, "SU NDPA = %u\n",
+ le32_to_cpu(sbuf->su_ftype[ATH12K_HTT_FTYPE_TBR_NDPA]));
+ len += scnprintf(buf + len, buf_len - len, "SU NDP = %u\n",
+ le32_to_cpu(sbuf->su_ftype[ATH12K_HTT_FTYPE_TBR_NDP]));
+ len += scnprintf(buf + len, buf_len - len, "SU LMR = %u\n",
+ le32_to_cpu(sbuf->su_ftype[ATH12K_HTT_FTYPE_TBR_LMR]));
+ len += scnprintf(buf + len, buf_len - len, "SU TF_REPORT = %u\n",
+ le32_to_cpu(sbuf->su_ftype[ATH12K_HTT_FTYPE_TF_RPRT]));
+ len += scnprintf(buf + len, buf_len - len, "MU poll = %u\n",
+ le32_to_cpu(sbuf->mu_ftype[ATH12K_HTT_FTYPE_TF_POLL]));
+ len += scnprintf(buf + len, buf_len - len, "MU sound = %u\n",
+ le32_to_cpu(sbuf->mu_ftype[ATH12K_HTT_FTYPE_TF_SOUND]));
+ len += scnprintf(buf + len, buf_len - len, "MU NDPA = %u\n",
+ le32_to_cpu(sbuf->mu_ftype[ATH12K_HTT_FTYPE_TBR_NDPA]));
+ len += scnprintf(buf + len, buf_len - len, "MU NDP = %u\n",
+ le32_to_cpu(sbuf->mu_ftype[ATH12K_HTT_FTYPE_TBR_NDP]));
+ len += scnprintf(buf + len, buf_len - len, "MU LMR = %u\n",
+ le32_to_cpu(sbuf->mu_ftype[ATH12K_HTT_FTYPE_TBR_LMR]));
+ len += scnprintf(buf + len, buf_len - len, "MU TF_REPORT = %u\n\n",
+ le32_to_cpu(sbuf->mu_ftype[ATH12K_HTT_FTYPE_TF_RPRT]));
+
+ req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_pdev_rtt_tbr_cmd_res_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_pdev_rtt_tbr_cmd_result_stats_tlv *sbuf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf, i;
+
+ if (tag_len < sizeof(*sbuf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_RTT_TBR_CMD_RESULT_STATS_TAG:\n");
+ for (i = 0; i < le32_to_cpu(sbuf->tbr_num_sch_cmd_result_buckets); i++) {
+ len += scnprintf(buf + len, buf_len - len, "num_sch_cmd_status_%u:\n", i);
+ len += scnprintf(buf + len, buf_len - len,
+ "SU frame_SGEN_TF_POLL = %u\n",
+ le32_to_cpu(sbuf->su_res[ATH12K_HTT_FTYPE_TF_POLL][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "SU frame_SGEN_TF_SOUND = %u\n",
+ le32_to_cpu(sbuf->su_res[ATH12K_HTT_FTYPE_TF_SOUND][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "SU frame_SGEN_TBR_NDPA = %u\n",
+ le32_to_cpu(sbuf->su_res[ATH12K_HTT_FTYPE_TBR_NDPA][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "SU frame_SGEN_TBR_NDP = %u\n",
+ le32_to_cpu(sbuf->su_res[ATH12K_HTT_FTYPE_TBR_NDP][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "SU frame_SGEN_TBR_LMR = %u\n",
+ le32_to_cpu(sbuf->su_res[ATH12K_HTT_FTYPE_TBR_LMR][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "SU frame_SGEN_TF_REPORT = %u\n",
+ le32_to_cpu(sbuf->su_res[ATH12K_HTT_FTYPE_TF_RPRT][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "MU frame_SGEN_TF_POLL = %u\n",
+ le32_to_cpu(sbuf->mu_res[ATH12K_HTT_FTYPE_TF_POLL][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "MU frame_SGEN_TF_SOUND = %u\n",
+ le32_to_cpu(sbuf->mu_res[ATH12K_HTT_FTYPE_TF_SOUND][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "MU frame_SGEN_TBR_NDPA = %u\n",
+ le32_to_cpu(sbuf->mu_res[ATH12K_HTT_FTYPE_TBR_NDPA][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "MU frame_SGEN_TBR_NDP = %u\n",
+ le32_to_cpu(sbuf->mu_res[ATH12K_HTT_FTYPE_TBR_NDP][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "MU frame_SGEN_TBR_LMR = %u\n",
+ le32_to_cpu(sbuf->mu_res[ATH12K_HTT_FTYPE_TBR_LMR][i]));
+ len += scnprintf(buf + len, buf_len - len,
+ "MU frame_SGEN_TF_REPORT = %u\n\n",
+ le32_to_cpu(sbuf->mu_res[ATH12K_HTT_FTYPE_TF_RPRT][i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -5277,12 +5799,40 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
ath12k_htt_print_tx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_TX_PDEV_HISTOGRAM_STATS_TAG:
+ ath12k_htt_print_histogram_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
ath12k_htt_print_rx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
break;
case HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG:
ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_PDEV_TDMA_TAG:
+ ath12k_htt_print_pdev_tdma_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_MLO_SCHED_STATS_TAG:
+ ath12k_htt_print_mlo_sched_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_MLO_IPC_STATS_TAG:
+ ath12k_htt_print_mlo_ipc_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_RTT_RESP_STATS_TAG:
+ ath12k_htt_print_pdev_rtt_resp_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_RTT_INIT_STATS_TAG:
+ ath12k_htt_print_pdev_rtt_init_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_RTT_HW_STATS_TAG:
+ ath12k_htt_print_pdev_rtt_hw_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_RTT_TBR_SELFGEN_QUEUED_STATS_TAG:
+ ath12k_htt_print_pdev_rtt_tbr_selfgen_queued_stats_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_PDEV_RTT_TBR_CMD_RESULT_STATS_TAG:
+ ath12k_htt_print_pdev_rtt_tbr_cmd_res_stats_tlv(tag_buf, len, stats_req);
+ break;
default:
break;
}
@@ -5373,7 +5923,7 @@ static ssize_t ath12k_write_htt_stats_type(struct file *file,
{
struct ath12k *ar = file->private_data;
enum ath12k_dbg_htt_ext_stats_type type;
- unsigned int cfg_param[4] = {0};
+ unsigned int cfg_param[4] = {};
const int size = 32;
int num_args;
@@ -5423,7 +5973,7 @@ static int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
enum ath12k_dbg_htt_ext_stats_type type = stats_req->type;
u64 cookie;
int ret, pdev_id;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -5562,7 +6112,7 @@ static ssize_t ath12k_write_htt_stats_reset(struct file *file,
{
struct ath12k *ar = file->private_data;
enum ath12k_dbg_htt_ext_stats_type type;
- struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ struct htt_ext_stats_cfg_params cfg_params = {};
u8 param_pos;
int ret;
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index c2a02cf8a38b..9bd3a632b002 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -155,6 +155,11 @@ enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
+ ATH12K_DBG_HTT_PDEV_TDMA_STATS = 57,
+ ATH12K_DBG_HTT_MLO_SCHED_STATS = 63,
+ ATH12K_DBG_HTT_PDEV_MLO_IPC_STATS = 64,
+ ATH12K_DBG_HTT_EXT_PDEV_RTT_RESP_STATS = 65,
+ ATH12K_DBG_HTT_EXT_PDEV_RTT_INITIATOR_STATS = 66,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -237,6 +242,7 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137,
HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138,
HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139,
+ HTT_STATS_TX_PDEV_HISTOGRAM_STATS_TAG = 144,
HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG = 147,
HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG = 148,
HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG = 149,
@@ -247,6 +253,14 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165,
HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG = 172,
HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176,
+ HTT_STATS_PDEV_TDMA_TAG = 187,
+ HTT_STATS_MLO_SCHED_STATS_TAG = 190,
+ HTT_STATS_PDEV_MLO_IPC_STATS_TAG = 191,
+ HTT_STATS_PDEV_RTT_RESP_STATS_TAG = 194,
+ HTT_STATS_PDEV_RTT_INIT_STATS_TAG = 195,
+ HTT_STATS_PDEV_RTT_HW_STATS_TAG = 196,
+ HTT_STATS_PDEV_RTT_TBR_SELFGEN_QUEUED_STATS_TAG = 197,
+ HTT_STATS_PDEV_RTT_TBR_CMD_RESULT_STATS_TAG = 198,
HTT_STATS_MAX_TAG,
};
@@ -418,6 +432,12 @@ struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2
#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2
#define ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES 6
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_PER_COUNTERS 101
+
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_DROP_COUNTERS \
+ (ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS + \
+ ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS + \
+ ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS)
struct ath12k_htt_tx_pdev_rate_stats_tlv {
__le32 mac_id_word;
@@ -470,7 +490,16 @@ struct ath12k_htt_tx_pdev_rate_stats_tlv {
[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 tx_mcs_ext_2[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
__le32 tx_bw_320mhz;
-};
+} __packed;
+
+struct ath12k_htt_tx_histogram_stats_tlv {
+ __le32 rate_retry_mcs_drop_cnt;
+ __le32 mcs_drop_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_DROP_COUNTERS];
+ __le32 per_histogram_cnt[ATH12K_HTT_TX_PDEV_STATS_NUM_PER_COUNTERS];
+ __le32 low_latency_rate_cnt;
+ __le32 su_burst_rate_drop_cnt;
+ __le32 su_burst_rate_drop_fail_cnt;
+} __packed;
#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
@@ -550,7 +579,7 @@ struct ath12k_htt_rx_pdev_rate_stats_tlv {
__le32 rx_ulofdma_non_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 rx_ulofdma_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
-};
+} __packed;
#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS 4
#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14
@@ -580,7 +609,7 @@ struct ath12k_htt_rx_pdev_rate_ext_stats_tlv {
__le32 rx_gi_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
__le32 rx_su_punctured_mode[ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
-};
+} __packed;
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
@@ -1872,4 +1901,176 @@ struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
__le32 ul_mumimo_trigger_within_bss;
} __packed;
+struct ath12k_htt_pdev_tdma_stats_tlv {
+ __le32 mac_id__word;
+ __le32 num_tdma_active_schedules;
+ __le32 num_tdma_reserved_schedules;
+ __le32 num_tdma_restricted_schedules;
+ __le32 num_tdma_unconfigured_schedules;
+ __le32 num_tdma_slot_switches;
+ __le32 num_tdma_edca_switches;
+} __packed;
+
+struct ath12k_htt_mlo_sched_stats_tlv {
+ __le32 pref_link_num_sec_link_sched;
+ __le32 pref_link_num_pref_link_timeout;
+ __le32 pref_link_num_pref_link_sch_delay_ipc;
+ __le32 pref_link_num_pref_link_timeout_ipc;
+} __packed;
+
+#define ATH12K_HTT_HWMLO_MAX_LINKS 6
+#define ATH12K_HTT_MLO_MAX_IPC_RINGS 7
+
+struct ath12k_htt_pdev_mlo_ipc_stats_tlv {
+ __le32 mlo_ipc_ring_cnt[ATH12K_HTT_HWMLO_MAX_LINKS][ATH12K_HTT_MLO_MAX_IPC_RINGS];
+} __packed;
+
+struct ath12k_htt_stats_pdev_rtt_resp_stats_tlv {
+ __le32 pdev_id;
+ __le32 tx_11mc_ftm_suc;
+ __le32 tx_11mc_ftm_suc_retry;
+ __le32 tx_11mc_ftm_fail;
+ __le32 rx_11mc_ftmr_cnt;
+ __le32 rx_11mc_ftmr_dup_cnt;
+ __le32 rx_11mc_iftmr_cnt;
+ __le32 rx_11mc_iftmr_dup_cnt;
+ __le32 ftmr_drop_11mc_resp_role_not_enabled_cnt;
+ __le32 initiator_active_responder_rejected_cnt;
+ __le32 responder_terminate_cnt;
+ __le32 active_rsta_open;
+ __le32 active_rsta_mac;
+ __le32 active_rsta_mac_phy;
+ __le32 num_assoc_ranging_peers;
+ __le32 num_unassoc_ranging_peers;
+ __le32 responder_alloc_cnt;
+ __le32 responder_alloc_failure;
+ __le32 pn_check_failure_cnt;
+ __le32 pasn_m1_auth_recv_cnt;
+ __le32 pasn_m1_auth_drop_cnt;
+ __le32 pasn_m2_auth_recv_cnt;
+ __le32 pasn_m2_auth_tx_fail_cnt;
+ __le32 pasn_m3_auth_recv_cnt;
+ __le32 pasn_m3_auth_drop_cnt;
+ __le32 pasn_peer_create_request_cnt;
+ __le32 pasn_peer_create_timeout_cnt;
+ __le32 pasn_peer_created_cnt;
+ __le32 sec_ranging_not_supported_mfp_not_setup;
+ __le32 non_sec_ranging_discarded_for_assoc_peer;
+ __le32 open_ranging_discarded_set_for_pasn_peer;
+ __le32 unassoc_non_pasn_ranging_not_supported;
+ __le32 num_req_bw_20_mhz;
+ __le32 num_req_bw_40_mhz;
+ __le32 num_req_bw_80_mhz;
+ __le32 num_req_bw_160_mhz;
+ __le32 tx_11az_ftm_successful;
+ __le32 tx_11az_ftm_failed;
+ __le32 rx_11az_ftmr_cnt;
+ __le32 rx_11az_ftmr_dup_cnt;
+ __le32 rx_11az_iftmr_dup_cnt;
+ __le32 malformed_ftmr;
+ __le32 ftmr_drop_ntb_resp_role_not_enabled_cnt;
+ __le32 ftmr_drop_tb_resp_role_not_enabled_cnt;
+ __le32 invalid_ftm_request_params;
+ __le32 requested_bw_format_not_supported;
+ __le32 ntb_unsec_unassoc_ranging_peer_alloc_failed;
+ __le32 tb_unassoc_unsec_pasn_peer_creation_failed;
+ __le32 num_ranging_sequences_processed;
+ __le32 ntb_tx_ndp;
+ __le32 ndp_rx_cnt;
+ __le32 num_ntb_ranging_ndpas_recv;
+ __le32 recv_lmr;
+ __le32 invalid_ftmr_cnt;
+ __le32 max_time_bw_meas_exp_cnt;
+} __packed;
+
+#define ATH12K_HTT_MAX_SCH_CMD_RESULT 25
+#define ATH12K_HTT_SCH_CMD_STATUS_CNT 9
+
+struct ath12k_htt_stats_pdev_rtt_init_stats_tlv {
+ __le32 pdev_id;
+ __le32 tx_11mc_ftmr_cnt;
+ __le32 tx_11mc_ftmr_fail;
+ __le32 tx_11mc_ftmr_suc_retry;
+ __le32 rx_11mc_ftm_cnt;
+ __le32 tx_meas_req_count;
+ __le32 init_role_not_enabled;
+ __le32 initiator_terminate_cnt;
+ __le32 tx_11az_ftmr_fail;
+ __le32 tx_11az_ftmr_start;
+ __le32 tx_11az_ftmr_stop;
+ __le32 rx_11az_ftm_cnt;
+ __le32 active_ista;
+ __le32 invalid_preamble;
+ __le32 invalid_chan_bw_format;
+ __le32 mgmt_buff_alloc_fail_cnt;
+ __le32 ftm_parse_failure;
+ __le32 ranging_negotiation_successful_cnt;
+ __le32 incompatible_ftm_params;
+ __le32 sec_ranging_req_in_open_mode;
+ __le32 ftmr_tx_failed_null_11az_peer;
+ __le32 ftmr_retry_timeout;
+ __le32 max_time_bw_meas_exp_cnt;
+ __le32 tb_meas_duration_expiry_cnt;
+ __le32 num_tb_ranging_requests;
+ __le32 ntbr_triggered_successfully;
+ __le32 ntbr_trigger_failed;
+ __le32 invalid_or_no_vreg_idx;
+ __le32 set_vreg_params_failed;
+ __le32 sac_mismatch;
+ __le32 pasn_m1_auth_recv_cnt;
+ __le32 pasn_m1_auth_tx_fail_cnt;
+ __le32 pasn_m2_auth_recv_cnt;
+ __le32 pasn_m2_auth_drop_cnt;
+ __le32 pasn_m3_auth_recv_cnt;
+ __le32 pasn_m3_auth_tx_fail_cnt;
+ __le32 pasn_peer_create_request_cnt;
+ __le32 pasn_peer_create_timeout_cnt;
+ __le32 pasn_peer_created_cnt;
+ __le32 ntbr_ndpa_failed;
+ __le32 ntbr_sequence_successful;
+ __le32 ntbr_ndp_failed;
+ __le32 sch_cmd_status_cnts[ATH12K_HTT_SCH_CMD_STATUS_CNT];
+ __le32 lmr_timeout;
+ __le32 lmr_recv;
+ __le32 num_trigger_frames_received;
+ __le32 num_tb_ranging_ndpas_recv;
+ __le32 ndp_rx_cnt;
+} __packed;
+
+struct ath12k_htt_stats_pdev_rtt_hw_stats_tlv {
+ __le32 ista_ranging_ndpa_cnt;
+ __le32 ista_ranging_ndp_cnt;
+ __le32 ista_ranging_i2r_lmr_cnt;
+ __le32 rtsa_ranging_resp_cnt;
+ __le32 rtsa_ranging_ndp_cnt;
+ __le32 rsta_ranging_lmr_cnt;
+ __le32 tb_ranging_cts2s_rcvd_cnt;
+ __le32 tb_ranging_ndp_rcvd_cnt;
+ __le32 tb_ranging_lmr_rcvd_cnt;
+ __le32 tb_ranging_tf_poll_resp_sent_cnt;
+ __le32 tb_ranging_tf_sound_resp_sent_cnt;
+ __le32 tb_ranging_tf_report_resp_sent_cnt;
+} __packed;
+
+enum ath12k_htt_stats_txsend_ftype {
+ ATH12K_HTT_FTYPE_TF_POLL,
+ ATH12K_HTT_FTYPE_TF_SOUND,
+ ATH12K_HTT_FTYPE_TBR_NDPA,
+ ATH12K_HTT_FTYPE_TBR_NDP,
+ ATH12K_HTT_FTYPE_TBR_LMR,
+ ATH12K_HTT_FTYPE_TF_RPRT,
+ ATH12K_HTT_FTYPE_MAX
+};
+
+struct ath12k_htt_stats_pdev_rtt_tbr_tlv {
+ __le32 su_ftype[ATH12K_HTT_FTYPE_MAX];
+ __le32 mu_ftype[ATH12K_HTT_FTYPE_MAX];
+} __packed;
+
+struct ath12k_htt_stats_pdev_rtt_tbr_cmd_result_stats_tlv {
+ __le32 tbr_num_sch_cmd_result_buckets;
+ __le32 su_res[ATH12K_HTT_FTYPE_MAX][ATH12K_HTT_MAX_SCH_CMD_RESULT];
+ __le32 mu_res[ATH12K_HTT_FTYPE_MAX][ATH12K_HTT_MAX_SCH_CMD_RESULT];
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index c6b10acb643e..f893fce6d9bd 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -84,7 +84,6 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
if (ret) {
ath12k_warn(ab, "failed to setup rx defrag context\n");
- tid--;
goto peer_clean;
}
@@ -102,7 +101,7 @@ peer_clean:
return -ENOENT;
}
- for (; tid >= 0; tid--)
+ for (tid--; tid >= 0; tid--)
ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
spin_unlock_bh(&ab->base_lock);
@@ -242,7 +241,7 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
- struct hal_srng_params params = { 0 };
+ struct hal_srng_params params = {};
int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
int ret;
@@ -521,7 +520,7 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
- DP_TX_COMP_RING_SIZE);
+ DP_TX_COMP_RING_SIZE(ab));
if (ret) {
ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
tx_comp_ring_num, ret);
@@ -1084,8 +1083,8 @@ out:
int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
- struct ath12k_htc_svc_conn_req conn_req = {0};
- struct ath12k_htc_svc_conn_resp conn_resp = {0};
+ struct ath12k_htc_svc_conn_req conn_req = {};
+ struct ath12k_htc_svc_conn_resp conn_resp = {};
int status;
conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
@@ -1164,31 +1163,36 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
/* RX Descriptor cleanup */
spin_lock_bh(&dp->rx_desc_lock);
- for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
- desc_info = dp->rxbaddr[i];
-
- for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- if (!desc_info[j].in_use) {
- list_del(&desc_info[j].list);
+ if (dp->rxbaddr) {
+ for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES(ab); i++) {
+ if (!dp->rxbaddr[i])
continue;
- }
- skb = desc_info[j].skb;
- if (!skb)
- continue;
+ desc_info = dp->rxbaddr[i];
- dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
- skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- }
- }
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ if (!desc_info[j].in_use) {
+ list_del(&desc_info[j].list);
+ continue;
+ }
- for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
- if (!dp->rxbaddr[i])
- continue;
+ skb = desc_info[j].skb;
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev,
+ ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ kfree(dp->rxbaddr[i]);
+ dp->rxbaddr[i] = NULL;
+ }
- kfree(dp->rxbaddr[i]);
- dp->rxbaddr[i] = NULL;
+ kfree(dp->rxbaddr);
+ dp->rxbaddr = NULL;
}
spin_unlock_bh(&dp->rx_desc_lock);
@@ -1197,8 +1201,8 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
spin_lock_bh(&dp->tx_desc_lock[i]);
- list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
- list) {
+ list_for_each_entry_safe(tx_desc_info, tmp1,
+ &dp->tx_desc_used_list[i], list) {
list_del(&tx_desc_info->list);
skb = tx_desc_info->skb;
@@ -1232,19 +1236,25 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
spin_unlock_bh(&dp->tx_desc_lock[i]);
}
- for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
- spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ if (dp->txbaddr) {
+ for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
- for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
- tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
- if (!dp->txbaddr[tx_spt_page])
- continue;
+ for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
+ tx_spt_page = i + pool_id *
+ ATH12K_TX_SPT_PAGES_PER_POOL(ab);
+ if (!dp->txbaddr[tx_spt_page])
+ continue;
+
+ kfree(dp->txbaddr[tx_spt_page]);
+ dp->txbaddr[tx_spt_page] = NULL;
+ }
- kfree(dp->txbaddr[tx_spt_page]);
- dp->txbaddr[tx_spt_page] = NULL;
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
- spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ kfree(dp->txbaddr);
+ dp->txbaddr = NULL;
}
/* unmap SPT pages */
@@ -1393,8 +1403,8 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
- end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(ab);
+ end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(ab);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
@@ -1418,7 +1428,7 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
end_ppt_idx = start_ppt_idx +
- (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
+ (ATH12K_TX_SPT_PAGES_PER_POOL(ab) * ATH12K_HW_MAX_QUEUES);
if (ppt_idx < start_ppt_idx ||
ppt_idx >= end_ppt_idx ||
@@ -1435,13 +1445,24 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
+ u32 num_rx_spt_pages = ATH12K_NUM_RX_SPT_PAGES(ab);
u32 i, j, pool_id, tx_spt_page;
u32 ppt_idx, cookie_ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
- /* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
- for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+ dp->rxbaddr = kcalloc(num_rx_spt_pages,
+ sizeof(struct ath12k_rx_desc_info *), GFP_ATOMIC);
+
+ if (!dp->rxbaddr) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ return -ENOMEM;
+ }
+
+ /* First ATH12K_NUM_RX_SPT_PAGES(ab) of allocated SPT pages are used for
+ * RX
+ */
+ for (i = 0; i < num_rx_spt_pages; i++) {
rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
GFP_ATOMIC);
@@ -1450,7 +1471,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+ ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET(ab) + i;
cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
dp->rxbaddr[i] = &rx_descs[0];
@@ -1468,9 +1489,15 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
spin_unlock_bh(&dp->rx_desc_lock);
+ dp->txbaddr = kcalloc(ATH12K_NUM_TX_SPT_PAGES(ab),
+ sizeof(struct ath12k_tx_desc_info *), GFP_ATOMIC);
+
+ if (!dp->txbaddr)
+ return -ENOMEM;
+
for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
- for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+ for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
GFP_ATOMIC);
@@ -1480,7 +1507,8 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+ tx_spt_page = i + pool_id *
+ ATH12K_TX_SPT_PAGES_PER_POOL(ab);
ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
dp->txbaddr[tx_spt_page] = &tx_descs[0];
@@ -1514,12 +1542,12 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab,
switch (type) {
case ATH12K_DP_TX_DESC:
start = ATH12K_TX_SPT_PAGE_OFFSET;
- end = start + ATH12K_NUM_TX_SPT_PAGES;
+ end = start + ATH12K_NUM_TX_SPT_PAGES(ab);
break;
case ATH12K_DP_RX_DESC:
cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
- start = ATH12K_RX_SPT_PAGE_OFFSET;
- end = start + ATH12K_NUM_RX_SPT_PAGES;
+ start = ATH12K_RX_SPT_PAGE_OFFSET(ab);
+ end = start + ATH12K_NUM_RX_SPT_PAGES(ab);
break;
default:
ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
@@ -1547,6 +1575,11 @@ void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
}
}
+static u32 ath12k_dp_get_num_spt_pages(struct ath12k_base *ab)
+{
+ return ATH12K_NUM_RX_SPT_PAGES(ab) + ATH12K_NUM_TX_SPT_PAGES(ab);
+}
+
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1561,7 +1594,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
spin_lock_init(&dp->tx_desc_lock[i]);
}
- dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
+ dp->num_spt_pages = ath12k_dp_get_num_spt_pages(ab);
if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
@@ -1573,7 +1606,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
+ dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES(ab);
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
@@ -1748,7 +1781,8 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
if (ret)
goto fail_dp_bank_profiles_cleanup;
- size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
+ size = sizeof(struct hal_wbm_release_ring_tx) *
+ DP_TX_COMP_RING_SIZE(ab);
ret = ath12k_dp_reoq_lut_setup(ab);
if (ret) {
@@ -1760,7 +1794,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
dp->tx_ring[i].tcl_data_ring_id = i;
dp->tx_ring[i].tx_status_head = 0;
- dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE(ab) - 1;
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
if (!dp->tx_ring[i].tx_status) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index a353333f83b6..7baa48b86f7a 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -46,7 +46,7 @@ struct dp_rxdma_ring {
int bufs_max;
};
-#define ATH12K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+#define ATH12K_TX_COMPL_NEXT(ab, x) (((x) + 1) % DP_TX_COMP_RING_SIZE(ab))
struct dp_tx_ring {
u8 tcl_data_ring_id;
@@ -174,8 +174,9 @@ struct ath12k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
-#define DP_TX_COMP_RING_SIZE 32768
-#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
+#define DP_TX_COMP_RING_SIZE(ab) \
+ ((ab)->profile_param->dp_params.tx_comp_ring_size)
+#define DP_TX_IDR_SIZE(ab) DP_TX_COMP_RING_SIZE(ab)
#define DP_TCL_CMD_RING_SIZE 32
#define DP_TCL_STATUS_RING_SIZE 32
#define DP_REO_DST_RING_MAX 8
@@ -190,8 +191,10 @@ struct ath12k_pdev_dp {
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
-#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
-#define DP_RXDMA_MONITOR_DST_RING_SIZE 8092
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE(ab) \
+ ((ab)->profile_param->dp_params.rxdma_monitor_buf_ring_size)
+#define DP_RXDMA_MONITOR_DST_RING_SIZE(ab) \
+ ((ab)->profile_param->dp_params.rxdma_monitor_dst_ring_size)
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_TX_MONITOR_BUF_RING_SIZE 4096
#define DP_TX_MONITOR_DEST_RING_SIZE 2048
@@ -225,10 +228,11 @@ struct ath12k_pdev_dp {
#define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
#define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10
-#define ATH12K_NUM_POOL_TX_DESC 32768
-
+#define ATH12K_NUM_POOL_TX_DESC(ab) \
+ ((ab)->profile_param->dp_params.num_pool_tx_desc)
/* TODO: revisit this count during testing */
-#define ATH12K_RX_DESC_COUNT (12288)
+#define ATH12K_RX_DESC_COUNT(ab) \
+ ((ab)->profile_param->dp_params.rx_desc_count)
#define ATH12K_PAGE_SIZE PAGE_SIZE
@@ -240,20 +244,21 @@ struct ath12k_pdev_dp {
/* Total 512 entries in a SPT, i.e 4K Page/8 */
#define ATH12K_MAX_SPT_ENTRIES 512
-#define ATH12K_NUM_RX_SPT_PAGES ((ATH12K_RX_DESC_COUNT) / ATH12K_MAX_SPT_ENTRIES)
+#define ATH12K_NUM_RX_SPT_PAGES(ab) ((ATH12K_RX_DESC_COUNT(ab)) / \
+ ATH12K_MAX_SPT_ENTRIES)
-#define ATH12K_TX_SPT_PAGES_PER_POOL (ATH12K_NUM_POOL_TX_DESC / \
+#define ATH12K_TX_SPT_PAGES_PER_POOL(ab) (ATH12K_NUM_POOL_TX_DESC(ab) / \
ATH12K_MAX_SPT_ENTRIES)
-#define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
-#define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
+#define ATH12K_NUM_TX_SPT_PAGES(ab) (ATH12K_TX_SPT_PAGES_PER_POOL(ab) * \
+ ATH12K_HW_MAX_QUEUES)
#define ATH12K_TX_SPT_PAGE_OFFSET 0
-#define ATH12K_RX_SPT_PAGE_OFFSET ATH12K_NUM_TX_SPT_PAGES
+#define ATH12K_RX_SPT_PAGE_OFFSET(ab) ATH12K_NUM_TX_SPT_PAGES(ab)
/* The SPT pages are divided for RX and TX, first block for RX
* and remaining for TX
*/
-#define ATH12K_NUM_TX_SPT_PAGE_START ATH12K_NUM_RX_SPT_PAGES
+#define ATH12K_NUM_TX_SPT_PAGE_START(ab) ATH12K_NUM_RX_SPT_PAGES(ab)
#define ATH12K_DP_RX_DESC_MAGIC 0xBABABABA
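
Since the descriptor counts are now profile-driven, the SPT page counts scale with them; one 4 KiB SPT page holds ATH12K_MAX_SPT_ENTRIES (512) descriptors. A worked example using the former fixed values removed above:

	/* worked example with the old DEFAULT numbers:
	 *   ATH12K_NUM_RX_SPT_PAGES(ab)      = 12288 / 512 = 24
	 *   ATH12K_TX_SPT_PAGES_PER_POOL(ab) = 32768 / 512 = 64
	 *   ATH12K_NUM_TX_SPT_PAGES(ab)      = 64 * ATH12K_HW_MAX_QUEUES
	 * dp->num_spt_pages adds the RX and TX totals and is clamped to
	 * ATH12K_MAX_PPT_ENTRIES in ath12k_dp_cc_init().
	 */
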
@@ -399,8 +404,8 @@ struct ath12k_dp {
struct ath12k_spt_info *spt_info;
u32 num_spt_pages;
u32 rx_ppt_base;
- struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES];
- struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES];
+ struct ath12k_rx_desc_info **rxbaddr;
+ struct ath12k_tx_desc_info **txbaddr;
struct list_head rx_desc_free_list;
/* protects the free desc list */
spinlock_t rx_desc_lock;
@@ -469,6 +474,7 @@ enum htt_h2t_msg_type {
};
#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+#define HTT_OPTION_TCL_METADATA_VER_V1 1
#define HTT_OPTION_TCL_METADATA_VER_V2 2
#define HTT_OPTION_TAG GENMASK(7, 0)
#define HTT_OPTION_LEN GENMASK(15, 8)
@@ -703,7 +709,8 @@ struct htt_ppdu_stats_cfg_cmd {
} __packed;
#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
-#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
+#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9)
#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
enum htt_ppdu_stats_tag_type {
@@ -1559,6 +1566,8 @@ enum HTT_PPDU_STATS_PPDU_TYPE {
#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+#define HTT_USR_RATE_PPDU_TYPE(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M)
#define HTT_USR_RATE_PREAMBLE(_val) \
le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M)
#define HTT_USR_RATE_BW(_val) \
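The dp.h changes above replace fixed ring-size constants with lookups into a per-device profile. A minimal standalone sketch of that pattern, using hypothetical names rather than the driver's actual structures, looks like this:

#include <stdio.h>

/* Hypothetical per-profile datapath parameters, standing in for the
 * ab->profile_param->dp_params referenced by the macros above.
 */
struct dp_profile_params {
	unsigned int tx_comp_ring_size;
	unsigned int rx_desc_count;
};

struct soc_ctx {
	const struct dp_profile_params *dp_params;
};

/* Ring sizes resolved at run time from the active profile. */
#define TX_COMP_RING_SIZE(soc)	((soc)->dp_params->tx_comp_ring_size)
#define RX_DESC_COUNT(soc)	((soc)->dp_params->rx_desc_count)

int main(void)
{
	static const struct dp_profile_params default_profile = {
		.tx_comp_ring_size = 32768,
		.rx_desc_count = 12288,
	};
	struct soc_ctx soc = { .dp_params = &default_profile };

	/* The same code path now works for whichever profile the device selects. */
	printf("tx comp ring: %u, rx descriptors: %u\n",
	       TX_COMP_RING_SIZE(&soc), RX_DESC_COUNT(&soc));
	return 0;
}

The point of the indirection is that different device profiles can carry different ring sizes without touching the code that consumes them.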
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 91f4e3aff74c..8189e52ed007 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -2146,10 +2146,15 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
+ s32 noise_floor;
u8 *ptr = NULL;
+ spin_lock_bh(&ar->data_lock);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
rxs->flag |= RX_FLAG_MACTIME_START;
- rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+ rxs->signal = ppduinfo->rssi_comb + noise_floor;
rxs->nss = ppduinfo->nss + 1;
if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
@@ -3610,7 +3615,6 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
struct hal_rx_mon_ppdu_info *ppdu_info,
u32 uid)
{
- struct ath12k_sta *ahsta;
struct ath12k_link_sta *arsta;
struct ath12k_rx_peer_stats *rx_stats = NULL;
struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
@@ -3628,8 +3632,13 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
return;
}
- ahsta = ath12k_sta_to_ahsta(peer->sta);
- arsta = &ahsta->deflink;
+ arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+ if (!arsta) {
+ ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
+ peer->addr, peer->peer_id);
+ return;
+ }
+
arsta->rssi_comb = ppdu_info->rssi_comb;
ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
rx_stats = arsta->rx_stats;
@@ -3742,7 +3751,6 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
struct dp_srng *mon_dst_ring;
struct hal_srng *srng;
struct dp_rxdma_mon_ring *buf_ring;
- struct ath12k_sta *ahsta = NULL;
struct ath12k_link_sta *arsta;
struct ath12k_peer *peer;
struct sk_buff_head skb_list;
@@ -3868,8 +3876,15 @@ move_next:
}
if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
- ahsta = ath12k_sta_to_ahsta(peer->sta);
- arsta = &ahsta->deflink;
+ arsta = ath12k_peer_get_link_sta(ar->ab, peer);
+ if (!arsta) {
+ ath12k_warn(ar->ab, "link sta not found on peer %pM id %d\n",
+ peer->addr, peer->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ continue;
+ }
ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
ppdu_info);
} else if ((ppdu_info->fc_valid) &&
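Intentionally left as in the original; the dp_mon.c hunks above only swap the deflink shortcut for the ath12k_peer_get_link_sta() lookup and add the error paths shown.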
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index a2f3d5d7b916..8ab91273592c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -570,7 +570,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
&dp->rxdma_mon_dst_ring[i],
HAL_RXDMA_MONITOR_DST,
0, mac_id + i,
- DP_RXDMA_MONITOR_DST_RING_SIZE);
+ DP_RXDMA_MONITOR_DST_RING_SIZE(ab));
if (ret) {
ath12k_warn(ar->ab,
"failed to setup HAL_RXDMA_MONITOR_DST\n");
@@ -671,7 +671,7 @@ static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_ti
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
struct ath12k_dp_rx_tid *rx_tid)
{
- struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_hal_reo_cmd cmd = {};
unsigned long tot_desc_sz, desc_sz;
int ret;
@@ -828,7 +828,7 @@ static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
struct ath12k_peer *peer, u8 tid)
{
- struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
int ret;
@@ -939,7 +939,7 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
u32 ba_win_sz, u16 ssn,
bool update_ssn)
{
- struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_hal_reo_cmd cmd = {};
int ret;
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
@@ -1204,7 +1204,7 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
u8 tid;
@@ -1418,27 +1418,33 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
- struct ieee80211_sta *sta;
- struct ath12k_sta *ahsta;
struct ath12k_link_sta *arsta;
struct htt_ppdu_stats_user_rate *user_rate;
struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
struct htt_ppdu_stats_common *common = &ppdu_stats->common;
int ret;
- u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
+ u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
u32 v, succ_bytes = 0;
u16 tones, rate = 0, succ_pkts = 0;
u32 tx_duration = 0;
u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
- bool is_ampdu = false;
+ u16 tx_retry_failed = 0, tx_retry_count = 0;
+ bool is_ampdu = false, is_ofdma;
if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
return;
- if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
is_ampdu =
HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+ tx_retry_failed =
+ __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
+ __le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
+ tx_retry_count =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+ }
if (usr_stats->tlv_flags &
BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
@@ -1460,6 +1466,10 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+ ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->info1);
+ is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) ||
+ (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);
+
/* Note: If host configured fixed rates and in some other special
* cases, the broadcast/management frames are sent in different rates.
* Firmware rate's control to be skipped for this?
@@ -1500,12 +1510,17 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
return;
}
- sta = peer->sta;
- ahsta = ath12k_sta_to_ahsta(sta);
- arsta = &ahsta->deflink;
+ arsta = ath12k_peer_get_link_sta(ab, peer);
+ if (!arsta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
+
switch (flags) {
case WMI_RATE_PREAMBLE_OFDM:
arsta->txrate.legacy = rate;
@@ -1534,11 +1549,26 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar,
le16_to_cpu(user_rate->ru_start) + 1;
v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
arsta->txrate.he_ru_alloc = v;
+ if (is_ofdma)
+ arsta->txrate.bw = RATE_INFO_BW_HE_RU;
+ break;
+ case WMI_RATE_PREAMBLE_EHT:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ arsta->txrate.he_dcm = dcm;
+ arsta->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+ tones = le16_to_cpu(user_rate->ru_end) -
+ le16_to_cpu(user_rate->ru_start) + 1;
+ v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
+ arsta->txrate.eht_ru_alloc = v;
+ if (is_ofdma)
+ arsta->txrate.bw = RATE_INFO_BW_EHT_RU;
break;
}
+ arsta->tx_retry_failed += tx_retry_failed;
+ arsta->tx_retry_count += tx_retry_count;
arsta->txrate.nss = nss;
- arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
arsta->tx_duration += tx_duration;
memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
@@ -2705,7 +2735,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
int ring_id)
{
struct ath12k_hw_group *ag = ab->ag;
- struct ieee80211_rx_status rx_status = {0};
+ struct ieee80211_rx_status rx_status = {};
struct ath12k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath12k *ar;
@@ -3000,7 +3030,7 @@ static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
size_t data_len, u8 *mic)
{
SHASH_DESC_ON_STACK(desc, tfm);
- u8 mic_hdr[16] = {0};
+ u8 mic_hdr[16] = {};
u8 tid = 0;
int ret;
@@ -3969,7 +3999,7 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
struct sk_buff_head *msdu_list)
{
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ieee80211_rx_status rxs = {0};
+ struct ieee80211_rx_status rxs = {};
struct ath12k_dp_rx_info rx_info;
bool drop = true;
@@ -3993,6 +4023,8 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
return;
}
+ rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR;
+
ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}
@@ -4314,7 +4346,7 @@ void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
u32 ring_id;
int ret;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
@@ -4355,7 +4387,7 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
u32 ring_id;
int ret = 0;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
@@ -4512,7 +4544,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ret = ath12k_dp_srng_setup(ab,
&dp->rxdma_mon_buf_ring.refill_buf_ring,
HAL_RXDMA_MONITOR_BUF, 0, 0,
- DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ DP_RXDMA_MONITOR_BUF_RING_SIZE(ab));
if (ret) {
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
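Several hunks in dp_rx.c (and in dp_tx.c below) convert "= {0}" initializers to "= {}". Both forms zero-initialize the entire object; the empty-brace form, long supported by GCC/Clang and standardized in C23, simply avoids singling out the first member. A small userspace illustration, unrelated to the driver itself:

#include <stdio.h>

struct rx_status {
	unsigned int flag;
	int signal;
	unsigned char nss;
};

int main(void)
{
	struct rx_status a = {0};	/* zero-initializes every member */
	struct rx_status b = {};	/* same effect, GNU C / C23 empty initializer */

	printf("a: %u %d %u\n", a.flag, a.signal, a.nss);
	printf("b: %u %d %u\n", b.flag, b.signal, b.nss);
	return 0;
}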
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 075912eacfaa..abc84ca8467a 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -225,7 +225,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
- struct hal_tx_info ti = {0};
+ struct hal_tx_info ti = {};
struct ath12k_tx_desc_info *tx_desc;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
@@ -244,6 +244,8 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
bool msdu_ext_desc = false;
bool add_htt_metadata = false;
u32 iova_mask = ab->hw_params->iova_mask;
+ bool is_diff_encap = false;
+ bool is_null_frame = false;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -334,7 +336,19 @@ tcl_ring_sel:
switch (ti.encap_type) {
case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
- ath12k_dp_tx_encap_nwifi(skb);
+ is_null_frame = ieee80211_is_nullfunc(hdr->frame_control);
+ if (ahvif->vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) {
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE) || is_null_frame)
+ is_diff_encap = true;
+
+ /* Firmware expects an MSDU ext descriptor for nwifi/raw packets
+ * received in ETH mode. Without it, tx failures were observed for
+ * multicast packets in ETH mode.
+ */
+ msdu_ext_desc = true;
+ } else {
+ ath12k_dp_tx_encap_nwifi(skb);
+ }
break;
case HAL_TCL_ENCAP_TYPE_RAW:
if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
@@ -378,15 +392,25 @@ map:
goto fail_remove_tx_buf;
}
- if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
- !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
- !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
- ieee80211_has_protected(hdr->frame_control)) {
- /* Add metadata for sw encrypted vlan group traffic */
+ if ((!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control)) ||
+ is_diff_encap) {
+ /* Firmware does not expect metadata for a QoS null
+ * nwifi packet received in ETH encap mode.
+ */
+ if (is_null_frame && msdu_ext_desc)
+ goto skip_htt_meta;
+
+ /* Add metadata for sw encrypted vlan group traffic
+ * and EAPOL nwifi packets received in ETH encap mode.
+ */
add_htt_metadata = true;
msdu_ext_desc = true;
- ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
+skip_htt_meta:
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
}
@@ -544,7 +568,8 @@ static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
struct ath12k_tx_desc_params *desc_params,
struct dp_tx_ring *tx_ring,
- struct ath12k_dp_htt_wbm_tx_status *ts)
+ struct ath12k_dp_htt_wbm_tx_status *ts,
+ u16 peer_id)
{
struct ieee80211_tx_info *info;
struct ath12k_link_vif *arvif;
@@ -553,6 +578,9 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
struct ath12k_vif *ahvif;
struct ath12k *ar;
struct sk_buff *msdu = desc_params->skb;
+ s32 noise_floor;
+ struct ieee80211_tx_status status = {};
+ struct ath12k_peer *peer;
skb_cb = ATH12K_SKB_CB(msdu);
info = IEEE80211_SKB_CB(msdu);
@@ -591,16 +619,38 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
info->status.ack_signal = ts->ack_rssi;
if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
- ab->wmi_ab.svc_map))
- info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+ ab->wmi_ab.svc_map)) {
+ spin_lock_bh(&ar->data_lock);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ info->status.ack_signal += noise_floor;
+ }
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
}
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n", peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
+ goto exit;
+ } else {
+ status.sta = peer->sta;
+ }
+ spin_unlock_bh(&ab->base_lock);
- ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
+ status.info = info;
+ status.skb = msdu;
+ ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
+exit:
+ rcu_read_unlock();
}
static void
@@ -609,8 +659,9 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
struct ath12k_tx_desc_params *desc_params)
{
struct htt_tx_wbm_completion *status_desc;
- struct ath12k_dp_htt_wbm_tx_status ts = {0};
+ struct ath12k_dp_htt_wbm_tx_status ts = {};
enum hal_wbm_htt_tx_comp_status wbm_status;
+ u16 peer_id;
status_desc = desc;
@@ -623,7 +674,11 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
- ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts);
+
+ peer_id = le32_get_bits(((struct hal_wbm_completion_ring_tx *)desc)->
+ info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+ ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts, peer_id);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
@@ -650,7 +705,7 @@ static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status
struct ieee80211_sta *sta;
struct ath12k_sta *ahsta;
struct ath12k_link_sta *arsta;
- struct rate_info txrate = {0};
+ struct rate_info txrate = {};
u16 rate, ru_tones;
u8 rate_idx = 0;
int ret;
@@ -774,6 +829,13 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct ieee80211_vif *vif;
struct ath12k_vif *ahvif;
struct sk_buff *msdu = desc_params->skb;
+ s32 noise_floor;
+ struct ieee80211_tx_status status = {};
+ struct ieee80211_rate_status status_rate = {};
+ struct ath12k_peer *peer;
+ struct ath12k_link_sta *arsta;
+ struct ath12k_sta *ahsta;
+ struct rate_info rate;
if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
/* Must not happen */
@@ -826,8 +888,13 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
info->status.ack_signal = ts->ack_rssi;
if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
- ab->wmi_ab.svc_map))
- info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+ ab->wmi_ab.svc_map)) {
+ spin_lock_bh(&ar->data_lock);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ info->status.ack_signal += noise_floor;
+ }
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
@@ -860,7 +927,32 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
ath12k_dp_tx_update_txcompl(ar, ts);
- ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_err(ab,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
+ goto exit;
+ }
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ arsta = &ahsta->deflink;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ status.sta = peer->sta;
+ status.info = info;
+ status.skb = msdu;
+ rate = arsta->last_txrate;
+
+ status_rate.rate_idx = rate;
+ status_rate.try_count = 1;
+
+ status.rates = &status_rate;
+ status.n_rates = 1;
+ ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
exit:
rcu_read_unlock();
@@ -889,6 +981,9 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+ ts->ack_rssi = le32_get_bits(desc->info2,
+ HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI);
+
if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
@@ -906,7 +1001,7 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
struct ath12k_tx_desc_info *tx_desc = NULL;
- struct hal_tx_status ts = { 0 };
+ struct hal_tx_status ts = {};
struct ath12k_tx_desc_params desc_params;
struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
struct hal_wbm_release_ring *desc;
@@ -919,7 +1014,8 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
ath12k_hal_srng_access_begin(ab, status_ring);
- while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
+ while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) {
desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
if (!desc)
break;
@@ -927,11 +1023,12 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
desc, sizeof(*desc));
tx_ring->tx_status_head =
- ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head);
}
if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
- (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+ (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) ==
+ tx_ring->tx_status_tail)) {
/* TODO: Process pending tx_status messages when kfifo_is_full() */
ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
}
@@ -940,12 +1037,13 @@ void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
spin_unlock_bh(&status_ring->lock);
- while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail) !=
+ tx_ring->tx_status_head) {
struct hal_wbm_completion_ring_tx *tx_status;
u32 desc_id;
tx_ring->tx_status_tail =
- ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail);
tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
ath12k_dp_tx_status_parse(ab, tx_status, &ts);
@@ -1182,6 +1280,7 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
struct sk_buff *skb;
struct htt_ver_req_cmd *cmd;
int len = sizeof(*cmd);
+ u32 metadata_version;
int ret;
init_completion(&dp->htt_tgt_version_received);
@@ -1194,12 +1293,14 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
HTT_OPTION_TAG);
+ metadata_version = ath12k_ftm_mode ? HTT_OPTION_TCL_METADATA_VER_V1 :
+ HTT_OPTION_TCL_METADATA_VER_V2;
cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
HTT_OPTION_TAG) |
le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
HTT_OPTION_LEN) |
- le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
+ le32_encode_bits(metadata_version,
HTT_OPTION_VALUE);
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
@@ -1245,7 +1346,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
HTT_PPDU_STATS_CFG_MSG_TYPE);
- pdev_mask = 1 << (i + 1);
+ pdev_mask = 1 << (i + ar->pdev_idx);
cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
@@ -1470,7 +1571,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
int ret, ring_id, i;
tlv_filter.offset_valid = false;
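The completion handler in the dp_tx.c hunks above walks the status FIFO with a head index advanced while copying descriptors out of the SRNG and a tail index that then chases it while processing, both stepped with the per-device modulo macro. A compact, self-contained imitation of that modulo-advance two-index walk (not the driver code) is:

#include <stdio.h>

#define RING_SIZE 8
#define NEXT(x)	(((x) + 1) % RING_SIZE)

static int ring[RING_SIZE];
static unsigned int head, tail;	/* head: copy-in index, tail: process index */

static int ring_push(int v)
{
	if (NEXT(head) == tail)
		return -1;	/* full: one slot is always kept empty */
	ring[head] = v;
	head = NEXT(head);
	return 0;
}

static int ring_pop(int *v)
{
	if (tail == head)
		return -1;	/* empty */
	*v = ring[tail];
	tail = NEXT(tail);
	return 0;
}

int main(void)
{
	int v;

	for (v = 0; v < 5; v++)
		ring_push(v);
	while (!ring_pop(&v))
		printf("%d\n", v);
	return 0;
}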
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index a301898e5849..6406fcf5d69f 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1950,7 +1950,7 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
{
u32 len;
- len = le32_get_bits(READ_ONCE(desc->flags), HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
return len;
@@ -2143,13 +2143,24 @@ void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
{
+ u32 hp;
+
lockdep_assert_held(&srng->lock);
- if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.cached_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
- else
- srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+ } else {
+ hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+
+ if (hp != srng->u.dst_ring.cached_hp) {
+ srng->u.dst_ring.cached_hp = hp;
+ /* Make sure descriptor is read after the head
+ * pointer.
+ */
+ dma_rmb();
+ }
+ }
}
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
@@ -2159,7 +2170,6 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
- /* TODO: See if we need a write memory barrier here */
if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
@@ -2167,21 +2177,37 @@ void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
- *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ /* Make sure descriptor is written before updating the
+ * head pointer.
+ */
+ dma_wmb();
+ WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
- *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ dma_mb();
+ WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
}
} else {
if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
srng->u.src_ring.last_tp =
*(volatile u32 *)srng->u.src_ring.tp_addr;
+ /* Assume the implementation uses an MMIO write accessor
+ * which has the required wmb() so that the descriptor
+ * is written before updating the head pointer.
+ */
ath12k_hif_write32(ab,
(unsigned long)srng->u.src_ring.hp_addr -
(unsigned long)ab->mem,
srng->u.src_ring.hp);
} else {
srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ mb();
ath12k_hif_write32(ab,
(unsigned long)srng->u.dst_ring.tp_addr -
(unsigned long)ab->mem,
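The hal.c hunks above pair a producer-side dma_wmb() (descriptor writes ordered before the head-pointer update) with a consumer-side dma_rmb() (head-pointer read ordered before descriptor reads). Those kernel barriers order CPU accesses against DMA-coherent memory shared with the device; purely as an analogy, the same publish/consume ordering can be sketched in portable C11 with release/acquire fences:

#include <stdatomic.h>

#define RING_SIZE 256

struct desc {
	unsigned int len;
	unsigned int flags;
};

static struct desc ring[RING_SIZE];
static _Atomic unsigned int head;	/* producer-owned publish index */

/* Producer: fill the descriptor, then publish it by moving the head. */
void produce(unsigned int slot, struct desc d)
{
	ring[slot % RING_SIZE] = d;

	/* Release fence: the descriptor stores above become visible before
	 * the head update below, analogous to dma_wmb() before
	 * WRITE_ONCE(*hp_addr, hp) in the hunk above.
	 */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&head, slot + 1, memory_order_relaxed);
}

/* Consumer: read the head, then read descriptors published behind it. */
int consume(unsigned int slot, struct desc *out)
{
	unsigned int hp = atomic_load_explicit(&head, memory_order_relaxed);

	/* Acquire fence: the descriptor load below is ordered after the head
	 * read above, analogous to the dma_rmb() added after
	 * READ_ONCE(*hp_addr).
	 */
	atomic_thread_fence(memory_order_acquire);

	if (slot >= hp)
		return -1;	/* not published yet */
	*out = ring[slot % RING_SIZE];
	return 0;
}

int main(void)
{
	struct desc d = { .len = 64, .flags = 1 }, got;

	produce(0, d);
	return consume(0, &got);
}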
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index ec77ad498b33..6791ae1d64e5 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -14,6 +14,7 @@
#include "hw.h"
#include "mhi.h"
#include "dp_rx.h"
+#include "peer.h"
static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec,
0x90, 0xd6, 0x02, 0x42,
@@ -49,6 +50,12 @@ static bool ath12k_dp_srng_is_comp_ring_qcn9274(int ring_num)
return false;
}
+static bool ath12k_is_frame_link_agnostic_qcn9274(struct ath12k_link_vif *arvif,
+ struct ieee80211_mgmt *mgmt)
+{
+ return ieee80211_is_action(mgmt->frame_control);
+}
+
static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
int mac_id)
{
@@ -74,6 +81,52 @@ static bool ath12k_dp_srng_is_comp_ring_wcn7850(int ring_num)
return false;
}
+static bool ath12k_is_addba_resp_action_code(struct ieee80211_mgmt *mgmt)
+{
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+ return false;
+
+ if (mgmt->u.action.u.addba_resp.action_code != WLAN_ACTION_ADDBA_RESP)
+ return false;
+
+ return true;
+}
+
+static bool ath12k_is_frame_link_agnostic_wcn7850(struct ath12k_link_vif *arvif,
+ struct ieee80211_mgmt *mgmt)
+{
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ struct ath12k_hw *ah = ath12k_ar_to_ah(arvif->ar);
+ struct ath12k_base *ab = arvif->ar->ab;
+ __le16 fc = mgmt->frame_control;
+
+ spin_lock_bh(&ab->base_lock);
+ if (!ath12k_peer_find_by_addr(ab, mgmt->da) &&
+ !ath12k_peer_ml_find(ah, mgmt->da)) {
+ spin_unlock_bh(&ab->base_lock);
+ return false;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return arvif->is_up &&
+ (vif->valid_links == vif->active_links) &&
+ !ieee80211_is_probe_req(fc) &&
+ !ieee80211_is_auth(fc) &&
+ !ieee80211_is_deauth(fc) &&
+ !ath12k_is_addba_resp_action_code(mgmt);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ return !(ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
+ ieee80211_is_assoc_resp(fc) || ieee80211_is_reassoc_resp(fc) ||
+ ath12k_is_addba_resp_action_code(mgmt));
+
+ return false;
+}
+
static const struct ath12k_hw_ops qcn9274_ops = {
.get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
.mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_qcn9274,
@@ -81,6 +134,7 @@ static const struct ath12k_hw_ops qcn9274_ops = {
.rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
.get_ring_selector = ath12k_hw_get_ring_selector_qcn9274,
.dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_qcn9274,
+ .is_frame_link_agnostic = ath12k_is_frame_link_agnostic_qcn9274,
};
static const struct ath12k_hw_ops wcn7850_ops = {
@@ -90,6 +144,7 @@ static const struct ath12k_hw_ops wcn7850_ops = {
.rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850,
.get_ring_selector = ath12k_hw_get_ring_selector_wcn7850,
.dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850,
+ .is_frame_link_agnostic = ath12k_is_frame_link_agnostic_wcn7850,
};
#define ATH12K_TX_RING_MASK_0 0x1
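The new is_frame_link_agnostic callback follows the driver's per-chip ops-table pattern: QCN9274 treats any action frame as link-agnostic, while WCN7850 applies the stricter checks above. How such a table is typically consulted can be sketched as below; the call site and names are illustrative, not the actual ath12k code:

#include <stdbool.h>
#include <stdio.h>

struct mgmt_frame {
	unsigned short frame_control;
};

struct hw_ops {
	bool (*is_frame_link_agnostic)(const struct mgmt_frame *mgmt);
};

static bool chip_a_link_agnostic(const struct mgmt_frame *mgmt)
{
	/* Chip A: only management action frames (type 0, subtype 0xd)
	 * are treated as link-agnostic.
	 */
	return (mgmt->frame_control & 0x00fc) == 0x00d0;
}

static const struct hw_ops chip_a_ops = {
	.is_frame_link_agnostic = chip_a_link_agnostic,
};

static bool frame_is_link_agnostic(const struct hw_ops *ops,
				   const struct mgmt_frame *mgmt)
{
	/* Guard the optional callback before dispatching through the table. */
	return ops->is_frame_link_agnostic && ops->is_frame_link_agnostic(mgmt);
}

int main(void)
{
	struct mgmt_frame action = { .frame_control = 0x00d0 };

	printf("%d\n", frame_is_link_agnostic(&chip_a_ops, &action));
	return 0;
}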
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 0a75bc5abfa2..8ce11c3e6d5c 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -16,37 +16,21 @@
/* Target configuration defines */
/* Num VDEVS per radio */
-#define TARGET_NUM_VDEVS (16 + 1)
-
-#define TARGET_NUM_PEERS_PDEV_SINGLE (TARGET_NUM_STATIONS_SINGLE + \
- TARGET_NUM_VDEVS)
-#define TARGET_NUM_PEERS_PDEV_DBS (TARGET_NUM_STATIONS_DBS + \
- TARGET_NUM_VDEVS)
-#define TARGET_NUM_PEERS_PDEV_DBS_SBS (TARGET_NUM_STATIONS_DBS_SBS + \
- TARGET_NUM_VDEVS)
-
-/* Num of peers for Single Radio mode */
-#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV_SINGLE)
-
-/* Num of peers for DBS */
-#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV_DBS)
-
-/* Num of peers for DBS_SBS */
-#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV_DBS_SBS)
+#define TARGET_NUM_VDEVS(ab) ((ab)->profile_param->num_vdevs)
/* Max num of stations for Single Radio mode */
-#define TARGET_NUM_STATIONS_SINGLE 512
+#define TARGET_NUM_STATIONS_SINGLE(ab) ((ab)->profile_param->max_client_single)
/* Max num of stations for DBS */
-#define TARGET_NUM_STATIONS_DBS 128
+#define TARGET_NUM_STATIONS_DBS(ab) ((ab)->profile_param->max_client_dbs)
/* Max num of stations for DBS_SBS */
-#define TARGET_NUM_STATIONS_DBS_SBS 128
+#define TARGET_NUM_STATIONS_DBS_SBS(ab) \
+ ((ab)->profile_param->max_client_dbs_sbs)
+
+#define TARGET_NUM_STATIONS(ab, x) TARGET_NUM_STATIONS_##x(ab)
-#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
#define TARGET_NUM_PEER_KEYS 2
-#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
- 4 * TARGET_NUM_VDEVS + 8)
#define TARGET_AST_SKID_LIMIT 16
#define TARGET_NUM_OFFLD_PEERS 4
@@ -246,6 +230,8 @@ struct ath12k_hw_ops {
int (*rxdma_ring_sel_config)(struct ath12k_base *ab);
u8 (*get_ring_selector)(struct sk_buff *skb);
bool (*dp_srng_is_tx_comp_ring)(int ring_num);
+ bool (*is_frame_link_agnostic)(struct ath12k_link_vif *arvif,
+ struct ieee80211_mgmt *mgmt);
};
static inline
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 32519666632d..bd1ec3b2c084 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -209,7 +209,7 @@ ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
- [NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
},
[NL80211_BAND_6GHZ] = {
@@ -220,7 +220,7 @@ ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
- [NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
},
@@ -521,6 +521,18 @@ ath12k_mac_max_vht_nss(const u16 *vht_mcs_mask)
return 1;
}
+static u32
+ath12k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+ if (he_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
{
/* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing":
@@ -602,6 +614,33 @@ ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif,
return NULL;
}
+static const u8 *ath12k_mac_get_tx_bssid(struct ath12k_link_vif *arvif)
+{
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k_link_vif *tx_arvif;
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab,
+ "unable to access bss link conf for link %u required to retrieve transmitting link conf\n",
+ arvif->link_id);
+ return NULL;
+ }
+ if (link_conf->vif->type == NL80211_IFTYPE_STATION) {
+ if (link_conf->nontransmitted)
+ return link_conf->transmitter_bssid;
+ } else {
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
+ if (tx_arvif)
+ return tx_arvif->bssid;
+ }
+
+ return NULL;
+}
+
struct ieee80211_bss_conf *
ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
{
@@ -1457,11 +1496,13 @@ static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
return 0;
}
-static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_buff *bcn,
+static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif,
+ struct ath12k_link_vif *tx_arvif,
+ struct sk_buff *bcn,
u8 bssid_index, bool *nontx_profile_found)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data;
- const struct element *elem, *nontx, *index, *nie;
+ const struct element *elem, *nontx, *index, *nie, *ext_cap_ie;
const u8 *start, *tail;
u16 rem_len;
u8 i;
@@ -1479,6 +1520,11 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu
start, rem_len))
arvif->wpaie_present = true;
+ ext_cap_ie = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, start, rem_len);
+ if (ext_cap_ie && ext_cap_ie->datalen >= 11 &&
+ (ext_cap_ie->data[10] & WLAN_EXT_CAPA11_BCN_PROTECT))
+ tx_arvif->beacon_prot = true;
+
/* Return from here for the transmitted profile */
if (!bssid_index)
return;
@@ -1521,6 +1567,19 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu
if (index->data[0] == bssid_index) {
*nontx_profile_found = true;
+
+ /* Check if nontx BSS has beacon protection enabled */
+ if (!tx_arvif->beacon_prot) {
+ ext_cap_ie =
+ cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
+ nontx->data,
+ nontx->datalen);
+ if (ext_cap_ie && ext_cap_ie->datalen >= 11 &&
+ (ext_cap_ie->data[10] &
+ WLAN_EXT_CAPA11_BCN_PROTECT))
+ tx_arvif->beacon_prot = true;
+ }
+
if (cfg80211_find_ie(WLAN_EID_RSN,
nontx->data,
nontx->datalen)) {
@@ -1569,11 +1628,11 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif,
}
if (tx_arvif == arvif)
- ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL);
+ ath12k_mac_set_arvif_ies(arvif, tx_arvif, beacons->bcn[0].skb, 0, NULL);
for (i = 0; i < beacons->cnt; i++) {
if (tx_arvif != arvif && !nontx_profile_found)
- ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
+ ath12k_mac_set_arvif_ies(arvif, tx_arvif, beacons->bcn[i].skb,
bssid_index,
&nontx_profile_found);
@@ -1642,9 +1701,9 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
}
if (tx_arvif == arvif) {
- ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
+ ath12k_mac_set_arvif_ies(arvif, tx_arvif, bcn, 0, NULL);
} else {
- ath12k_mac_set_arvif_ies(arvif, bcn,
+ ath12k_mac_set_arvif_ies(arvif, tx_arvif, bcn,
link_conf->bssid_index,
&nontx_profile_found);
if (!nontx_profile_found)
@@ -1691,8 +1750,6 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
{
struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_bss_conf *link_conf;
- struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
int ret;
@@ -1723,18 +1780,8 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
-
- link_conf = ath12k_mac_get_link_bss_conf(arvif);
- if (!link_conf) {
- ath12k_warn(ar->ab,
- "unable to access bss link conf for link %u required to retrieve transmitting link conf\n",
- arvif->link_id);
- return;
- }
-
- tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
- if (tx_arvif) {
- params.tx_bssid = tx_arvif->bssid;
+ params.tx_bssid = ath12k_mac_get_tx_bssid(arvif);
+ if (params.tx_bssid) {
params.nontx_profile_idx = info->bssid_index;
params.nontx_profile_cnt = 1 << info->bssid_indicator;
}
@@ -2056,9 +2103,15 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
- if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
- if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40))
+ /* Firmware handles these two flags (IEEE80211_HT_CAP_SGI_20 and
+ * IEEE80211_HT_CAP_SGI_40) for enabling SGI, so reset both flags
+ * if the guard interval is forced to Long GI
+ */
+ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_FORCE_LGI) {
+ arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40);
+ } else {
+ /* Enable SGI flag if either SGI_20 or SGI_40 is supported */
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40))
arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
}
@@ -2170,6 +2223,34 @@ ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
return tx_mcs_set;
}
+static u8 ath12k_get_nss_160mhz(struct ath12k *ar,
+ u8 max_nss)
+{
+ u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+ u8 max_sup_nss = 0;
+
+ switch (nss_ratio_info) {
+ case WMI_NSS_RATIO_1BY2_NSS:
+ max_sup_nss = max_nss >> 1;
+ break;
+ case WMI_NSS_RATIO_3BY4_NSS:
+ ath12k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+ break;
+ case WMI_NSS_RATIO_1_NSS:
+ max_sup_nss = max_nss;
+ break;
+ case WMI_NSS_RATIO_2_NSS:
+ ath12k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+ break;
+ default:
+ ath12k_warn(ar->ab, "invalid nss ratio received from fw: %d\n",
+ nss_ratio_info);
+ break;
+ }
+
+ return max_sup_nss;
+}
+
static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta,
@@ -2181,11 +2262,13 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
struct ieee80211_link_sta *link_sta;
struct cfg80211_chan_def def;
enum nl80211_band band;
- const u16 *vht_mcs_mask;
+ u16 *vht_mcs_mask;
u16 tx_mcs_map;
u8 ampdu_factor;
u8 max_nss, vht_mcs;
- int i;
+ int i, vht_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -2238,6 +2321,25 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
+ vht_nss = ath12k_mac_max_vht_nss(vht_mcs_mask);
+
+ if (vht_nss > link_sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = link_sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (vht_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Setting vht range MCS value to peer supported nss:%d for peer %pM\n",
+ link_sta->rx_nss, arsta->addr);
+ vht_mcs_mask[link_sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ }
+
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
@@ -2271,10 +2373,90 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- arsta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ if (arg->peer_phymode == MODE_11AC_VHT160) {
+ tx_nss = ath12k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH12K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath12k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ nss_160 = u32_encode_bits(rx_nss - 1, ATH12K_PEER_RX_NSS_160MHZ);
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+ arsta->addr, arg->peer_max_mpdu, arg->peer_flags,
+ arg->peer_bw_rxnss_override);
+}
+
+static int ath12k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+ }
+ return 0;
+}
+
+static u16 ath12k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+ const u16 *he_mcs_limit)
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+ mcs_map = ath12k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+ he_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ case 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10:
+ case 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
- /* TODO: rxnss_override */
+static bool
+ath12k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+ if (he_mcs_mask[nss])
+ return false;
+
+ return true;
}
static void ath12k_peer_assoc_h_he(struct ath12k *ar,
@@ -2287,18 +2469,29 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_bss_conf *link_conf;
struct ieee80211_link_sta *link_sta;
+ struct cfg80211_chan_def def;
int i;
u8 ampdu_factor, max_nss;
u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u16 mcs_160_map, mcs_80_map;
+ u8 link_id = arvif->link_id;
bool support_160;
- u16 v;
+ enum nl80211_band band;
+ u16 *he_mcs_mask;
+ u8 he_mcs;
+ u16 he_tx_mcs = 0, v = 0;
+ int he_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, link_id, &def)))
+ return;
link_conf = ath12k_mac_get_link_bss_conf(arvif);
if (!link_conf) {
ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u",
- vif->addr, arvif->link_id);
+ vif->addr, link_id);
return;
}
@@ -2313,6 +2506,12 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
if (!he_cap->has_he)
return;
+ band = def.chan->band;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ if (ath12k_peer_assoc_h_he_masked(he_mcs_mask))
+ return;
+
arg->he_flag = true;
support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
@@ -2418,25 +2617,36 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
- switch (link_sta->bandwidth) {
- case IEEE80211_STA_RX_BW_160:
- if (he_cap->he_cap_elem.phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
- v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
- arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
-
- v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
- arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+ he_nss = ath12k_mac_max_he_nss(he_mcs_mask);
- arg->peer_he_mcs_count++;
+ if (he_nss > link_sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = link_sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (he_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
}
+ }
+
+ if (!user_rate_valid) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Setting he range MCS value to peer supported nss:%d for peer %pM\n",
+ link_sta->rx_nss, arsta->addr);
+ he_mcs_mask[link_sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ }
+
+ switch (link_sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
- v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
fallthrough;
default:
@@ -2444,11 +2654,54 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
break;
}
+
+ /* Calculate peer NSS capability from HE capabilities if STA
+ * supports HE.
+ */
+ for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = he_tx_mcs >> (2 * i) & 3;
+
+ /* With fixed rates, the MCS range in he_tx_mcs might be marked
+ * unsupported even though he_mcs_mask is set, so check either of
+ * them to find the NSS.
+ */
+ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ he_mcs_mask[i])
+ max_nss = i + 1;
+ }
+
+ max_nss = min(max_nss, ar->num_tx_chains);
+ arg->peer_nss = min(link_sta->rx_nss, max_nss);
+
+ if (arg->peer_phymode == MODE_11AX_HE160) {
+ tx_nss = ath12k_get_nss_160mhz(ar, ar->num_tx_chains);
+ rx_nss = min(arg->peer_nss, tx_nss);
+
+ arg->peer_nss = min(link_sta->rx_nss, ar->num_rx_chains);
+ arg->peer_bw_rxnss_override = ATH12K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath12k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ nss_160 = u32_encode_bits(rx_nss - 1, ATH12K_PEER_RX_NSS_160MHZ);
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+ arsta->addr, arg->peer_nss,
+ arg->peer_he_mcs_count,
+ arg->peer_bw_rxnss_override);
}
static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
@@ -2689,16 +2942,14 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
struct ieee80211_link_sta *link_sta)
{
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
- switch (link_sta->vht_cap.cap &
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
- case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ if (link_sta->vht_cap.cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))
return MODE_11AC_VHT160;
- case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
- return MODE_11AC_VHT80_80;
- default:
- /* not sure if this is a valid case? */
- return MODE_11AC_VHT160;
- }
+
+ /* Allow STA to connect even if it does not explicitly advertise 160 MHz
+ * support
+ */
+ return MODE_11AC_VHT160;
}
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
@@ -2720,11 +2971,8 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11AX_HE160;
- else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- return MODE_11AX_HE80_80;
- /* not sure if this is a valid case? */
- return MODE_11AX_HE160;
+
+ return MODE_UNKNOWN;
}
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
@@ -2752,14 +3000,10 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
return MODE_11BE_EHT160;
- if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- return MODE_11BE_EHT80_80;
-
ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
link_sta->he_cap.he_cap_elem.phy_cap_info[0]);
- return MODE_11BE_EHT160;
+ return MODE_UNKNOWN;
}
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
@@ -2784,6 +3028,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -2797,6 +3042,7 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
link_sta = ath12k_mac_get_link_sta(arsta);
if (!link_sta) {
@@ -2812,7 +3058,8 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
phymode = MODE_11BE_EHT40_2G;
else
phymode = MODE_11BE_EHT20_2G;
- } else if (link_sta->he_cap.has_he) {
+ } else if (link_sta->he_cap.has_he &&
+ !ath12k_peer_assoc_h_he_masked(he_mcs_mask)) {
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -2842,7 +3089,8 @@ static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
/* Check EHT first */
if (link_sta->eht_cap.has_eht) {
phymode = ath12k_mac_get_phymode_eht(ar, link_sta);
- } else if (link_sta->he_cap.has_he) {
+ } else if (link_sta->he_cap.has_he &&
+ !ath12k_peer_assoc_h_he_masked(he_mcs_mask)) {
phymode = ath12k_mac_get_phymode_he(ar, link_sta);
} else if (link_sta->vht_cap.vht_supported &&
!ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
@@ -3145,6 +3393,177 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arv
ath12k_smps_map[smps]);
}
+static int ath12k_mac_set_he_txbf_conf(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k *ar = arvif->ar;
+ u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ u32 value = 0;
+ int ret;
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in txbf conf\n");
+ return -EINVAL;
+ }
+
+ if (!link_conf->he_support)
+ return 0;
+
+ if (link_conf->he_su_beamformer) {
+ value |= u32_encode_bits(HE_SU_BFER_ENABLE, HE_MODE_SU_TX_BFER);
+ if (link_conf->he_mu_beamformer &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= u32_encode_bits(HE_MU_BFER_ENABLE, HE_MODE_MU_TX_BFER);
+ }
+
+ if (ahvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+ value |= u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
+ u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
+
+ if (link_conf->he_full_ul_mumimo)
+ value |= u32_encode_bits(HE_UL_MUMIMO_ENABLE, HE_MODE_UL_MUMIMO);
+
+ if (link_conf->he_su_beamformee)
+ value |= u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE);
+ }
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ value = u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
+ u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE);
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_vif_recalc_sta_he_txbf(struct ath12k *ar,
+ struct ath12k_link_vif *arvif,
+ struct ieee80211_sta_he_cap *he_cap,
+ int *hemode)
+{
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_he_cap_elem he_cap_elem = {};
+ struct ieee80211_sta_he_cap *cap_band;
+ struct cfg80211_chan_def def;
+ u8 link_id = arvif->link_id;
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in recalc txbf conf\n");
+ return -EINVAL;
+ }
+
+ if (!link_conf->he_support)
+ return 0;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+ if (WARN_ON(ath12k_mac_vif_link_chan(vif, link_id, &def)))
+ return -EINVAL;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap;
+ else
+ cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap;
+
+ memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem));
+
+ *hemode = 0;
+ if (HECAP_PHY_SUBFME_GET(he_cap_elem.phy_cap_info)) {
+ if (HECAP_PHY_SUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+ *hemode |= u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE);
+ if (HECAP_PHY_MUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+ *hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
+ }
+
+ if (vif->type != NL80211_IFTYPE_MESH_POINT) {
+ *hemode |= u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
+ u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
+
+ if (HECAP_PHY_ULMUMIMO_GET(he_cap_elem.phy_cap_info))
+ if (HECAP_PHY_ULMUMIMO_GET(he_cap->he_cap_elem.phy_cap_info))
+ *hemode |= u32_encode_bits(HE_UL_MUMIMO_ENABLE,
+ HE_MODE_UL_MUMIMO);
+
+ if (u32_get_bits(*hemode, HE_MODE_MU_TX_BFEE))
+ *hemode |= u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE);
+
+ if (u32_get_bits(*hemode, HE_MODE_MU_TX_BFER))
+ *hemode |= u32_encode_bits(HE_SU_BFER_ENABLE, HE_MODE_SU_TX_BFER);
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_set_eht_txbf_conf(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k *ar = arvif->ar;
+ u32 param = WMI_VDEV_PARAM_SET_EHT_MU_MODE;
+ u32 value = 0;
+ int ret;
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in eht txbf conf\n");
+ return -ENOENT;
+ }
+
+ if (!link_conf->eht_support)
+ return 0;
+
+ if (link_conf->eht_su_beamformer) {
+ value |= u32_encode_bits(EHT_SU_BFER_ENABLE, EHT_MODE_SU_TX_BFER);
+ if (link_conf->eht_mu_beamformer &&
+ ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= u32_encode_bits(EHT_MU_BFER_ENABLE,
+ EHT_MODE_MU_TX_BFER) |
+ u32_encode_bits(EHT_DL_MUOFDMA_ENABLE,
+ EHT_MODE_DL_OFDMA_MUMIMO) |
+ u32_encode_bits(EHT_UL_MUOFDMA_ENABLE,
+ EHT_MODE_UL_OFDMA_MUMIMO);
+ }
+
+ if (ahvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+ value |= u32_encode_bits(EHT_DL_MUOFDMA_ENABLE, EHT_MODE_DL_OFDMA) |
+ u32_encode_bits(EHT_UL_MUOFDMA_ENABLE, EHT_MODE_UL_OFDMA);
+
+ if (link_conf->eht_80mhz_full_bw_ul_mumimo)
+ value |= u32_encode_bits(EHT_UL_MUMIMO_ENABLE, EHT_MODE_MUMIMO);
+
+ if (link_conf->eht_su_beamformee)
+ value |= u32_encode_bits(EHT_SU_BFEE_ENABLE,
+ EHT_MODE_SU_TX_BFEE);
+ }
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d EHT MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
struct ieee80211_link_sta *link_sta)
{
@@ -3190,6 +3609,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ath12k_sta *ahsta;
struct ath12k_peer *peer;
bool is_auth = false;
+ u32 hemode = 0;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -3233,8 +3653,27 @@ static void ath12k_bss_assoc(struct ath12k *ar,
ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, false);
+ /* link_sta->he_cap must be protected by rcu_read_lock */
+ ret = ath12k_mac_vif_recalc_sta_he_txbf(ar, arvif, &link_sta->he_cap, &hemode);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM: %d\n",
+ arvif->vdev_id, bss_conf->bssid, ret);
+ rcu_read_unlock();
+ return;
+ }
+
rcu_read_unlock();
+ /* keep this before ath12k_wmi_send_peer_assoc_cmd() */
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_SET_HEMU_MODE, hemode);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n",
+ hemode, ret);
+ return;
+ }
+
+ peer_arg->is_assoc = true;
ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -3264,6 +3703,11 @@ static void ath12k_bss_assoc(struct ath12k *ar,
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
+ params.tx_bssid = ath12k_mac_get_tx_bssid(arvif);
+ if (params.tx_bssid) {
+ params.nontx_profile_idx = bss_conf->bssid_index;
+ params.nontx_profile_cnt = 1 << bss_conf->bssid_indicator;
+ }
ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
@@ -3434,12 +3878,17 @@ static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif,
INIT_DELAYED_WORK(&arvif->connection_loss_work,
ath12k_mac_vif_sta_connection_loss_work);
+ arvif->num_stations = 0;
+
for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].ht_mcs));
memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].he_mcs));
}
/* Handle MLO related assignments */
@@ -3755,13 +4204,13 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
psmode, arvif->vdev_id, ret);
}
-static bool ath12k_mac_supports_station_tpc(struct ath12k *ar,
- struct ath12k_vif *ahvif,
- const struct cfg80211_chan_def *chandef)
+static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif,
+ const struct cfg80211_chan_def *chandef)
{
return ath12k_wmi_supports_6ghz_cc_ext(ar) &&
test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
- ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ (ahvif->vdev_type == WMI_VDEV_TYPE_STA ||
+ ahvif->vdev_type == WMI_VDEV_TYPE_AP) &&
ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
chandef->chan &&
chandef->chan->band == NL80211_BAND_6GHZ;
@@ -3853,6 +4302,19 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
ether_addr_copy(arvif->bssid, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (info->enable_beacon) {
+ ret = ath12k_mac_set_he_txbf_conf(arvif);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set HE TXBF config for vdev: %d\n",
+ arvif->vdev_id);
+
+ ret = ath12k_mac_set_eht_txbf_conf(arvif);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set EHT TXBF config for vdev: %d\n",
+ arvif->vdev_id);
+ }
ath12k_control_beaconing(arvif, info);
if (arvif->is_up && info->he_support &&
@@ -4863,6 +5325,16 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
arg.key_cipher = WMI_CIPHER_AES_GCM;
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ arg.key_cipher = WMI_CIPHER_AES_CMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ arg.key_cipher = WMI_CIPHER_AES_GMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ arg.key_cipher = WMI_CIPHER_AES_CMAC;
+ break;
default:
ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
@@ -5157,13 +5629,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
lockdep_assert_wiphy(hw->wiphy);
- /* BIP needs to be done in software */
- if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) {
+ /* IGTK needs to be done in host software */
+ if (key->keyidx == 4 || key->keyidx == 5)
return 1;
- }
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
@@ -5251,6 +5719,20 @@ ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
}
static int
+ath12k_mac_bitrate_mask_num_he_rates(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+ num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+ return num_rates;
+}
+
+static int
ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta,
const struct cfg80211_bitrate_mask *mask,
@@ -5296,6 +5778,60 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
return ret;
}
+static int
+ath12k_mac_set_peer_he_fixed_rate(struct ath12k_link_vif *arvif,
+ struct ath12k_link_sta *arsta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath12k *ar = arvif->ar;
+ u8 he_rate, nss;
+ u32 rate_code;
+ int ret, i;
+ struct ath12k_sta *ahsta = arsta->ahsta;
+ struct ieee80211_sta *sta;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ sta = ath12k_ahsta_to_sta(ahsta);
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+ nss = i + 1;
+ he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath12k_warn(ar->ab, "No single HE Fixed rate found to set for %pM",
+ arsta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->deflink.rx_nss)
+ return -EINVAL;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Setting Fixed HE Rate for peer %pM. Device will not switch to any other selected rates",
+ arsta->addr);
+
+ rate_code = ATH12K_HW_RATE_CODE(he_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HE);
+
+ ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ arsta->addr, rate_code, ret);
+
+ return ret;
+}
+
static int ath12k_mac_station_assoc(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ath12k_link_sta *arsta,
@@ -5308,7 +5844,7 @@ static int ath12k_mac_station_assoc(struct ath12k *ar,
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
- u8 num_vht_rates;
+ u8 num_vht_rates, num_he_rates;
u8 link_id = arvif->link_id;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -5334,6 +5870,8 @@ static int ath12k_mac_station_assoc(struct ath12k *ar,
"invalid peer NSS %d\n", peer_arg->peer_nss);
return -EINVAL;
}
+
+ peer_arg->is_assoc = true;
ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -5348,9 +5886,10 @@ static int ath12k_mac_station_assoc(struct ath12k *ar,
}
num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+ num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask);
- /* If single VHT rate is configured (by set_bitrate_mask()),
- * peer_assoc will disable VHT. This is now enabled by a peer specific
+ /* If single VHT/HE rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT/HE. This is now enabled by a peer specific
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
@@ -5366,8 +5905,9 @@ static int ath12k_mac_station_assoc(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
- ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
- band);
+ ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, band);
+ } else if (link_sta->he_cap.has_he && num_he_rates == 1) {
+ ret = ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask, band);
if (ret)
return ret;
}
@@ -5431,8 +5971,9 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
- u32 changed, bw, nss, smps, bw_prev;
- int err, num_vht_rates;
+ const u16 *he_mcs_mask;
+ u32 changed, bw, nss, mac_nss, smps, bw_prev;
+ int err, num_vht_rates, num_he_rates;
const struct cfg80211_bitrate_mask *mask;
enum wmi_phy_mode peer_phymode;
struct ath12k_link_sta *arsta;
@@ -5452,6 +5993,7 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
spin_lock_bh(&ar->data_lock);
@@ -5466,8 +6008,10 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
spin_unlock_bh(&ar->data_lock);
nss = max_t(u32, 1, nss);
- nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
- ath12k_mac_max_vht_nss(vht_mcs_mask)));
+ mac_nss = max3(ath12k_mac_max_ht_nss(ht_mcs_mask),
+ ath12k_mac_max_vht_nss(vht_mcs_mask),
+ ath12k_mac_max_he_nss(he_mcs_mask));
+ nss = min(nss, mac_nss);
struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
kzalloc(sizeof(*peer_arg), GFP_KERNEL);
@@ -5550,6 +6094,8 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
mask = &arvif->bitrate_mask;
num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
+ num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
/* Peer_assoc_prepare will reject vht rates in
* bitrate_mask if its not available in range format and
@@ -5572,14 +6118,28 @@ static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
+ } else if (link_sta->he_cap.has_he && num_he_rates == 1) {
+ ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask, band);
} else {
- /* If the peer is non-VHT or no fixed VHT rate
+ /* If the peer is non-VHT/HE or no fixed VHT/HE rate
* is provided in the new bitrate mask we set the
- * other rates using peer_assoc command.
+ * other rates using peer_assoc command. Also clear
+ * the peer fixed rate settings as they have higher priority
+ * than peer_assoc.
*/
+ err = ath12k_wmi_set_peer_param(ar, arsta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath12k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ arsta->addr, err);
+
ath12k_peer_assoc_prepare(ar, arvif, arsta,
peer_arg, true);
+ peer_arg->is_assoc = false;
err = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
if (err)
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -5636,6 +6196,11 @@ static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif,
return -ENOBUFS;
ar->num_stations++;
+ arvif->num_stations++;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac station %pM connected to vdev %u num_stations %u\n",
+ arsta->addr, arvif->vdev_id, arvif->num_stations);
return 0;
}
@@ -5652,6 +6217,17 @@ static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif,
return;
ar->num_stations--;
+
+ if (arvif->num_stations) {
+ arvif->num_stations--;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac station %pM disconnected from vdev %u num_stations %u\n",
+ arsta->addr, arvif->vdev_id, arvif->num_stations);
+ } else {
+ ath12k_warn(ar->ab,
+ "mac station %pM disconnect for vdev %u without any connected station\n",
+ arsta->addr, arvif->vdev_id);
+ }
}
static void ath12k_mac_station_post_remove(struct ath12k *ar,
@@ -5799,7 +6375,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
struct ath12k_base *ab = ar->ab;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
- struct ath12k_wmi_peer_create_arg peer_param = {0};
+ struct ath12k_wmi_peer_create_arg peer_param = {};
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -6143,7 +6719,7 @@ static int ath12k_mac_mlo_sta_set_link_active(struct ath12k_base *ab,
u8 *mlo_inactive_vdev_lst,
u8 num_mlo_inactive_vdev)
{
- struct wmi_mlo_link_set_active_arg param = {0};
+ struct wmi_mlo_link_set_active_arg param = {};
u32 entry_idx, entry_offset, vdev_idx;
u8 vdev_id;
@@ -6205,8 +6781,8 @@ static int ath12k_mac_mlo_sta_update_link_active(struct ath12k_base *ab,
struct ieee80211_hw *hw,
struct ath12k_vif *ahvif)
{
- u8 mlo_vdev_id_lst[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
- u32 mlo_freq_list[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
+ u8 mlo_vdev_id_lst[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ u32 mlo_freq_list[IEEE80211_MLD_MAX_NUM_LINKS] = {};
unsigned long links = ahvif->links_map;
enum wmi_mlo_link_force_reason reason;
struct ieee80211_chanctx_conf *conf;
@@ -6943,7 +7519,7 @@ static struct ieee80211_sta_ht_cap
ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
{
int i;
- struct ieee80211_sta_ht_cap ht_cap = {0};
+ struct ieee80211_sta_ht_cap ht_cap = {};
u32 ar_vht_cap = ar->pdev->cap.vht_cap;
if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
@@ -7099,7 +7675,7 @@ static struct ieee80211_sta_vht_cap
ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
u32 rate_cap_rx_chainmask)
{
- struct ieee80211_sta_vht_cap vht_cap = {0};
+ struct ieee80211_sta_vht_cap vht_cap = {};
u16 txmcs_map, rxmcs_map;
int i;
@@ -7108,10 +7684,8 @@ ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
- /* TODO: Enable back VHT160 mode once association issues are fixed */
- /* Disabling VHT160 and VHT80+80 modes */
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+ /* 80P80 is not supported */
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
rxmcs_map = 0;
txmcs_map = 0;
@@ -7133,6 +7707,12 @@ ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+ /* Check if the HW supports 1:1 NSS ratio and reset
+ * EXT NSS BW Support field to 0 to indicate 1:1 ratio
+ */
+ if (ar->pdev->cap.nss_ratio_info == WMI_NSS_RATIO_1_NSS)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_EXT_NSS_BW_MASK;
+
return vht_cap;
}
@@ -7310,12 +7890,55 @@ static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap,
return cpu_to_le16(bcap->he_6ghz_capa);
}
-static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap,
+static void ath12k_mac_set_hemcsmap(struct ath12k *ar,
+ struct ath12k_pdev_cap *cap,
+ struct ieee80211_sta_he_cap *he_cap)
+{
+ struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp;
+ u8 maxtxnss_160 = ath12k_get_nss_160mhz(ar, ar->num_tx_chains);
+ u8 maxrxnss_160 = ath12k_get_nss_160mhz(ar, ar->num_rx_chains);
+ u16 txmcs_map_160 = 0, rxmcs_map_160 = 0;
+ u16 txmcs_map = 0, rxmcs_map = 0;
+ u32 i;
+
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains &&
+ (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+ txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains &&
+ (ar->cfg_rx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+ rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < maxtxnss_160 &&
+ (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+ txmcs_map_160 |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ txmcs_map_160 |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < maxrxnss_160 &&
+ (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+ rxmcs_map_160 |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ rxmcs_map_160 |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ mcs_nss->rx_mcs_80 = cpu_to_le16(rxmcs_map & 0xffff);
+ mcs_nss->tx_mcs_80 = cpu_to_le16(txmcs_map & 0xffff);
+ mcs_nss->rx_mcs_160 = cpu_to_le16(rxmcs_map_160 & 0xffff);
+ mcs_nss->tx_mcs_160 = cpu_to_le16(txmcs_map_160 & 0xffff);
+}
+
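/* ath12k_mac_set_hemcsmap() above fills the standard HE MCS map: a u16 with
 * two bits per spatial stream (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11,
 * 3 = stream not supported). A small illustration of that encoding; the
 * demo_* helper is hypothetical:
 */
#include <linux/ieee80211.h>

static u16 demo_he_mcs_map(unsigned int supported_streams)
{
	u16 map = 0;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		if (i < supported_streams)
			map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
		else
			map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
	}

	return map;
}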
+static void ath12k_mac_copy_he_cap(struct ath12k *ar,
+ struct ath12k_band_cap *band_cap,
int iftype, u8 num_tx_chains,
struct ieee80211_sta_he_cap *he_cap)
{
struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
- struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp;
he_cap->has_he = true;
memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
@@ -7325,11 +7948,15 @@ static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap,
he_cap_elem->mac_cap_info[1] &=
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
-
+ he_cap_elem->phy_cap_info[0] &=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ /* 80PLUS80 is not supported */
+ he_cap_elem->phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
he_cap_elem->phy_cap_info[5] &=
~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
- he_cap_elem->phy_cap_info[5] &=
- ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
he_cap_elem->phy_cap_info[5] |= num_tx_chains - 1;
switch (iftype) {
@@ -7352,13 +7979,7 @@ static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap,
break;
}
- mcs_nss->rx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff);
- mcs_nss->tx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff);
- mcs_nss->rx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
- mcs_nss->tx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
- mcs_nss->rx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
- mcs_nss->tx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
-
+ ath12k_mac_set_hemcsmap(ar, &ar->pdev->cap, he_cap);
memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
if (he_cap_elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
@@ -7548,7 +8169,7 @@ static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar,
data[idx].types_mask = BIT(i);
- ath12k_mac_copy_he_cap(band_cap, i, ar->num_tx_chains, he_cap);
+ ath12k_mac_copy_he_cap(ar, band_cap, i, ar->num_tx_chains, he_cap);
if (band == NL80211_BAND_6GHZ) {
data[idx].he_6ghz_capa.capa =
ath12k_mac_setup_he_6ghz_cap(cap, band_cap);
@@ -7756,7 +8377,7 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
skb_cb->paddr = paddr;
- ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ ret = ath12k_wmi_mgmt_send(arvif, buf_id, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
goto err_unmap_buf;
@@ -7783,6 +8404,174 @@ static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
ath12k_mgmt_over_wmi_tx_drop(ar, skb);
}
+static int ath12k_mac_mgmt_action_frame_fill_elem_data(struct ath12k_link_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u8 category, *buf, iv_len, action_code, dialog_token;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_chanctx_conf *conf;
+ int cur_tx_power, max_tx_power;
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct wiphy *wiphy = hw->wiphy;
+ struct ath12k_skb_cb *skb_cb;
+ struct ieee80211_mgmt *mgmt;
+ unsigned int remaining_len;
+ bool has_protected;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* make sure category field is present */
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return -EINVAL;
+
+ remaining_len = skb->len - IEEE80211_MIN_ACTION_SIZE;
+ has_protected = ieee80211_has_protected(hdr->frame_control);
+
+ /* In case of SW crypto with a protected header (PMF), the packet will already
+ * be encrypted, so we cannot fill in data in that case.
+ */
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ has_protected)
+ return 0;
+
+ mgmt = (struct ieee80211_mgmt *)hdr;
+ buf = (u8 *)&mgmt->u.action;
+
+ /* A frame with FCTL_PROTECTED set might have extra IV space added after the
+ * header. Offset past that many bytes if it is there.
+ */
+ if (has_protected) {
+ skb_cb = ATH12K_SKB_CB(skb);
+
+ switch (skb_cb->cipher) {
+ /* Cipher suites that have %IEEE80211_KEY_FLAG_GENERATE_IV_MGMT set in the
+ * key need to be processed. See ath12k_install_key().
+ */
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iv_len = IEEE80211_CCMP_HDR_LEN;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iv_len = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (remaining_len < iv_len)
+ return -EINVAL;
+
+ buf += iv_len;
+ remaining_len -= iv_len;
+ }
+
+ category = *buf++;
+ /* the category code is already accounted for in %IEEE80211_MIN_ACTION_SIZE,
+ * hence no need to adjust remaining_len
+ */
+
+ switch (category) {
+ case WLAN_CATEGORY_RADIO_MEASUREMENT:
+ /* need action code and dialog token */
+ if (remaining_len < 2)
+ return -EINVAL;
+
+ /* Packet Format:
+ * Action Code | Dialog Token | Variable Len (based on Action Code)
+ */
+ action_code = *buf++;
+ dialog_token = *buf++;
+ remaining_len -= 2;
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab,
+ "failed to get bss link conf for vdev %d in RM handling\n",
+ arvif->vdev_id);
+ return -EINVAL;
+ }
+
+ conf = wiphy_dereference(wiphy, link_conf->chanctx_conf);
+ if (!conf)
+ return -ENOENT;
+
+ cur_tx_power = link_conf->txpower;
+ max_tx_power = min(conf->def.chan->max_reg_power,
+ (int)ar->max_tx_power / 2);
+
+ ath12k_mac_op_get_txpower(hw, arvif->ahvif->vif, arvif->link_id,
+ &cur_tx_power);
+
+ switch (action_code) {
+ case WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST:
+ /* need variable fields to be present in len */
+ if (remaining_len < 2)
+ return -EINVAL;
+
+ /* Variable length format as defined in IEEE 802.11-2024,
+ * Figure 9-1187-Link Measurement Request frame Action field
+ * format.
+ * Transmit Power | Max Tx Power
+ * We fill both of these.
+ */
+ *buf++ = cur_tx_power;
+ *buf = max_tx_power;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "RRM: Link Measurement Req dialog_token %u cur_tx_power %d max_tx_power %d\n",
+ dialog_token, cur_tx_power, max_tx_power);
+ break;
+ case WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT:
+ /* need variable fields to be present in len */
+ if (remaining_len < 3)
+ return -EINVAL;
+
+ /* Variable length format as defined in IEEE 802.11-2024,
+ * Figure 9-1188-Link Measurement Report frame Action field format
+ * TPC Report | Variable Fields
+ *
+ * TPC Report Format:
+ * Element ID | Len | Tx Power | Link Margin
+ *
+ * We fill Tx power in the TPC Report (2nd index)
+ */
+ buf[2] = cur_tx_power;
+
+ /* TODO: Link margin data is not available at present, so it
+ * cannot be filled yet. Once it becomes available, it can be
+ * added here.
+ */
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "RRM: Link Measurement Report dialog_token %u cur_tx_power %d\n",
+ dialog_token, cur_tx_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* nothing to fill */
+ return 0;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_mgmt_frame_fill_elem_data(struct ath12k_link_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_action(hdr->frame_control))
+ return 0;
+
+ return ath12k_mac_mgmt_action_frame_fill_elem_data(arvif, skb);
+}
+
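/* For reference, the Link Measurement Request action field filled above is
 * one octet per field: Category | Action Code | Dialog Token |
 * Transmit Power | Max Transmit Power. A minimal sketch of how the driver's
 * writes line up with that layout; demo_fill_link_measurement_req() is a
 * hypothetical helper and offsets are relative to the start of the action field:
 */
#include <linux/types.h>

static void demo_fill_link_measurement_req(u8 *action, s8 cur_pwr, s8 max_pwr)
{
	/* action[0] = Category, action[1] = Action Code,
	 * action[2] = Dialog Token - all already present in the frame
	 */
	action[3] = cur_pwr;	/* Transmit Power */
	action[4] = max_pwr;	/* Max Transmit Power */
}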
static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
@@ -7814,6 +8603,20 @@ static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work
arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[skb_cb->link_id]);
if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
+ /* Fill in the data that the driver is required to provide,
+ * for example the Max Tx power in Link Measurement Request/Report.
+ */
+ ret = ath12k_mac_mgmt_frame_fill_elem_data(arvif, skb);
+ if (ret) {
+ /* If we could not fill the data for any reason, do not
+ * drop the packet; transmit it anyway.
+ * For example: software crypto with PMF.
+ */
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Failed to fill the required data for the mgmt packet err %d\n",
+ ret);
+ }
+
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -8068,6 +8871,9 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
+ if (sta && sta->mlo)
+ skb_cb->flags |= ATH12K_SKB_MLO_STA;
+
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
@@ -8092,6 +8898,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
is_dvlan = true;
if (!vif->valid_links || !is_mcast || is_dvlan ||
+ (skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) ||
test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
ret = ath12k_dp_tx(ar, arvif, skb, false, 0, is_mcast);
if (unlikely(ret)) {
@@ -8342,14 +9149,9 @@ err:
static void ath12k_drain_tx(struct ath12k_hw *ah)
{
- struct ath12k *ar = ah->radio;
+ struct ath12k *ar;
int i;
- if (ath12k_ftm_mode) {
- ath12k_err(ar->ab, "fail to start mac operations in ftm mode\n");
- return;
- }
-
lockdep_assert_wiphy(ah->hw->wiphy);
for_each_ar(ah, ar, i)
@@ -8362,6 +9164,9 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
struct ath12k *ar;
int ret, i;
+ if (ath12k_ftm_mode)
+ return -EPERM;
+
lockdep_assert_wiphy(hw->wiphy);
ath12k_drain_tx(ah);
@@ -8648,72 +9453,6 @@ static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif,
return 0;
}
-static u32
-ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
-{
- struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
- struct ath12k_band_cap *cap_band = NULL;
- u32 *hecap_phy_ptr = NULL;
- u32 hemode;
-
- if (pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP)
- cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
- else
- cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
-
- hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
-
- hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) |
- u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr),
- HE_MODE_SU_TX_BFER) |
- u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr),
- HE_MODE_UL_MUMIMO);
-
- /* TODO: WDS and other modes */
- if (viftype == NL80211_IFTYPE_AP) {
- hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr),
- HE_MODE_MU_TX_BFER) |
- u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
- u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
- } else {
- hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
- }
-
- return hemode;
-}
-
-static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
- struct ath12k_link_vif *arvif)
-{
- u32 param_id, param_value;
- struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *ahvif = arvif->ahvif;
- int ret;
-
- param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
- param_value = ath12k_mac_prepare_he_mode(ar->pdev, ahvif->vif->type);
- ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- param_id, param_value);
- if (ret) {
- ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
- arvif->vdev_id, ret, param_value);
- return ret;
- }
- param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
- param_value =
- u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
- u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
- HE_TRIG_NONTRIG_SOUNDING_MODE);
- ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- param_id, param_value);
- if (ret) {
- ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
- return ret;
-}
-
static void ath12k_mac_update_vif_offload(struct ath12k_link_vif *arvif)
{
struct ath12k_vif *ahvif = arvif->ahvif;
@@ -8943,8 +9682,8 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
struct ieee80211_hw *hw = ah->hw;
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
- struct ath12k_wmi_peer_create_arg peer_param = {0};
+ struct ath12k_wmi_vdev_create_arg vdev_arg = {};
+ struct ath12k_wmi_peer_create_arg peer_param = {};
struct ieee80211_bss_conf *link_conf = NULL;
u32 param_id, param_value;
u16 nss;
@@ -9318,9 +10057,9 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
if (arvif->is_created)
goto flush;
- if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) {
ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
- TARGET_NUM_VDEVS);
+ TARGET_NUM_VDEVS(ab));
goto unlock;
}
@@ -9907,14 +10646,6 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
spin_unlock_bh(&ab->base_lock);
/* TODO: Notify if secondary 80Mhz also needs radar detection */
- if (link_conf->he_support) {
- ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
- if (ret) {
- ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
- arg.vdev_id);
- return ret;
- }
- }
}
arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
@@ -9944,7 +10675,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
/* TODO: For now we only set TPC power here. However when
* channel changes, say CSA, it should be updated again.
*/
- if (ath12k_mac_supports_station_tpc(ar, ahvif, chandef)) {
+ if (ath12k_mac_supports_tpc(ar, ahvif, chandef)) {
ath12k_mac_fill_reg_tpc_info(ar, arvif, ctx);
ath12k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
&arvif->reg_tpc_info);
@@ -10132,7 +10863,7 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
int n_vifs)
{
struct ath12k_wmi_vdev_up_params params = {};
- struct ath12k_link_vif *arvif, *tx_arvif;
+ struct ath12k_link_vif *arvif;
struct ieee80211_bss_conf *link_conf;
struct ath12k_base *ab = ar->ab;
struct ieee80211_vif *vif;
@@ -10204,10 +10935,8 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
-
- tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
- if (tx_arvif) {
- params.tx_bssid = tx_arvif->bssid;
+ params.tx_bssid = ath12k_mac_get_tx_bssid(arvif);
+ if (params.tx_bssid) {
params.nontx_profile_idx = link_conf->bssid_index;
params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
}
@@ -10501,7 +11230,9 @@ void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
bool is_psd_power = false, is_tpe_present = false;
s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
psd_power, tx_power, eirp_power;
+ struct ath12k_vif *ahvif = arvif->ahvif;
u16 start_freq, center_freq;
+ u8 reg_6ghz_power_mode;
chan = ctx->def.chan;
start_freq = ath12k_mac_get_6ghz_start_frequency(&ctx->def);
@@ -10657,8 +11388,14 @@ void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
reg_tpc_info->num_pwr_levels = num_pwr_levels;
reg_tpc_info->is_psd_power = is_psd_power;
reg_tpc_info->eirp_power = eirp_power;
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+ reg_6ghz_power_mode = bss_conf->power_type;
+ else
+ /* For now, LPI is the only supported AP power mode */
+ reg_6ghz_power_mode = IEEE80211_REG_LPI_AP;
+
reg_tpc_info->ap_power_type =
- ath12k_reg_ap_pwr_convert(bss_conf->power_type);
+ ath12k_reg_ap_pwr_convert(reg_6ghz_power_mode);
}
static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
@@ -10691,7 +11428,7 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
"no transmit power envelope match client power type %d\n",
client_type);
return;
- };
+ }
if (psd_valid) {
tpc_info->is_psd_power = true;
@@ -11081,19 +11818,36 @@ ath12k_mac_has_single_legacy_rate(struct ath12k *ar,
if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
return false;
+ if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+ return false;
+
return num_rates == 1;
}
+static __le16
+ath12k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+ return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
static bool
ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
+ struct ieee80211_vif *vif,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
int *nss)
{
struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ const struct ieee80211_sta_he_cap *he_cap;
+ u16 he_mcs_map = 0;
u8 ht_nss_mask = 0;
u8 vht_nss_mask = 0;
+ u8 he_nss_mask = 0;
int i;
/* No need to consider legacy here. Basic rates are always present
@@ -11120,7 +11874,24 @@ ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
return false;
}
- if (ht_nss_mask != vht_nss_mask)
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+ if (!he_cap)
+ return false;
+
+ he_mcs_map = le16_to_cpu(ath12k_mac_get_tx_mcs_map(he_cap));
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (mask->control[band].he_mcs[i] == 0)
+ continue;
+
+ if (mask->control[band].he_mcs[i] ==
+ ath12k_mac_get_max_he_mcs_map(he_mcs_map, i))
+ he_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
return false;
if (ht_nss_mask == 0)
@@ -11167,54 +11938,182 @@ ath12k_mac_get_single_legacy_rate(struct ath12k *ar,
return 0;
}
-static int ath12k_mac_set_fixed_rate_params(struct ath12k_link_vif *arvif,
- u32 rate, u8 nss, u8 sgi, u8 ldpc)
+static int
+ath12k_mac_set_fixed_rate_gi_ltf(struct ath12k_link_vif *arvif, u8 he_gi, u8 he_ltf)
{
struct ath12k *ar = arvif->ar;
- u32 vdev_param;
int ret;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
- arvif->vdev_id, rate, nss, sgi);
+ /* Map nl80211 GI values to WMI: 0.8 us = 0, 1.6 us = 2 and 3.2 us = 3. */
+ if (he_gi && he_gi != 0xFF)
+ he_gi += 1;
- vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, rate);
+ WMI_VDEV_PARAM_SGI, he_gi);
if (ret) {
- ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
- rate, ret);
+ ath12k_warn(ar->ab, "failed to set HE GI:%d, error:%d\n",
+ he_gi, ret);
return ret;
}
+ /* WMI HE LTF values start from 1 */
+ if (he_ltf != 0xFF)
+ he_ltf += 1;
- vdev_param = WMI_VDEV_PARAM_NSS;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, nss);
+ WMI_VDEV_PARAM_HE_LTF, he_ltf);
if (ret) {
- ath12k_warn(ar->ab, "failed to set nss param %d: %d\n",
- nss, ret);
+ ath12k_warn(ar->ab, "failed to set HE LTF:%d, error:%d\n",
+ he_ltf, ret);
return ret;
}
+ return 0;
+}
+
+static int
+ath12k_mac_set_auto_rate_gi_ltf(struct ath12k_link_vif *arvif, u16 he_gi, u8 he_ltf)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+ u32 he_ar_gi_ltf;
+
+ if (he_gi != 0xFF) {
+ switch (he_gi) {
+ case NL80211_RATE_INFO_HE_GI_0_8:
+ he_gi = WMI_AUTORATE_800NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_1_6:
+ he_gi = WMI_AUTORATE_1600NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_3_2:
+ he_gi = WMI_AUTORATE_3200NS_GI;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid GI\n");
+ return -EINVAL;
+ }
+ }
+
+ if (he_ltf != 0xFF) {
+ switch (he_ltf) {
+ case NL80211_RATE_INFO_HE_1XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_1X;
+ break;
+ case NL80211_RATE_INFO_HE_2XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_2X;
+ break;
+ case NL80211_RATE_INFO_HE_4XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_4X;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid LTF\n");
+ return -EINVAL;
+ }
+ }
+
+ he_ar_gi_ltf = he_gi | he_ltf;
- vdev_param = WMI_VDEV_PARAM_SGI;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, sgi);
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+ he_ar_gi_ltf);
if (ret) {
- ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
- sgi, ret);
+ ath12k_warn(ar->ab,
+ "failed to set HE autorate GI:%u, LTF:%u params, error:%d\n",
+ he_gi, he_ltf, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static u32 ath12k_mac_nlgi_to_wmigi(enum nl80211_txrate_gi gi)
+{
+ switch (gi) {
+ case NL80211_TXRATE_DEFAULT_GI:
+ return WMI_GI_400_NS;
+ case NL80211_TXRATE_FORCE_LGI:
+ return WMI_GI_800_NS;
+ default:
+ return WMI_GI_400_NS;
+ }
+}
+
+static int ath12k_mac_set_rate_params(struct ath12k_link_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc,
+ u8 he_gi, u8 he_ltf, bool he_fixed_rate)
+{
+ struct ieee80211_bss_conf *link_conf;
+ struct ath12k *ar = arvif->ar;
+ u32 vdev_param;
+ u32 param_value;
+ int ret;
+ bool he_support;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf)
+ return -EINVAL;
+
+ he_support = link_conf->he_support;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x\n",
+ arvif->vdev_id, rate, nss, sgi, ldpc);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n", he_gi,
+ he_ltf, he_fixed_rate);
+
+ if (!he_support) {
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
return ret;
}
- vdev_param = WMI_VDEV_PARAM_LDPC;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, ldpc);
+ WMI_VDEV_PARAM_LDPC, ldpc);
if (ret) {
ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
ldpc, ret);
return ret;
}
+ if (he_support) {
+ if (he_fixed_rate)
+ ret = ath12k_mac_set_fixed_rate_gi_ltf(arvif, he_gi, he_ltf);
+ else
+ ret = ath12k_mac_set_auto_rate_gi_ltf(arvif, he_gi, he_ltf);
+ if (ret)
+ return ret;
+ } else {
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ param_value = ath12k_mac_nlgi_to_wmigi(sgi);
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -11243,6 +12142,31 @@ ath12k_mac_vht_mcs_range_present(struct ath12k *ar,
return true;
}
+static bool
+ath12k_mac_he_mcs_range_present(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 he_mcs;
+
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = mask->control[band].he_mcs[i];
+
+ switch (he_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(10) - 1:
+ case BIT(12) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
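/* ath12k_mac_he_mcs_range_present() above only accepts per-stream HE MCS
 * masks that form a contiguous range starting at MCS 0. Expanded, the
 * accepted values are (a quick compile-time check for illustration):
 */
#include <linux/build_bug.h>
#include <linux/bits.h>

static_assert(BIT(8) - 1 == 0x00ff);	/* MCS 0-7  */
static_assert(BIT(10) - 1 == 0x03ff);	/* MCS 0-9  */
static_assert(BIT(12) - 1 == 0x0fff);	/* MCS 0-11 */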
static void ath12k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
@@ -11251,7 +12175,10 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
struct ath12k_link_sta *arsta;
struct ath12k *ar = arvif->ar;
- arsta = rcu_dereference(ahsta->link[arvif->link_id]);
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+ ahsta->link[arvif->link_id]);
if (!arsta || arsta->arvif != arvif)
return;
@@ -11289,6 +12216,61 @@ static void ath12k_mac_disable_peer_fixed_rate(void *data,
arsta->addr, ret);
}
+static bool
+ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ unsigned int link_id)
+{
+ bool he_fixed_rate = false, vht_fixed_rate = false;
+ const u16 *vht_mcs_mask, *he_mcs_mask;
+ struct ieee80211_link_sta *link_sta;
+ struct ath12k_peer *peer, *tmp;
+ u8 vht_nss, he_nss;
+ int ret = true;
+
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+
+ if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+ vht_fixed_rate = true;
+
+ if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+ he_fixed_rate = true;
+
+ if (!vht_fixed_rate && !he_fixed_rate)
+ return true;
+
+ vht_nss = ath12k_mac_max_vht_nss(vht_mcs_mask);
+ he_nss = ath12k_mac_max_he_nss(he_mcs_mask);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
+ if (peer->sta) {
+ link_sta = rcu_dereference(peer->sta->link[link_id]);
+ if (!link_sta) {
+ ret = false;
+ goto exit;
+ }
+
+ if (vht_fixed_rate && (!link_sta->vht_cap.vht_supported ||
+ link_sta->rx_nss < vht_nss)) {
+ ret = false;
+ goto exit;
+ }
+ if (he_fixed_rate && (!link_sta->he_cap.has_he ||
+ link_sta->rx_nss < he_nss)) {
+ ret = false;
+ goto exit;
+ }
+ }
+ }
+exit:
+ spin_unlock_bh(&ar->ab->base_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
static int
ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -11301,13 +12283,17 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u8 he_ltf = 0;
+ u8 he_gi = 0;
u32 rate;
- u8 nss;
+ u8 nss, mac_nss;
u8 sgi;
u8 ldpc;
int single_nss;
int ret;
int num_rates;
+ bool he_fixed_rate = false;
lockdep_assert_wiphy(hw->wiphy);
@@ -11322,14 +12308,18 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
band = def.chan->band;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
- if (sgi == NL80211_TXRATE_FORCE_LGI) {
+ if (sgi == NL80211_TXRATE_FORCE_SGI) {
ret = -EINVAL;
goto out;
}
+ he_gi = mask->control[band].he_gi;
+ he_ltf = mask->control[band].he_ltf;
+
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
* requires passing at least one of used basic rates along with them.
* Fixed rate setting across different preambles(legacy, HT, VHT) is
@@ -11346,18 +12336,31 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
goto out;
}
+
ieee80211_iterate_stations_mtx(hw,
ath12k_mac_disable_peer_fixed_rate,
arvif);
- } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, vif, band, mask,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
nss = single_nss;
+ arvif->bitrate_mask = *mask;
+
+ ieee80211_iterate_stations_atomic(hw,
+ ath12k_mac_set_bitrate_mask_iter,
+ arvif);
} else {
rate = WMI_FIXED_RATE_NONE;
- nss = min_t(u32, ar->num_tx_chains,
- max(ath12k_mac_max_ht_nss(ht_mcs_mask),
- ath12k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (!ath12k_mac_validate_fixed_rate_settings(ar, band,
+ mask, arvif->link_id))
+ ath12k_warn(ar->ab,
+ "failed to update fixed rate settings due to mcs/nss incompatibility\n");
+
+ mac_nss = max3(ath12k_mac_max_ht_nss(ht_mcs_mask),
+ ath12k_mac_max_vht_nss(vht_mcs_mask),
+ ath12k_mac_max_he_nss(he_mcs_mask));
+ nss = min_t(u32, ar->num_tx_chains, mac_nss);
/* If multiple rates across different preambles are given
* we can reconfigure this info with all peers using PEER_ASSOC
@@ -11389,9 +12392,21 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
*/
ath12k_warn(ar->ab,
"Setting more than one MCS Value in bitrate mask not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+ num_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask);
+ if (num_rates == 1)
+ he_fixed_rate = true;
+
+ if (!ath12k_mac_he_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ ath12k_warn(ar->ab,
+ "Setting more than one HE MCS Value in bitrate mask not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
ieee80211_iterate_stations_mtx(hw,
ath12k_mac_disable_peer_fixed_rate,
arvif);
@@ -11402,9 +12417,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
arvif);
}
- ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ ret = ath12k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
if (ret) {
- ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ ath12k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
arvif->vdev_id, ret);
}
@@ -11590,8 +12606,8 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_fw_stats_req_params params = {};
struct ath12k_link_sta *arsta;
+ s8 signal, noise_floor;
struct ath12k *ar;
- s8 signal;
bool db2dbm;
lockdep_assert_wiphy(hw->wiphy);
@@ -11639,17 +12655,108 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
!(ath12k_mac_get_fw_stats(ar, &params)))
signal = arsta->rssi_beacon;
+ spin_lock_bh(&ar->data_lock);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
if (signal) {
- sinfo->signal = db2dbm ? signal : signal + ATH12K_DEFAULT_NOISE_FLOOR;
+ sinfo->signal = db2dbm ? signal : signal + noise_floor;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
if (!db2dbm)
- sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR;
+ sinfo->signal_avg += noise_floor;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+
+ sinfo->tx_retries = arsta->tx_retry_count;
+ sinfo->tx_failed = arsta->tx_retry_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+}
+
+static void ath12k_mac_op_link_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo)
+{
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
+ struct ath12k_fw_stats_req_params params = {};
+ struct ath12k_link_sta *arsta;
+ struct ath12k *ar;
+ s8 signal;
+ bool db2dbm;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_sta->link_id]);
+
+ if (!arsta)
+ return;
+
+ ar = ath12k_get_ar_by_vif(hw, vif, arsta->link_id);
+ if (!ar)
+ return;
+
+ db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map);
+
+ link_sinfo->rx_duration = arsta->rx_duration;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ link_sinfo->tx_duration = arsta->tx_duration;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ link_sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ link_sinfo->txrate.mcs = arsta->txrate.mcs;
+ link_sinfo->txrate.nss = arsta->txrate.nss;
+ link_sinfo->txrate.bw = arsta->txrate.bw;
+ link_sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ link_sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ link_sinfo->txrate.he_ru_alloc =
+ arsta->txrate.he_ru_alloc;
+ link_sinfo->txrate.eht_gi = arsta->txrate.eht_gi;
+ link_sinfo->txrate.eht_ru_alloc =
+ arsta->txrate.eht_ru_alloc;
+ }
+ link_sinfo->txrate.flags = arsta->txrate.flags;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+ /* TODO: Use real NF instead of default one. */
+ signal = arsta->rssi_comb;
+
+ params.pdev_id = ar->pdev->pdev_id;
+ params.vdev_id = 0;
+ params.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ if (!signal &&
+ ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ !(ath12k_mac_get_fw_stats(ar, &params)))
+ signal = arsta->rssi_beacon;
+
+ if (signal) {
+ link_sinfo->signal =
+ db2dbm ? signal : signal + ATH12K_DEFAULT_NOISE_FLOOR;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ link_sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+ if (!db2dbm)
+ link_sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+
+ link_sinfo->tx_retries = arsta->tx_retry_count;
+ link_sinfo->tx_failed = arsta->tx_retry_failed;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
@@ -11887,6 +12994,7 @@ static const struct ieee80211_ops ath12k_ops = {
.get_survey = ath12k_mac_op_get_survey,
.flush = ath12k_mac_op_flush,
.sta_statistics = ath12k_mac_op_sta_statistics,
+ .link_sta_statistics = ath12k_mac_op_link_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
.change_sta_links = ath12k_mac_op_change_sta_links,
@@ -12257,15 +13365,12 @@ ath12k_mac_setup_radio_iface_comb(struct ath12k *ar,
comb[0].beacon_int_infra_match = true;
comb[0].beacon_int_min_gcd = 100;
- if (ar->ab->hw_params->single_pdev_only) {
- comb[0].num_different_channels = 2;
- } else {
- comb[0].num_different_channels = 1;
- comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
- }
+ comb[0].num_different_channels = 1;
+ comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160);
return 0;
}
@@ -12348,25 +13453,42 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
struct ieee80211_iface_combination *combinations, *comb;
struct wiphy *wiphy = ah->hw->wiphy;
struct wiphy_radio *radio;
+ int n_combinations = 1;
struct ath12k *ar;
int i, ret;
- combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
- if (!combinations)
- return -ENOMEM;
-
if (ah->num_radio == 1) {
- ret = ath12k_mac_setup_radio_iface_comb(&ah->radio[0],
- combinations);
+ ar = &ah->radio[0];
+
+ if (ar->ab->hw_params->single_pdev_only)
+ n_combinations = 2;
+
+ combinations = kcalloc(n_combinations, sizeof(*combinations),
+ GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ ret = ath12k_mac_setup_radio_iface_comb(ar, combinations);
if (ret) {
ath12k_hw_warn(ah, "failed to setup radio interface combinations for one radio: %d",
ret);
goto err_free_combinations;
}
+ if (ar->ab->hw_params->single_pdev_only) {
+ comb = combinations + 1;
+ memcpy(comb, combinations, sizeof(*comb));
+ comb->num_different_channels = 2;
+ comb->radar_detect_widths = 0;
+ }
+
goto out;
}
+ combinations = kcalloc(n_combinations, sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
/* there are multiple radios */
radio = kcalloc(ah->num_radio, sizeof(*radio), GFP_KERNEL);
@@ -12409,7 +13531,7 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
out:
wiphy->iface_combinations = combinations;
- wiphy->n_iface_combinations = 1;
+ wiphy->n_iface_combinations = n_combinations;
return 0;
@@ -12529,6 +13651,10 @@ static int ath12k_mac_setup_register(struct ath12k *ar,
ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab);
ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab);
+ ar->rssi_info.min_nf_dbm = ATH12K_DEFAULT_NOISE_FLOOR;
+ ar->rssi_info.temp_offset = 0;
+ ar->rssi_info.noise_floor = ar->rssi_info.min_nf_dbm + ar->rssi_info.temp_offset;
+
return 0;
}
@@ -12603,7 +13729,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
else
mac_addr = ab->mac_addr;
- mbssid_max_interfaces += TARGET_NUM_VDEVS;
+ mbssid_max_interfaces += TARGET_NUM_VDEVS(ar->ab);
}
wiphy->available_antennas_rx = antennas_rx;
@@ -12642,6 +13768,14 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
+ if (test_bit(WMI_TLV_SERVICE_ETH_OFFLOAD, ar->wmi->wmi_ab->svc_map)) {
+ ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ }
+
+ if (cap->nss_ratio_enabled)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+
if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -12674,6 +13808,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
+ wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
+
/* MLO is not yet supported so disable Wireless Extensions for now
* to make sure ath12k users don't use it. This flag can be removed
* once WIPHY_FLAG_SUPPORTS_MLO is enabled.
@@ -12702,6 +13838,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
wiphy->cipher_suites = cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -12711,6 +13848,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->mbssid_max_interfaces = mbssid_max_interfaces;
wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
if (is_6ghz) {
wiphy_ext_feature_set(wiphy,
@@ -12720,6 +13858,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
}
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PUNCT);
+ if (test_bit(WMI_TLV_SERVICE_BEACON_PROTECTION_SUPPORT, ab->wmi_ab.svc_map))
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION);
ath12k_reg_init(hw);
@@ -12755,7 +13895,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
* proceeding with registration.
*/
for_each_ar(ah, ar, i)
- complete(&ar->regd_update_completed);
+ complete_all(&ar->regd_update_completed);
ret = ieee80211_register_hw(hw);
if (ret) {
@@ -13151,9 +14291,12 @@ void ath12k_mac_destroy(struct ath12k_hw_group *ag)
static void ath12k_mac_set_device_defaults(struct ath12k_base *ab)
{
+ int total_vdev;
+
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = 320000;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ total_vdev = ab->num_radios * TARGET_NUM_VDEVS(ab);
+ ab->free_vdev_map = (1LL << total_vdev) - 1;
}
int ath12k_mac_allocate(struct ath12k_hw_group *ag)
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 473611bfccdc..18c79d4002cb 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -41,6 +41,8 @@ struct ath12k_generic_iter {
#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
#define ATH12K_CHAN_WIDTH_NUM 14
+#define ATH12K_BW_NSS_MAP_ENABLE BIT(31)
+#define ATH12K_PEER_RX_NSS_160MHZ GENMASK(2, 0)
#define ATH12K_TX_POWER_MAX_VAL 70
#define ATH12K_TX_POWER_MIN_VAL 0
@@ -59,6 +61,21 @@ struct ath12k_generic_iter {
#define ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE 2
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ u8_get_bits(hecap_phy[3], IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER)
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ u8_get_bits(hecap_phy[4], IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE)
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ u8_get_bits(hecap_phy[4], IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ u8_get_bits(hecap_phy[2], IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO)
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ u8_get_bits(hecap_phy[2], IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO)
+
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 1f3cfd9b89fd..c729d5526c75 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -48,7 +48,7 @@
static const struct pci_device_id ath12k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
- {0}
+ {}
};
MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
@@ -1353,7 +1353,7 @@ static void ath12k_pci_coredump_download(struct ath12k_base *ab)
struct ath12k_tlv_dump_data *dump_tlv;
size_t hdr_len = sizeof(*file_data);
void *buf;
- u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
+ u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};
ath12k_mhi_coredump(mhi_ctrl, false);
@@ -1595,6 +1595,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab->hal_rx_ops = &hal_rx_qcn9274_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
+ ab->target_mem_mode = ath12k_core_get_memory_mode(ab);
switch (soc_hw_version_major) {
case ATH12K_PCI_SOC_HW_VERSION_2:
ab->hw_rev = ATH12K_HW_QCN9274_HW20;
@@ -1618,6 +1619,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab->hal_rx_ops = &hal_rx_wcn7850_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
+ ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
switch (soc_hw_version_major) {
case ATH12K_PCI_SOC_HW_VERSION_2:
ab->hw_rev = ATH12K_HW_WCN7850_HW20;
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
index ec7236bbccc0..f1ae9e5b5af7 100644
--- a/drivers/net/wireless/ath/ath12k/peer.c
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -8,7 +8,7 @@
#include "peer.h"
#include "debug.h"
-static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
+struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
{
struct ath12k_ml_peer *ml_peer;
@@ -100,6 +100,9 @@ struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
lockdep_assert_held(&ab->base_lock);
+ if (peer_id == HAL_INVALID_PEERID)
+ return NULL;
+
if (peer_id & ATH12K_PEER_ML_ID_VALID)
return ath12k_peer_find_by_ml_id(ab, peer_id);
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index f3a5e054d2b5..44afc0b7dd53 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -91,5 +91,33 @@ struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash
int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
+struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah,
+ const u8 *addr);
+static inline
+struct ath12k_link_sta *ath12k_peer_get_link_sta(struct ath12k_base *ab,
+ struct ath12k_peer *peer)
+{
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+
+ if (!peer->sta)
+ return NULL;
+
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ if (peer->ml_id & ATH12K_PEER_ML_ID_VALID) {
+ if (!(ahsta->links_map & BIT(peer->link_id))) {
+ ath12k_warn(ab, "peer %pM id %d link_id %d can't found in STA link_map 0x%x\n",
+ peer->addr, peer->peer_id, peer->link_id,
+ ahsta->links_map);
+ return NULL;
+ }
+ arsta = rcu_dereference(ahsta->link[peer->link_id]);
+ if (!arsta)
+ return NULL;
+ } else {
+ arsta = &ahsta->deflink;
+ }
+ return arsta;
+}
#endif /* _PEER_H_ */
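The inline ath12k_peer_get_link_sta() added above resolves a peer to its link station: for ML peers it first checks the station's links_map bit and only then dereferences the per-link entry, otherwise it returns the default link. A minimal stand-alone illustration of that lookup, using hypothetical types and a hypothetical flag value rather than the real ath12k definitions:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_LINKS	16
#define PEER_ML_ID_VALID 0x2000	/* hypothetical, stands in for ATH12K_PEER_ML_ID_VALID */

struct link_sta { int link_id; };

struct sta {
	uint16_t links_map;			/* bitmap of valid links */
	struct link_sta *link[MAX_LINKS];	/* per-link entries */
	struct link_sta deflink;		/* non-MLO default link */
};

/* Sketch of the lookup: ML peers must have their link bit set, others use deflink */
static struct link_sta *get_link_sta(struct sta *sta, uint16_t ml_id, int link_id)
{
	if (ml_id & PEER_ML_ID_VALID) {
		if (!(sta->links_map & (1u << link_id)))
			return NULL;	/* link not present in the bitmap */
		return sta->link[link_id];
	}
	return &sta->deflink;
}

int main(void)
{
	struct link_sta l1 = { .link_id = 1 };
	struct sta s = { .links_map = 1u << 1, .deflink = { .link_id = 0 } };

	s.link[1] = &l1;
	printf("resolved link_id %d\n", get_link_sta(&s, PEER_ML_ID_VALID, 1)->link_id);
	return 0;
}
```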
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 99e1fb2910d0..7c611a1fd6d0 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -3856,7 +3856,7 @@ int ath12k_qmi_init_service(struct ath12k_base *ab)
memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
ab->qmi.ab = ab;
- ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
+ ab->qmi.target_mem_mode = ab->target_mem_mode;
ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX,
&ath12k_qmi_ops, ath12k_qmi_msg_handlers);
if (ret < 0) {
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 96e6c3daecfe..abdaade3b542 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -37,7 +37,6 @@
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH12K_FIRMWARE_MODE_OFF 4
-#define ATH12K_QMI_TARGET_MEM_MODE_DEFAULT 0
#define ATH12K_BOARD_ID_DEFAULT 0xFF
@@ -602,6 +601,11 @@ struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+enum ath12k_qmi_mem_mode {
+ ATH12K_QMI_MEMORY_MODE_DEFAULT = 0,
+ ATH12K_QMI_MEMORY_MODE_LOW_512_M,
+};
+
static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
{
lockdep_assert_held(&qmi->event_lock);
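With this change the QMI target memory mode is no longer hard-coded: the PCI probe path stores a per-chip choice in ab->target_mem_mode (ath12k_core_get_memory_mode() for QCN9274, the default mode for WCN7850) and qmi.c copies it at init time. The selector itself is not part of this hunk; the sketch below only assumes it keys off available system memory, matching the LOW_512_M enumerator name:

```c
#include <stdio.h>

/* Mirrors the new enum ath12k_qmi_mem_mode from qmi.h */
enum mem_mode {
	MEMORY_MODE_DEFAULT = 0,
	MEMORY_MODE_LOW_512_M,
};

/* Hypothetical selector: the real ath12k_core_get_memory_mode() is not shown
 * in this diff; this sketch just assumes the decision is based on total RAM.
 */
static enum mem_mode get_memory_mode(unsigned long total_ram_mb)
{
	return total_ram_mb <= 512 ? MEMORY_MODE_LOW_512_M : MEMORY_MODE_DEFAULT;
}

int main(void)
{
	printf("mode for 256 MB: %d\n", get_memory_mode(256));
	printf("mode for 4096 MB: %d\n", get_memory_mode(4096));
	return 0;
}
```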
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index 96254d6fc675..7898f6981e5a 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -426,6 +426,29 @@ ath12k_map_fw_dfs_region(enum ath12k_dfs_region dfs_region)
}
}
+static u32 ath12k_get_bw_reg_flags(u16 max_bw)
+{
+ switch (max_bw) {
+ case 20:
+ return NL80211_RRF_NO_HT40 |
+ NL80211_RRF_NO_80MHZ |
+ NL80211_RRF_NO_160MHZ |
+ NL80211_RRF_NO_320MHZ;
+ case 40:
+ return NL80211_RRF_NO_80MHZ |
+ NL80211_RRF_NO_160MHZ |
+ NL80211_RRF_NO_320MHZ;
+ case 80:
+ return NL80211_RRF_NO_160MHZ |
+ NL80211_RRF_NO_320MHZ;
+ case 160:
+ return NL80211_RRF_NO_320MHZ;
+ case 320:
+ default:
+ return 0;
+ }
+}
+
static u32 ath12k_map_fw_reg_flags(u16 reg_flags)
{
u32 flags = 0;
@@ -704,7 +727,7 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
reg_rule = reg_info->reg_rules_2g_ptr + i;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2g);
- flags = 0;
+ flags = ath12k_get_bw_reg_flags(reg_info->max_bw_2g);
ath12k_reg_update_freq_range(&ab->reg_freq_2ghz, reg_rule);
} else if (reg_info->num_5g_reg_rules &&
(j < reg_info->num_5g_reg_rules)) {
@@ -718,13 +741,15 @@ ath12k_reg_build_regd(struct ath12k_base *ab,
* BW correction if required and applies flags as
* per other BW rule flags we pass from here
*/
- flags = NL80211_RRF_AUTO_BW;
+ flags = NL80211_RRF_AUTO_BW |
+ ath12k_get_bw_reg_flags(reg_info->max_bw_5g);
ath12k_reg_update_freq_range(&ab->reg_freq_5ghz, reg_rule);
} else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
(k < reg_6ghz_number)) {
reg_rule = reg_rule_6ghz + k++;
max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
- flags = NL80211_RRF_AUTO_BW;
+ flags = NL80211_RRF_AUTO_BW |
+ ath12k_get_bw_reg_flags(max_bw_6ghz);
if (reg_rule->psd_flag)
flags |= NL80211_RRF_PSD;
ath12k_reg_update_freq_range(&ab->reg_freq_6ghz, reg_rule);
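The new ath12k_get_bw_reg_flags() helper simply accumulates a restriction flag for every channel width wider than the rule's maximum bandwidth, so a 20 MHz rule forbids HT40/80/160/320 while a 320 MHz rule adds nothing. A standalone sketch of the same mapping, with placeholder bit values instead of the NL80211_RRF_* regulatory flags:

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the kernel uses the NL80211_RRF_* regulatory flag bits */
#define NO_HT40   (1u << 0)
#define NO_80MHZ  (1u << 1)
#define NO_160MHZ (1u << 2)
#define NO_320MHZ (1u << 3)

/* Same shape as ath12k_get_bw_reg_flags(): each step down in max_bw
 * adds the restriction for every wider width.
 */
static uint32_t bw_reg_flags(unsigned int max_bw)
{
	switch (max_bw) {
	case 20:
		return NO_HT40 | NO_80MHZ | NO_160MHZ | NO_320MHZ;
	case 40:
		return NO_80MHZ | NO_160MHZ | NO_320MHZ;
	case 80:
		return NO_160MHZ | NO_320MHZ;
	case 160:
		return NO_320MHZ;
	default:	/* 320 MHz or unknown: no extra restriction */
		return 0;
	}
}

int main(void)
{
	printf("flags for a 40 MHz rule: 0x%x\n", bw_reg_flags(40));
	return 0;
}
```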
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index b38f22118d73..da85c28ec355 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -201,10 +201,9 @@ static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
- config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
config->num_peers = ab->num_radios *
ath12k_core_get_max_peers_per_radio(ab);
- config->num_tids = ath12k_core_get_max_num_tids(ab);
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
@@ -537,6 +536,10 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
+ pdev_cap->nss_ratio_enabled =
+ WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
+ pdev_cap->nss_ratio_info =
+ WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
} else {
return -EINVAL;
}
@@ -782,20 +785,46 @@ struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
return skb;
}
-int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
struct sk_buff *frame)
{
+ struct ath12k *ar = arvif->ar;
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
- struct wmi_tlv *frame_tlv;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data;
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+ int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+ struct ath12k_wmi_mlo_mgmt_send_params *ml_params;
+ struct ath12k_base *ab = ar->ab;
+ struct wmi_tlv *frame_tlv, *tlv;
+ struct ath12k_skb_cb *skb_cb;
+ u32 buf_len, buf_len_aligned;
+ u32 vdev_id = arvif->vdev_id;
+ bool link_agnostic = false;
struct sk_buff *skb;
- u32 buf_len;
int ret, len;
+ void *ptr;
buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
- len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+ buf_len_aligned = roundup(buf_len, sizeof(u32));
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;
+
+ if (ieee80211_vif_is_mld(vif)) {
+ skb_cb = ATH12K_SKB_CB(frame);
+ if ((skb_cb->flags & ATH12K_SKB_MLO_STA) &&
+ ab->hw_params->hw_ops->is_frame_link_agnostic &&
+ ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) {
+ len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params);
+ ath12k_generic_dbg(ATH12K_DBG_MGMT,
+ "Sending Mgmt Frame fc 0x%0x as link agnostic",
+ mgmt->frame_control);
+ link_agnostic = true;
+ }
+ }
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -818,6 +847,28 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
memcpy(frame_tlv->value, frame->data, buf_len);
+ if (!link_agnostic)
+ goto send;
+
+ ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;
+
+ tlv = ptr;
+
+ /* Tx params not used currently */
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len);
+ ptr += cmd_len;
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params));
+ ptr += TLV_HDR_SIZE;
+
+ ml_params = ptr;
+ ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS,
+ sizeof(*ml_params));
+
+ ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID);
+
+send:
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
if (ret) {
ath12k_warn(ar->ab,
@@ -1059,15 +1110,14 @@ static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
chan->band_center_freq2 = cpu_to_le32(center_freq1);
- } else if (arg->mode == MODE_11BE_EHT160) {
+ } else if (arg->mode == MODE_11BE_EHT160 ||
+ arg->mode == MODE_11AX_HE160) {
if (arg->freq > center_freq1)
chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
else
chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
chan->band_center_freq2 = cpu_to_le32(center_freq1);
- } else if (arg->mode == MODE_11BE_EHT80_80) {
- chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
} else {
chan->band_center_freq2 = 0;
}
@@ -2013,6 +2063,9 @@ int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
cmd->ema_params = cpu_to_le32(ema_params);
}
+ cmd->feature_enable_bitmap =
+ cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
+ WMI_BEACON_PROTECTION_EN_BIT));
ptr = skb->data + sizeof(*cmd);
@@ -2152,7 +2205,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
if (arg->need_ptk_4_way) {
cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
- if (!hw_crypto_disabled)
+ if (!hw_crypto_disabled && arg->is_assoc)
cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
}
if (arg->need_gtk_2_way)
@@ -3880,7 +3933,8 @@ ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
- WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
+ WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 |
+ WMI_RSRC_CFG_FLAG1_ACK_RSSI);
wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
@@ -6116,7 +6170,7 @@ static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
}
static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
- u32 status)
+ u32 status, u32 ack_rssi)
{
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
@@ -6140,8 +6194,16 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(msdu);
- if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) {
info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ack_rssi;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
@@ -6185,6 +6247,8 @@ static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
param->pdev_id = ev->pdev_id;
param->desc_id = ev->desc_id;
param->status = ev->status;
+ param->ppdu_id = ev->ppdu_id;
+ param->ack_rssi = ev->ack_rssi;
kfree(tb);
return 0;
@@ -6764,7 +6828,7 @@ out:
* before registering the hardware.
*/
if (ar)
- complete(&ar->regd_update_completed);
+ complete_all(&ar->regd_update_completed);
return ret;
}
@@ -6975,12 +7039,13 @@ static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *sk
static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
+ struct ath12k_wmi_mgmt_rx_arg rx_ev = {};
struct ath12k *ar;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
u16 fc;
struct ieee80211_supported_band *sband;
+ s32 noise_floor;
if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
ath12k_warn(ab, "failed to extract mgmt rx event");
@@ -7042,7 +7107,11 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
status->band);
- status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
+ spin_lock_bh(&ar->data_lock);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ status->signal = rx_ev.snr + noise_floor;
status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -7089,7 +7158,7 @@ exit:
static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {};
struct ath12k *ar;
if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
@@ -7106,7 +7175,8 @@ static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *s
}
wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
- le32_to_cpu(tx_compl_param.status));
+ le32_to_cpu(tx_compl_param.status),
+ le32_to_cpu(tx_compl_param.ack_rssi));
ath12k_dbg(ab, ATH12K_DBG_MGMT,
"mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
@@ -7146,7 +7216,7 @@ static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k *ar;
- struct wmi_scan_event scan_ev = {0};
+ struct wmi_scan_event scan_ev = {};
if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
ath12k_warn(ab, "failed to extract scan event");
@@ -7323,7 +7393,7 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- struct wmi_chan_info_event ch_info_ev = {0};
+ struct wmi_chan_info_event ch_info_ev = {};
struct ath12k *ar;
struct survey_info *survey;
int idx;
@@ -7471,7 +7541,7 @@ exit:
static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
- struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {};
struct ath12k *ar;
if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
@@ -7511,7 +7581,8 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
void *data)
{
const struct wmi_service_available_event *ev;
- u32 *wmi_ext2_service_bitmap;
+ u16 wmi_ext2_service_words;
+ __le32 *wmi_ext2_service_bitmap;
int i, j;
u16 expected_len;
@@ -7543,21 +7614,21 @@ static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
ev->wmi_service_segment_bitmap[3]);
break;
case WMI_TAG_ARRAY_UINT32:
- wmi_ext2_service_bitmap = (u32 *)ptr;
+ wmi_ext2_service_bitmap = (__le32 *)ptr;
+ wmi_ext2_service_words = len / sizeof(u32);
for (i = 0, j = WMI_MAX_EXT_SERVICE;
- i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+ i < wmi_ext2_service_words && j < WMI_MAX_EXT2_SERVICE;
i++) {
do {
- if (wmi_ext2_service_bitmap[i] &
+ if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) &
BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
set_bit(j, ab->wmi_ab.svc_map);
} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi_ext2_service bitmap 0x%08x\n",
+ __le32_to_cpu(wmi_ext2_service_bitmap[i]));
}
- ath12k_dbg(ab, ATH12K_DBG_WMI,
- "wmi_ext2_service_bitmap 0x%04x 0x%04x 0x%04x 0x%04x",
- wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
- wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
break;
}
return 0;
@@ -7575,7 +7646,7 @@ static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff
static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
{
- struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {};
struct ath12k *ar;
if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
@@ -8521,7 +8592,7 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
struct ath12k *ar;
- struct wmi_pdev_temperature_event ev = {0};
+ struct wmi_pdev_temperature_event ev = {};
if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
ath12k_warn(ab, "failed to extract pdev temperature event");
@@ -9319,6 +9390,229 @@ static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
}
#endif
+static int
+ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
+ const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
+ struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
+ struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
+ s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
+ u8 num_20mhz_segments;
+ s8 min_nf, *nf_ptr;
+ int i, j;
+
+ switch (tag) {
+ case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
+ if (len < sizeof(*param_info)) {
+ ath12k_warn(ab,
+ "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
+ tag, len);
+ return -EINVAL;
+ }
+
+ param_info = ptr;
+
+ param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
+ param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);
+
+ /* The received array is actually a packed per-chain, per-20MHz-subband
+ * byte array. Unpack it into the 2D byte array.
+ */
+ nf_ptr = &param_arg.nf_hw_dbm[0][0];
+
+ for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
+ nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);
+
+ for (j = 0; j < 4; j++) {
+ *nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
+ nf_ptr++;
+ }
+ }
+
+ switch (param_arg.curr_bw) {
+ case WMI_CHAN_WIDTH_20:
+ num_20mhz_segments = 1;
+ break;
+ case WMI_CHAN_WIDTH_40:
+ num_20mhz_segments = 2;
+ break;
+ case WMI_CHAN_WIDTH_80:
+ num_20mhz_segments = 4;
+ break;
+ case WMI_CHAN_WIDTH_160:
+ num_20mhz_segments = 8;
+ break;
+ case WMI_CHAN_WIDTH_320:
+ num_20mhz_segments = 16;
+ break;
+ default:
+ ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
+ param_arg.curr_bw);
+ /* In error case, still consider the primary 20 MHz segment since
+ * that would be much better than instead of dropping the whole
+ * event
+ */
+ num_20mhz_segments = 1;
+ }
+
+ min_nf = ATH12K_DEFAULT_NOISE_FLOOR;
+
+ for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
+ if (!(param_arg.curr_rx_chainmask & BIT(i)))
+ continue;
+
+ for (j = 0; j < num_20mhz_segments; j++) {
+ if (param_arg.nf_hw_dbm[i][j] < min_nf)
+ min_nf = param_arg.nf_hw_dbm[i][j];
+ }
+ }
+
+ rssi_info->min_nf_dbm = min_nf;
+ rssi_info->nf_dbm_present = true;
+ break;
+ case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
+ if (len < sizeof(*temp_info)) {
+ ath12k_warn(ab,
+ "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
+ tag, len);
+ return -EINVAL;
+ }
+
+ temp_info = ptr;
+ rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
+ rssi_info->temp_offset_present = true;
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
+ }
+
+ return 0;
+}
+
+static int
+ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ int ret = 0;
+
+ switch (tag) {
+ case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
+ /* Fixed param is already processed */
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ /* len 0 is expected for array of struct when there
+ * is no content of that type inside that tlv
+ */
+ if (len == 0)
+ return 0;
+
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
+ data);
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Received invalid tag 0x%x for RSSI dbm conv info event\n",
+ tag);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
+ size_t len, int *pdev_id)
+{
+ struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag;
+
+ if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
+ ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
+ return -EINVAL;
+ }
+
+ tlv = (struct wmi_tlv *)ptr;
+ tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
+ ptr += sizeof(*tlv);
+
+ if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
+ ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
+ return -EINVAL;
+ }
+
+ fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
+ *pdev_id = le32_to_cpu(fixed_param->pdev_id);
+
+ return 0;
+}
+
+static void
+ath12k_wmi_update_rssi_offsets(struct ath12k *ar,
+ struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info)
+{
+ struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (rssi_info->temp_offset_present)
+ info->temp_offset = rssi_info->temp_offset;
+
+ if (rssi_info->nf_dbm_present)
+ info->min_nf_dbm = rssi_info->min_nf_dbm;
+
+ info->noise_floor = info->min_nf_dbm + info->temp_offset;
+}
+
+static void
+ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info;
+ struct ath12k *ar;
+ s32 noise_floor;
+ u32 pdev_id;
+ int ret;
+
+ ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len,
+ &pdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n",
+ ret);
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ /* If pdev is not active, ignore the event */
+ if (!ar)
+ goto out_unlock;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_rssi_dbm_conv_info_event_parser,
+ &rssi_info);
+ if (ret) {
+ ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n");
+ goto out_unlock;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ ath12k_wmi_update_rssi_offsets(ar, &rssi_info);
+ noise_floor = ath12k_pdev_get_noise_floor(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "RSSI noise floor updated, new value is %d dbm\n", noise_floor);
+out_unlock:
+ rcu_read_unlock();
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -9450,6 +9744,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_11D_NEW_COUNTRY_EVENTID:
ath12k_reg_11d_new_cc_event(ab, skb);
break;
+ case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID:
+ ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb);
+ break;
/* add Unsupported events (rare) here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
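The RSSI-to-dBm conversion handling added above boils down to this: take the minimum per-chain, per-20 MHz-segment hardware noise floor over the chains enabled in curr_rx_chainmask, add the temperature offset, and use the result as the pdev noise floor when converting management RX SNR into a signal level. A self-contained sketch of that arithmetic, with placeholder constants and outside the driver:

```c
#include <stdio.h>

#define MAX_ANTENNA	8
#define MAX_SEGMENTS	16
#define DEFAULT_NF	(-95)	/* placeholder default noise floor in dBm */

/* Pick the minimum per-chain, per-20MHz noise floor over the active chains;
 * the caller then adds the temperature offset to get the pdev noise floor.
 */
static int min_noise_floor(const signed char nf[MAX_ANTENNA][MAX_SEGMENTS],
			   unsigned int chainmask, unsigned int num_segments)
{
	int min_nf = DEFAULT_NF;
	unsigned int i, j;

	for (i = 0; i < MAX_ANTENNA; i++) {
		if (!(chainmask & (1u << i)))
			continue;
		for (j = 0; j < num_segments; j++)
			if (nf[i][j] < min_nf)
				min_nf = nf[i][j];
	}
	return min_nf;
}

int main(void)
{
	signed char nf[MAX_ANTENNA][MAX_SEGMENTS] = { { -98, -97 }, { -101, -96 } };
	int temp_offset = 2;	/* from the temperature-offset sub-TLV */
	int noise_floor = min_noise_floor(nf, 0x3, 2) + temp_offset;
	int snr = 40;		/* from the mgmt RX event */

	printf("noise floor %d dBm, signal %d dBm\n", noise_floor, snr + noise_floor);
	return 0;
}
```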
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 8627154f1680..f3b0a6f57ec2 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -222,6 +222,22 @@ enum WMI_HOST_WLAN_BAND {
WMI_HOST_WLAN_2GHZ_5GHZ_CAP = 3,
};
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+ /* HE LTF related configuration */
+ WMI_HE_AUTORATE_LTF_1X = BIT(0),
+ WMI_HE_AUTORATE_LTF_2X = BIT(1),
+ WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+ /* HE GI related configuration */
+ WMI_AUTORATE_400NS_GI = BIT(8),
+ WMI_AUTORATE_800NS_GI = BIT(9),
+ WMI_AUTORATE_1600NS_GI = BIT(10),
+ WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
enum wmi_cmd_group {
/* 0 to 2 are reserved */
WMI_GRP_START = 0x3,
@@ -734,6 +750,8 @@ enum wmi_tlv_event_id {
WMI_SERVICE_READY_EXT2_EVENTID,
WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID =
WMI_SERVICE_READY_EXT2_EVENTID + 4,
+ WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID =
+ WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID + 5,
WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_STOPPED_EVENTID,
WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@@ -1169,13 +1187,16 @@ enum wmi_tlv_vdev_param {
WMI_VDEV_PARAM_HE_RANGE_EXT,
WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_HE_LTF = 0x74,
WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
WMI_VDEV_PARAM_BSS_COLOR,
WMI_VDEV_PARAM_SET_HEMU_MODE,
WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+ WMI_VDEV_PARAM_SET_EHT_MU_MODE = 0x8005,
};
enum wmi_tlv_peer_flags {
@@ -1992,6 +2013,9 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
+ WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM = 0x427,
+ WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO,
+ WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO,
WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM = 0x442,
WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM,
WMI_TAG_MAX
@@ -2217,6 +2241,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_BEACON_PROTECTION_SUPPORT = 244,
WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
WMI_MAX_EXT_SERVICE = 256,
@@ -2230,6 +2255,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS = 361,
WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365,
+ WMI_TLV_SERVICE_ETH_OFFLOAD = 461,
WMI_MAX_EXT2_SERVICE,
};
@@ -2309,6 +2335,21 @@ enum wmi_direct_buffer_module {
WMI_DIRECT_BUF_MAX
};
+/**
+ * enum wmi_nss_ratio - NSS ratio received from FW during service ready ext event
+ * @WMI_NSS_RATIO_1BY2_NSS: Max nss of 160MHz is equal to half of the max nss of 80MHz
+ * @WMI_NSS_RATIO_3BY4_NSS: Max nss of 160MHz is equal to 3/4 of the max nss of 80MHz
+ * @WMI_NSS_RATIO_1_NSS: Max nss of 160MHz is equal to the max nss of 80MHz
+ * @WMI_NSS_RATIO_2_NSS: Max nss of 160MHz is equal to two times the max nss of 80MHz
+ */
+
+enum wmi_nss_ratio {
+ WMI_NSS_RATIO_1BY2_NSS,
+ WMI_NSS_RATIO_3BY4_NSS,
+ WMI_NSS_RATIO_1_NSS,
+ WMI_NSS_RATIO_2_NSS
+};
+
struct ath12k_wmi_pdev_band_arg {
u32 pdev_id;
u32 start_freq;
@@ -2487,6 +2528,7 @@ struct wmi_init_cmd {
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
+#define WMI_RSRC_CFG_FLAG1_ACK_RSSI BIT(18)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2628,6 +2670,12 @@ struct ath12k_wmi_hw_mode_cap_params {
} __packed;
#define WMI_MAX_HECAP_PHY_SIZE (3)
+#define WMI_NSS_RATIO_EN_DIS_BITPOS BIT(0)
+#define WMI_NSS_RATIO_EN_DIS_GET(_val) \
+ le32_get_bits(_val, WMI_NSS_RATIO_EN_DIS_BITPOS)
+#define WMI_NSS_RATIO_INFO_BITPOS GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+ le32_get_bits(_val, WMI_NSS_RATIO_INFO_BITPOS)
/* pdev_id is present in lower 16 bits of pdev_and_hw_link_ids in
* ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params.
@@ -3131,31 +3179,6 @@ struct ath12k_wmi_rx_reorder_queue_remove_arg {
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
-#define HECAP_PHYDWORD_0 0
-#define HECAP_PHYDWORD_1 1
-#define HECAP_PHYDWORD_2 2
-
-#define HECAP_PHY_SU_BFER BIT(31)
-#define HECAP_PHY_SU_BFEE BIT(0)
-#define HECAP_PHY_MU_BFER BIT(1)
-#define HECAP_PHY_UL_MUMIMO BIT(22)
-#define HECAP_PHY_UL_MUOFDMA BIT(23)
-
-#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
- u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_SU_BFER)
-
-#define HECAP_PHY_SUBFME_GET(hecap_phy) \
- u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_SU_BFEE)
-
-#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
- u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_MU_BFER)
-
-#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
- u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUMIMO)
-
-#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
- u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUOFDMA)
-
#define HE_MODE_SU_TX_BFEE BIT(0)
#define HE_MODE_SU_TX_BFER BIT(1)
#define HE_MODE_MU_TX_BFEE BIT(2)
@@ -3167,8 +3190,31 @@ struct ath12k_wmi_rx_reorder_queue_remove_arg {
#define HE_DL_MUOFDMA_ENABLE 1
#define HE_UL_MUOFDMA_ENABLE 1
#define HE_DL_MUMIMO_ENABLE 1
+#define HE_UL_MUMIMO_ENABLE 1
#define HE_MU_BFEE_ENABLE 1
#define HE_SU_BFEE_ENABLE 1
+#define HE_MU_BFER_ENABLE 1
+#define HE_SU_BFER_ENABLE 1
+
+#define EHT_MODE_SU_TX_BFEE BIT(0)
+#define EHT_MODE_SU_TX_BFER BIT(1)
+#define EHT_MODE_MU_TX_BFEE BIT(2)
+#define EHT_MODE_MU_TX_BFER BIT(3)
+#define EHT_MODE_DL_OFDMA BIT(4)
+#define EHT_MODE_UL_OFDMA BIT(5)
+#define EHT_MODE_MUMIMO BIT(6)
+#define EHT_MODE_DL_OFDMA_TXBF BIT(7)
+#define EHT_MODE_DL_OFDMA_MUMIMO BIT(8)
+#define EHT_MODE_UL_OFDMA_MUMIMO BIT(9)
+
+#define EHT_DL_MUOFDMA_ENABLE 1
+#define EHT_UL_MUOFDMA_ENABLE 1
+#define EHT_DL_MUMIMO_ENABLE 1
+#define EHT_UL_MUMIMO_ENABLE 1
+#define EHT_MU_BFEE_ENABLE 1
+#define EHT_SU_BFEE_ENABLE 1
+#define EHT_MU_BFER_ENABLE 1
+#define EHT_SU_BFER_ENABLE 1
#define HE_VHT_SOUNDING_MODE_ENABLE 1
#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
@@ -3632,6 +3678,15 @@ struct wmi_force_fw_hang_cmd {
__le32 delay_time_ms;
} __packed;
+/* Param values to be sent for WMI_VDEV_PARAM_SGI param_id
+ * which are used in 11n, 11ac systems
+ * @WMI_GI_800_NS - Always uses 0.8us (Long GI)
+ * @WMI_GI_400_NS - Firmware switches between 0.4us (Short GI)
+ * and 0.8us (Long GI) based on packet error rate.
+ */
+#define WMI_GI_800_NS 0
+#define WMI_GI_400_NS 1
+
struct wmi_vdev_set_param_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -3703,6 +3758,8 @@ struct ath12k_wmi_ftm_event {
#define WMI_EMA_BEACON_FIRST GENMASK(23, 16)
#define WMI_EMA_BEACON_LAST GENMASK(31, 24)
+#define WMI_BEACON_PROTECTION_EN_BIT BIT(0)
+
struct ath12k_wmi_bcn_tmpl_ema_arg {
u8 bcn_cnt;
u8 bcn_index;
@@ -3773,7 +3830,6 @@ struct wmi_vdev_install_key_arg {
#define WMI_HOST_MAX_HE_RATE_SET 3
#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
-#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
(ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
@@ -3963,6 +4019,7 @@ struct wmi_scan_chan_list_cmd {
} __packed;
#define WMI_MGMT_SEND_DOWNLD_LEN 64
+#define WMI_MGMT_LINK_AGNOSTIC_ID 0xFFFFFFFF
#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
@@ -3988,7 +4045,18 @@ struct wmi_mgmt_send_cmd {
/* This TLV is followed by struct wmi_mgmt_frame */
- /* Followed by struct wmi_mgmt_send_params */
+ /* Followed by struct ath12k_wmi_mlo_mgmt_send_params */
+} __packed;
+
+struct ath12k_wmi_mlo_mgmt_send_params {
+ __le32 tlv_header;
+ __le32 hw_link_id;
+} __packed;
+
+struct ath12k_wmi_mgmt_send_tx_params {
+ __le32 tlv_header;
+ __le32 tx_param_dword0;
+ __le32 tx_param_dword1;
} __packed;
struct wmi_sta_powersave_mode_cmd {
@@ -4461,6 +4529,8 @@ struct wmi_mgmt_tx_compl_event {
__le32 desc_id;
__le32 status;
__le32 pdev_id;
+ __le32 ppdu_id;
+ __le32 ack_rssi;
} __packed;
struct wmi_scan_event {
@@ -4672,7 +4742,7 @@ enum wmi_ap_ps_peer_param {
#define DISABLE_SIFS_RESPONSE_TRIGGER 0
-#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_INDEX 7
#define WMI_MAX_KEY_LEN 32
enum wmi_key_type {
@@ -6176,6 +6246,43 @@ struct wmi_mlo_link_set_active_arg {
struct wmi_ml_disallow_mode_bmap_arg disallow_bmap[WMI_ML_MAX_DISALLOW_BMAP_COMB];
};
+#define ATH12K_MAX_20MHZ_SEGMENTS 16
+#define ATH12K_MAX_NUM_ANTENNA 8
+#define ATH12K_MAX_NUM_NF_HW_DBM 32
+
+struct ath12k_wmi_rssi_dbm_conv_info_fixed_params {
+ __le32 pdev_id;
+} __packed;
+
+struct ath12k_wmi_rssi_dbm_conv_info_params {
+ __le32 curr_bw;
+ __le32 curr_rx_chainmask;
+ __le32 xbar_config;
+ a_sle32 xlna_bypass_offset;
+ a_sle32 xlna_bypass_threshold;
+ a_sle32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
+} __packed;
+
+struct ath12k_wmi_rssi_dbm_conv_temp_info_params {
+ a_sle32 offset;
+} __packed;
+
+struct ath12k_wmi_rssi_dbm_conv_param_arg {
+ u32 curr_bw;
+ u32 curr_rx_chainmask;
+ u32 xbar_config;
+ s32 xlna_bypass_offset;
+ s32 xlna_bypass_threshold;
+ s8 nf_hw_dbm[ATH12K_MAX_NUM_ANTENNA][ATH12K_MAX_20MHZ_SEGMENTS];
+};
+
+struct ath12k_wmi_rssi_dbm_conv_info_arg {
+ bool temp_offset_present;
+ s32 temp_offset;
+ bool nf_dbm_present;
+ s8 min_nf_dbm;
+};
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -6183,7 +6290,7 @@ void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
-int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
struct sk_buff *frame);
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
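The link-agnostic management TX path that pairs with these wmi.h additions appends an (currently unused) TX-params TLV, an array-of-struct TLV header, and an MLO send-params TLV whose hw_link_id is set to WMI_MGMT_LINK_AGNOSTIC_ID (0xFFFFFFFF). A rough length calculation matching that layout is sketched below; the command and TLV header sizes are illustrative assumptions, while the TX/ML param sizes follow from the two structs defined in this header:

```c
#include <stdio.h>

/* CMD_SIZE and TLV_HDR are illustrative assumptions; TX_PARAMS and ML_PARAMS
 * match the sizes of ath12k_wmi_mgmt_send_tx_params (3 x __le32) and
 * ath12k_wmi_mlo_mgmt_send_params (2 x __le32) added in this header.
 */
#define CMD_SIZE	36
#define TLV_HDR		4
#define TX_PARAMS	12
#define ML_PARAMS	8

static unsigned int roundup4(unsigned int v)
{
	return (v + 3) & ~3u;
}

/* Mirrors the length math in ath12k_wmi_mgmt_send(): base command plus the
 * frame TLV, and, for link-agnostic MLO frames, the extra TX/ML param TLVs.
 */
static unsigned int mgmt_send_len(unsigned int frame_len, int link_agnostic)
{
	unsigned int len = CMD_SIZE + TLV_HDR + roundup4(frame_len);

	if (link_agnostic)
		len += TX_PARAMS + TLV_HDR + ML_PARAMS;
	return len;
}

int main(void)
{
	printf("len (plain) = %u\n", mgmt_send_len(61, 0));
	printf("len (link agnostic) = %u\n", mgmt_send_len(61, 1));
	return 0;
}
```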
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 4825f9cb9cb8..66b2dee39155 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3116,10 +3116,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
pd_gain_overlap;
/* Force each power step to be at least 0.5 dB */
- if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1)
- pwr_step = pdadc_tmp[1] - pdadc_tmp[0];
- else
- pwr_step = 1;
+ pwr_step = max(pdadc_tmp[1] - pdadc_tmp[0], 1);
/* If pdadc_0 is negative, we need to extrapolate
* below this pdgain by a number of pwr_steps */
@@ -3144,11 +3141,8 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
continue;
/* Force each power step to be at least 0.5 dB */
- if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1)
- pwr_step = pdadc_tmp[table_size - 1] -
- pdadc_tmp[table_size - 2];
- else
- pwr_step = 1;
+ pwr_step = max(pdadc_tmp[table_size - 1] -
+ pdadc_tmp[table_size - 2], 1);
/* Extrapolate above */
while ((pdadc_0 < (s16) pdadc_n) &&
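The ath5k change above folds the open-coded minimum-step clamping into max(); since the pdadc entries appear to be in 0.5 dB units, clamping the difference to at least 1 is what enforces the 0.5 dB minimum mentioned in the comments. A tiny standalone check of the equivalence, using a plain macro instead of the kernel's type-checked max():

```c
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	short pdadc[2] = { 10, 10 };	/* equal entries -> zero raw step */
	int pwr_step;

	/* Old form */
	if ((pdadc[1] - pdadc[0]) > 1)
		pwr_step = pdadc[1] - pdadc[0];
	else
		pwr_step = 1;
	printf("old: %d\n", pwr_step);

	/* New form: identical result, clamps the step to at least 1 */
	pwr_step = max(pdadc[1] - pdadc[0], 1);
	printf("new: %d\n", pwr_step);
	return 0;
}
```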
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 0ea1608b47fd..22101c96713f 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -543,7 +543,7 @@
* Queue control unit (QCU) registers [5211+]
*
* Card has 12 TX Queues but i see that only 0-9 are used (?)
- * both in binary HAL (see ah.h) and ar5k. Each queue has it's own
+ * both in binary HAL (see ah.h) and ar5k. Each queue has its own
* TXDP at addresses 0x0800 - 0x082c, a CBR (Constant Bit Rate)
* configuration register (0x08c0 - 0x08ec), a ready time configuration
* register (0x0900 - 0x092c), a misc configuration register (0x09c0 -
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4f0a7a185fc9..830350bda531 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -91,7 +91,7 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
/*
* Turn on power to get hardware (target) version and leave power
- * on delibrately as we will boot the hardware anyway within few
+ * on deliberately as we will boot the hardware anyway within few
* seconds.
*/
ret = ath6kl_hif_power_on(ar);
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index d1942537ea10..c693783bb9de 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -513,7 +513,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
out:
/*
* An optimization to bypass reading the IRQ status registers
- * unecessarily which can re-wake the target, if upper layers
+ * unnecessarily which can re-wake the target, if upper layers
* determine that we are in a low-throughput mode, we can rely on
* taking another interrupt rather than re-checking the status
* registers which can re-wake the target.
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index d3534a29c4f0..d61be202677c 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -485,7 +485,7 @@ struct htc_endpoint_stats {
/* count of credits received via another endpoint */
u32 cred_from_ep0;
- /* count of consummed credits */
+ /* count of consumed credits */
u32 cred_cosumd;
/* count of credits returned */
@@ -596,7 +596,7 @@ struct htc_target {
/* protects free_ctrl_txbuf and free_ctrl_rxbuf */
spinlock_t htc_lock;
- /* FIXME: does this protext rx_bufq and endpoint structures or what? */
+ /* FIXME: does this protect rx_bufq and endpoint structures or what? */
spinlock_t rx_lock;
/* protects endpoint->txq */
@@ -624,7 +624,7 @@ struct htc_target {
int chk_irq_status_cnt;
- /* counts the number of Tx without bundling continously per AC */
+ /* counts the number of Tx without bundling continuously per AC */
u32 ac_tx_count[WMM_NUM_AC];
struct {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index f8a94d764be6..122e07ef3965 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -938,7 +938,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
/*
* if an AC has bundling disabled and no tx bundling
- * has occured continously for a certain number of TX,
+ * has occurred continuously for a certain number of TX,
* enable tx bundling for this AC
*/
if (!bundle_sent) {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 2f2edfe43761..7b823be9d846 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -718,7 +718,7 @@ static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
spin_lock_bh(&target->tx_lock);
/*
- * interate from the front of tx lookup queue
+ * iterate from the front of tx lookup queue
* this lookup should be fast since lower layers completes in-order and
* so the completed packet should be at the head of the list generally
*/
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 9b100ee2ebc3..782209dcb782 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -207,7 +207,7 @@ static const struct ath6kl_hw hw_list[] = {
/*
* This configuration item sets the value of disconnect timeout
- * Firmware delays sending the disconnec event to the host for this
+ * Firmware delays sending the disconnect event to the host for this
* timeout after is gets disconnected from the current AP.
* If the firmware successly roams within the disconnect timeout
* it sends a new connect event
@@ -221,7 +221,7 @@ struct sk_buff *ath6kl_buf_alloc(int size)
struct sk_buff *skb;
u16 reserved;
- /* Add chacheline space at front and back of buffer */
+ /* Add cacheline space at front and back of buffer */
reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4);
skb = dev_alloc_skb(size + reserved);
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index d62b96459751..59068ea3879b 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -583,7 +583,7 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
switch (vif->nw_type) {
case AP_NETWORK:
/*
- * reconfigure any saved RSN IE capabilites in the beacon /
+ * reconfigure any saved RSN IE capabilities in the beacon /
* probe response to stay in sync with the supplicant.
*/
if (vif->rsn_capab &&
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 9ab091044706..83de40bc4445 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -486,7 +486,7 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
ar_sdio = sdio_get_drvdata(func);
atomic_set(&ar_sdio->irq_handling, 1);
/*
- * Release the host during interrups so we can pick it back up when
+ * Release the host during interrupts so we can pick it back up when
* we process commands.
*/
sdio_release_host(ar_sdio->func);
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 5220809841a6..38bb501fc553 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -93,7 +93,7 @@ struct ath6kl_urb_context {
#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
-/* diagnostic command defnitions */
+/* diagnostic command definitions */
#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
#define ATH6KL_USB_CONTROL_REQ_DIAG_CMD 3
@@ -882,7 +882,7 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
return -ENOMEM;
}
- /* note: if successful returns number of bytes transfered */
+ /* note: if successful returns number of bytes transferred */
ret = usb_control_msg(ar_usb->udev,
usb_sndctrlpipe(ar_usb->udev, 0),
req,
@@ -914,7 +914,7 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
return -ENOMEM;
}
- /* note: if successful returns number of bytes transfered */
+ /* note: if successful returns number of bytes transferred */
ret = usb_control_msg(ar_usb->udev,
usb_rcvctrlpipe(ar_usb->udev, 0),
req,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 84317afe4651..08a154bce139 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2601,7 +2601,7 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
}
/*
- * Indicate activty change to driver layer only if this is the
+ * Indicate activity change to driver layer only if this is the
* first TSID to get created in this AC explicitly or an implicit
* fat pipe is getting created.
*/
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 68384159870b..3080d82e25cc 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -655,7 +655,7 @@ enum wmi_mgmt_frame_type {
enum wmi_ie_field_type {
WMI_RSN_IE_CAPB = 0x1,
- WMI_IE_FULL = 0xFF, /* indicats full IE */
+ WMI_IE_FULL = 0xFF, /* indicates full IE */
};
/* WMI_CONNECT_CMDID */
@@ -1178,10 +1178,10 @@ struct wmi_create_pstream_cmd {
__le32 sba;
__le32 medium_time;
- /* in octects */
+ /* in octets */
__le16 nominal_msdu;
- /* in octects */
+ /* in octets */
__le16 max_msdu;
u8 traffic_class;
@@ -1742,7 +1742,7 @@ struct wmi_scan_complete_event {
/*
* Special frame receive Event.
- * Mechanism used to inform host of the receiption of the special frames.
+ * Mechanism used to inform host of the reception of the special frames.
* Consists of special frame info header followed by special frame body.
* The 802.11 header is not included.
*/
@@ -1860,7 +1860,7 @@ struct wmi_target_stats {
/*
* WMI_RSSI_THRESHOLD_EVENTID.
* Indicate the RSSI events to host. Events are indicated when we breach a
- * thresold value.
+ * threshold value.
*/
enum wmi_rssi_threshold_val {
WMI_RSSI_THRESHOLD1_ABOVE = 0,
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 49b7ab26c477..802e6596a6a8 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -16,37 +16,21 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/nl80211.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+
#include "ath9k.h"
-static const struct platform_device_id ath9k_platform_id_table[] = {
- {
- .name = "ath9k",
- .driver_data = AR5416_AR9100_DEVID,
- },
- {
- .name = "ar933x_wmac",
- .driver_data = AR9300_DEVID_AR9330,
- },
- {
- .name = "ar934x_wmac",
- .driver_data = AR9300_DEVID_AR9340,
- },
- {
- .name = "qca955x_wmac",
- .driver_data = AR9300_DEVID_QCA955X,
- },
- {
- .name = "qca953x_wmac",
- .driver_data = AR9300_DEVID_AR953X,
- },
- {
- .name = "qca956x_wmac",
- .driver_data = AR9300_DEVID_QCA956X,
- },
+static const struct of_device_id ath9k_of_match_table[] = {
+ { .compatible = "qca,ar9130-wifi", .data = (void *)AR5416_AR9100_DEVID },
+ { .compatible = "qca,ar9330-wifi", .data = (void *)AR9300_DEVID_AR9330 },
+ { .compatible = "qca,ar9340-wifi", .data = (void *)AR9300_DEVID_AR9340 },
+ { .compatible = "qca,qca9530-wifi", .data = (void *)AR9300_DEVID_AR953X },
+ { .compatible = "qca,qca9550-wifi", .data = (void *)AR9300_DEVID_QCA955X },
+ { .compatible = "qca,qca9560-wifi", .data = (void *)AR9300_DEVID_QCA956X },
{},
};
@@ -71,19 +55,14 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
static int ath_ahb_probe(struct platform_device *pdev)
{
- void __iomem *mem;
- struct ath_softc *sc;
struct ieee80211_hw *hw;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- int irq;
- int ret = 0;
+ struct ath_softc *sc;
struct ath_hw *ah;
+ void __iomem *mem;
char hw_name[64];
-
- if (!dev_get_platdata(&pdev->dev)) {
- dev_err(&pdev->dev, "no platform data specified\n");
- return -EINVAL;
- }
+ u16 dev_id;
+ int irq;
+ int ret;
mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem)) {
@@ -117,7 +96,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_free_hw;
}
- ret = ath9k_init_device(id->driver_data, sc, &ath_ahb_bus_ops);
+ dev_id = (u16)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
+ ret = ath9k_init_device(dev_id, sc, &ath_ahb_bus_ops);
if (ret) {
dev_err(&pdev->dev, "failed to initialize device\n");
goto err_irq;
@@ -155,11 +135,11 @@ static struct platform_driver ath_ahb_driver = {
.remove = ath_ahb_remove,
.driver = {
.name = "ath9k",
+ .of_match_table = ath9k_of_match_table,
},
- .id_table = ath9k_platform_id_table,
};
-MODULE_DEVICE_TABLE(platform, ath9k_platform_id_table);
+MODULE_DEVICE_TABLE(of, ath9k_of_match_table);
int ath_ahb_init(void)
{
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 74edd007cd8d..6d376f85fbde 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -53,7 +53,7 @@ MODULE_PARM_DESC(led_id,
*
* There are several buses present on the WIL6210 card.
* Same memory areas are visible at different address on
- * the different busses. There are 3 main bus masters:
+ * the different buses. There are 3 main bus masters:
* - MAC CPU (ucode)
* - User CPU (firmware)
* - AHB (host)
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 38f64524019e..a6761a1e9d7d 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -3495,7 +3495,7 @@ struct wmi_aoa_meas_event {
u8 channel;
/* enum wmi_aoa_meas_type */
u8 aoa_meas_type;
- /* Measurments are from RFs, defined by the mask */
+ /* Measurements are from RFs, defined by the mask */
__le32 meas_rf_mask;
/* enum wmi_aoa_meas_status */
u8 meas_status;
@@ -3634,7 +3634,7 @@ struct wmi_tof_ftm_per_dest_res_event {
__le32 tsf_sync;
/* actual received ftm per burst */
u8 actual_ftm_per_burst;
- /* Measurments are from RFs, defined by the mask */
+ /* Measurements are from RFs, defined by the mask */
__le32 meas_rf_mask;
u8 reserved0[3];
struct wmi_responder_ftm_res responder_ftm_res[];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 6bc107476a2a..8ab7d1e34a6e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -996,6 +996,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359, WCC),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43751, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752, CYW),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 40a9a8177de6..8af402555b5e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1559,10 +1559,6 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EAGAIN;
}
- /* If scan req comes for p2p0, send it over primary I/F */
- if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
- vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
-
brcmf_dbg(SCAN, "START ESCAN\n");
cfg->scan_request = request;
@@ -1578,6 +1574,10 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
if (err)
goto scan_out;
+ /* If scan req comes for p2p0, send it over primary I/F */
+ if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+ vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
err = brcmf_do_escan(vif->ifp, request);
if (err)
goto scan_out;
@@ -3878,7 +3878,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
brcmf_dbg(SCAN, "Enter\n");
if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
- brcmf_dbg(SCAN, "Event data to small. Ignore\n");
+ brcmf_dbg(SCAN, "Event data too small. Ignore\n");
return 0;
}
@@ -4046,7 +4046,7 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
brcmf_dbg(SCAN, "Enter\n");
if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
- brcmf_dbg(SCAN, "Event data to small. Ignore\n");
+ brcmf_dbg(SCAN, "Event data too small. Ignore\n");
return 0;
}
@@ -4308,7 +4308,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
brcmf_set_mpc(ifp, 1);
} else {
- /* Configure WOWL paramaters */
+ /* Configure WOWL parameters */
brcmf_configure_wowl(cfg, ifp, wowl);
/* Prevent disassociation due to inactivity with keep-alive */
@@ -5544,8 +5544,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct brcmf_fil_action_frame_le *action_frame;
struct brcmf_fil_af_params_le *af_params;
bool ack;
- s32 chan_nr;
- u32 freq;
+ __le32 hw_ch;
brcmf_dbg(TRACE, "Enter\n");
@@ -5606,25 +5605,34 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
/* Add the channel. Use the one specified as parameter if any or
* the current one (got from the firmware) otherwise
*/
- if (chan)
- freq = chan->center_freq;
- else
- brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
- &freq);
- chan_nr = ieee80211_frequency_to_channel(freq);
- af_params->channel = cpu_to_le32(chan_nr);
+ if (chan) {
+ hw_ch = cpu_to_le32(chan->hw_value);
+ } else {
+ err = brcmf_fil_cmd_data_get(vif->ifp,
+ BRCMF_C_GET_CHANNEL,
+ &hw_ch, sizeof(hw_ch));
+ if (err) {
+ bphy_err(drvr,
+ "unable to get current hw channel\n");
+ goto free;
+ }
+ }
+ af_params->channel = hw_ch;
+
af_params->dwell_time = cpu_to_le32(params->wait);
memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
le16_to_cpu(action_frame->len));
- brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
- *cookie, le16_to_cpu(action_frame->len), freq);
+ brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, channel=%d\n",
+ *cookie, le16_to_cpu(action_frame->len),
+ le32_to_cpu(af_params->channel));
ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
af_params);
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
GFP_KERNEL);
+free:
kfree(af_params);
} else {
brcmf_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control);
@@ -8330,7 +8338,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg->d11inf.io_type = (u8)io_type;
brcmu_d11_attach(&cfg->d11inf);
- /* regulatory notifer below needs access to cfg so
+ /* regulatory notifier below needs access to cfg so
* assign it now.
*/
drvr->config = cfg;
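The mgmt_tx change in this file stops converting chan->center_freq back to a channel number and instead passes chan->hw_value, which brcmfmac already populates with the channel number, and it now bails out cleanly when the firmware query for the current channel fails. For reference, a simplified version of the conversion the old path performed (2.4/5 GHz only; the real ieee80211_frequency_to_channel() also covers the 6 GHz and 60 GHz cases):

```c
#include <stdio.h>

/* Simplified sketch of the frequency-to-channel mapping the removed code
 * relied on; chan->hw_value carries the same channel number directly.
 */
static int freq_to_channel(int freq_mhz)
{
	if (freq_mhz == 2484)
		return 14;
	if (freq_mhz < 2484)
		return (freq_mhz - 2407) / 5;
	return (freq_mhz - 5000) / 5;	/* 5 GHz band (simplified) */
}

int main(void)
{
	printf("2412 MHz -> channel %d\n", freq_to_channel(2412));
	printf("5180 MHz -> channel %d\n", freq_to_channel(5180));
	return 0;
}
```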
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 2ef92ef25517..9074ab49e806 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -739,6 +739,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case CY_CC_4373_CHIP_ID:
return 0x160000;
case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_43751_CHIP_ID:
case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
@@ -1450,6 +1451,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
reg = chip->ops->read32(chip->ctx, addr);
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
case BRCM_CC_4359_CHIP_ID:
+ case BRCM_CC_43751_CHIP_ID:
case CY_CC_43752_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 75f101622db1..688f16c51319 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -525,7 +525,7 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
if (!settings)
return NULL;
- /* start by using the module paramaters */
+ /* start by using the module parameters */
settings->p2p_enable = !!brcmf_p2p_enable;
settings->feature_disable = brcmf_feature_disable;
settings->fcmode = brcmf_fcmode;
@@ -612,7 +612,7 @@ static int __init brcmfmac_module_init(void)
if (err == -ENODEV)
brcmf_dbg(INFO, "No platform data available.\n");
- /* Initialize global module paramaters */
+ /* Initialize global module parameters */
brcmf_mp_attach();
/* Continue the initialization by registering the different busses */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 2be2986d2110..3bdb6984b2dd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -20,7 +20,7 @@
*/
/**
- * struct brcmf_mp_global_t - Global module paramaters.
+ * struct brcmf_mp_global_t - Global module parameters.
*
* @firmware_path: Alternative firmware path.
*/
@@ -31,7 +31,7 @@ struct brcmf_mp_global_t {
extern struct brcmf_mp_global_t brcmf_mp_global;
/**
- * struct brcmf_mp_device - Device module paramaters.
+ * struct brcmf_mp_device - Device module parameters.
*
* @p2p_enable: Legacy P2P0 enable (old wpa_supplicant).
* @feature_disable: Feature_disable bitmask.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index d53839f855d7..399b6810e394 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -67,7 +67,7 @@ struct brcmf_ampdu_rx_reorder {
/* Forward decls for struct brcmf_pub (see below) */
struct brcmf_proto; /* device communication protocol info */
struct brcmf_fws_info; /* firmware signalling info */
-struct brcmf_mp_device; /* module paramateres, device specific */
+struct brcmf_mp_device; /* module parameters, device specific */
/*
* struct brcmf_rev_info
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index c9537fb597ce..4f0ea4347840 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -112,8 +112,7 @@ int brcmf_cyw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct brcmf_cfg80211_vif *vif;
s32 err = 0;
bool ack = false;
- s32 chan_nr;
- u32 freq;
+ __le16 hw_ch;
struct brcmf_mf_params_le *mf_params;
u32 mf_params_len;
s32 ready;
@@ -143,13 +142,18 @@ int brcmf_cyw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mf_params->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
mf_params->frame_control = mgmt->frame_control;
- if (chan)
- freq = chan->center_freq;
- else
- brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
- &freq);
- chan_nr = ieee80211_frequency_to_channel(freq);
- mf_params->channel = cpu_to_le16(chan_nr);
+ if (chan) {
+ hw_ch = cpu_to_le16(chan->hw_value);
+ } else {
+ err = brcmf_fil_cmd_data_get(vif->ifp, BRCMF_C_GET_CHANNEL,
+ &hw_ch, sizeof(hw_ch));
+ if (err) {
+ bphy_err(drvr, "unable to get current hw channel\n");
+ goto free;
+ }
+ }
+ mf_params->channel = hw_ch;
+
memcpy(&mf_params->da[0], &mgmt->da[0], ETH_ALEN);
memcpy(&mf_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
mf_params->packet_id = cpu_to_le32(*cookie);
@@ -159,7 +163,8 @@ int brcmf_cyw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
brcmf_dbg(TRACE, "Auth frame, cookie=%d, fc=%04x, len=%d, channel=%d\n",
le32_to_cpu(mf_params->packet_id),
le16_to_cpu(mf_params->frame_control),
- le16_to_cpu(mf_params->len), chan_nr);
+ le16_to_cpu(mf_params->len),
+ le16_to_cpu(mf_params->channel));
vif->mgmt_tx_id = le32_to_cpu(mf_params->packet_id);
set_bit(BRCMF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status);
@@ -185,6 +190,7 @@ int brcmf_cyw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
tx_status:
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
GFP_KERNEL);
+free:
kfree(mf_params);
return err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h
index 08c69142495a..669564382e32 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/fwil_types.h
@@ -80,7 +80,7 @@ struct brcmf_mf_params_le {
u8 da[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le32 packet_id;
- u8 data[] __counted_by(len);
+ u8 data[] __counted_by_le(len);
};
#endif /* CYW_FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 6e0c90f4718b..0dc9d28cd77b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1403,7 +1403,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
u8 action;
if (e->datalen < sizeof(*rxframe)) {
- brcmf_dbg(SCAN, "Event data to small. Ignore\n");
+ brcmf_dbg(SCAN, "Event data too small. Ignore\n");
return 0;
}
@@ -1949,7 +1949,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
e->reason);
if (e->datalen < sizeof(*rxframe)) {
- brcmf_dbg(SCAN, "Event data to small. Ignore\n");
+ brcmf_dbg(SCAN, "Event data too small. Ignore\n");
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 9747928a3650..6327f4eca500 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -71,6 +71,7 @@ BRCMF_FW_CLM_DEF(4377B3, "brcmfmac4377b3-pcie");
BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
BRCMF_FW_CLM_DEF(4378B3, "brcmfmac4378b3-pcie");
BRCMF_FW_CLM_DEF(4387C2, "brcmfmac4387c2-pcie");
+BRCMF_FW_CLM_DEF(54591, "brcmfmac54591-pcie");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
@@ -88,6 +89,7 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0x000007FF, 4355),
+ BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0x00002000, 54591),
BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0xFFFFF800, 4355C1), /* rev ID 12/C2 seen */
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
@@ -2522,10 +2524,19 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto fail_bus;
- ret = brcmf_pcie_read_otp(devinfo);
- if (ret) {
- brcmf_err(bus, "failed to parse OTP\n");
- goto fail_brcmf;
+ /* otp read operation */
+ switch (bus->fwvid) {
+ case BRCMF_FWVENDOR_WCC:
+ case BRCMF_FWVENDOR_BCA:
+ ret = brcmf_pcie_read_otp(devinfo);
+ if (ret) {
+ brcmf_err(bus, "failed to parse OTP\n");
+ goto fail_brcmf;
+ }
+ break;
+ case BRCMF_FWVENDOR_CYW:
+ default:
+ break;
}
#ifdef DEBUG
@@ -2740,7 +2751,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4387_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43752_DEVICE_ID, WCC_SEED),
-
+ BRCMF_PCIE_DEVICE(CY_PCIE_54591_DEVICE_ID, CYW),
{ /* end: all zeroes */ }
};
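As a reminder of how the revision masks above are interpreted, here is a simplified sketch, not the brcmfmac lookup itself: each table entry carries a bitmask of chip revisions, and an entry matches when the bit for the detected revision is set, so 0x00002000 selects only revision 13 of the 4355 chip while 0x000007FF and 0xFFFFF800 cover the remaining revisions. Struct and function names below are illustrative.

	#include <linux/bits.h>
	#include <linux/types.h>

	struct fw_map_entry {
		u32 chipid;
		u32 revmask;	/* bit N set => entry applies to chip revision N */
		const char *fw_base;
	};

	/* Simplified lookup: first entry whose chip id and revision bit match wins. */
	static const char *pick_fw_base(const struct fw_map_entry *tbl, size_t n,
					u32 chipid, u32 chiprev)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (tbl[i].chipid == chipid && (tbl[i].revmask & BIT(chiprev)))
				return tbl[i].fw_base;
		return NULL;
	}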
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index cf26ab15ee0c..8a0bad5119a0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -654,6 +654,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
+ BRCMF_FW_ENTRY(BRCM_CC_43751_CHIP_ID, 0xFFFFFFFF, 43752),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
@@ -3424,7 +3425,8 @@ err:
static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
{
- if (bus->ci->chip == CY_CC_43012_CHIP_ID ||
+ if (bus->ci->chip == BRCM_CC_43751_CHIP_ID ||
+ bus->ci->chip == CY_CC_43012_CHIP_ID ||
bus->ci->chip == CY_CC_43752_CHIP_ID)
return true;
else
@@ -4275,6 +4277,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
bus->hostintmask, NULL);
switch (sdiod->func1->device) {
+ case SDIO_DEVICE_ID_BROADCOM_43751:
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index b056336d5da6..f0129d10d2b9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -927,10 +927,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
/* Wait until the usb device reports it received all
* the bytes we sent */
if ((rdlbytes == sent) && (rdlbytes != dllen)) {
- if ((dllen-sent) < TRX_RDL_CHUNK)
- sendlen = dllen-sent;
- else
- sendlen = TRX_RDL_CHUNK;
+ sendlen = min(dllen - sent, TRX_RDL_CHUNK);
/* simply avoid having to send a ZLP by ensuring we
* never have an even
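The conversion above is the usual open-coded-minimum cleanup; a minimal sketch of the same idiom using the kernel's min() helper (the chunk size here is an illustrative stand-in for TRX_RDL_CHUNK):

	#include <linux/minmax.h>

	#define CHUNK_SIZE 1400	/* illustrative, stands in for TRX_RDL_CHUNK */

	static int next_send_len(int total, int sent)
	{
		/* min() type-checks both operands, unlike the open-coded if/else */
		return min(total - sent, CHUNK_SIZE);
	}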
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 8ab452cf48c4..aadcff1e2b5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/bcma/bcma.h>
+#include <linux/string_choices.h>
#include <net/mac80211.h>
#include <defs.h>
#include "phy/phy_int.h"
@@ -540,13 +541,13 @@ static int brcms_ops_config(struct ieee80211_hw *hw, int radio_idx,
conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- brcms_dbg_info(core, "%s: change monitor mode: %s\n",
- __func__, conf->flags & IEEE80211_CONF_MONITOR ?
- "true" : "false");
+ brcms_dbg_info(core, "%s: change monitor mode: %s\n", __func__,
+ str_true_false(conf->flags &
+ IEEE80211_CONF_MONITOR));
if (changed & IEEE80211_CONF_CHANGE_PS)
brcms_err(core, "%s: change power-save mode: %s (implement)\n",
- __func__, conf->flags & IEEE80211_CONF_PS ?
- "true" : "false");
+ __func__,
+ str_true_false(conf->flags & IEEE80211_CONF_PS));
if (changed & IEEE80211_CONF_CHANGE_POWER) {
err = brcms_c_set_tx_power(wl->wlc, conf->power_level);
@@ -697,7 +698,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
/* Beaconing should be enabled/disabled (beaconing modes) */
brcms_err(core, "%s: Beacon enabled: %s\n", __func__,
- info->enable_beacon ? "true" : "false");
+ str_true_false(info->enable_beacon));
if (info->enable_beacon &&
hw->wiphy->flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) {
brcms_c_enable_probe_resp(wl->wlc, true);
@@ -716,7 +717,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_IBSS) {
/* IBSS join status changed */
brcms_err(core, "%s: IBSS joined: %s (implement)\n",
- __func__, vif->cfg.ibss_joined ? "true" : "false");
+ __func__, str_true_false(vif->cfg.ibss_joined));
}
if (changed & BSS_CHANGED_ARP_FILTER) {
@@ -731,7 +732,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
* Note that it is only ever disabled for station mode.
*/
brcms_err(core, "%s: qos enabled: %s (implement)\n",
- __func__, info->qos ? "true" : "false");
+ __func__, str_true_false(info->qos));
}
return;
}
@@ -908,7 +909,7 @@ static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct brcms_info *wl = hw->priv;
int ret;
- no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
+ no_printk("%s: drop = %s\n", __func__, str_true_false(drop));
ret = wait_event_timeout(wl->tx_flush_wq,
brcms_tx_flush_completed(wl),
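str_true_false() comes from <linux/string_choices.h> and simply maps a boolean to the strings "true"/"false", which is what the ternaries above were open-coding. A minimal usage sketch:

	#include <linux/printk.h>
	#include <linux/string_choices.h>

	static void report_monitor_mode(bool enabled)
	{
		/* replaces: enabled ? "true" : "false" */
		pr_info("monitor mode: %s\n", str_true_false(enabled));
	}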
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index c3d7aa570b4e..ce6ce2dea39c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -127,23 +127,6 @@ void wlc_phyreg_exit(struct brcms_phy_pub *pih)
wlapi_bmac_ucode_wake_override_phyreg_clear(pi->sh->physhim);
}
-void wlc_radioreg_enter(struct brcms_phy_pub *pih)
-{
- struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
- wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, MCTL_LOCK_RADIO);
-
- udelay(10);
-}
-
-void wlc_radioreg_exit(struct brcms_phy_pub *pih)
-{
- struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
-
- (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
- pi->phy_wreg = 0;
- wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
-}
-
u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
{
u16 data;
@@ -263,11 +246,6 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
write_radio_reg(pi, addr, (rval & ~mask) | (val & mask));
}
-void write_phy_channel_reg(struct brcms_phy *pi, uint val)
-{
- bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
-}
-
u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
{
bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
@@ -692,18 +670,6 @@ void wlc_phy_por_inform(struct brcms_phy_pub *ppi)
pi->phy_init_por = true;
}
-void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock)
-{
- struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
-
- pi->edcrs_threshold_lock = lock;
-
- write_phy_reg(pi, 0x22c, 0x46b);
- write_phy_reg(pi, 0x22d, 0x46b);
- write_phy_reg(pi, 0x22e, 0x3c0);
- write_phy_reg(pi, 0x22f, 0x3c0);
-}
-
void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal)
{
struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
@@ -1083,20 +1049,6 @@ void wlc_phy_mute_upd(struct brcms_phy_pub *pih, bool mute, u32 flags)
return;
}
-void wlc_phy_clear_tssi(struct brcms_phy_pub *pih)
-{
- struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
-
- if (ISNPHY(pi)) {
- return;
- } else {
- wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_0, NULL_TSSI_W);
- wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_1, NULL_TSSI_W);
- wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_0, NULL_TSSI_W);
- wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_1, NULL_TSSI_W);
- }
-}
-
static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
{
return false;
@@ -1136,13 +1088,6 @@ void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
}
}
-u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
-
- return pi->bw;
-}
-
void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw)
{
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@@ -1182,36 +1127,6 @@ void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec)
}
-int wlc_phy_chanspec_freq2bandrange_lpssn(uint freq)
-{
- int range = -1;
-
- if (freq < 2500)
- range = WL_CHAN_FREQ_RANGE_2G;
- else if (freq <= 5320)
- range = WL_CHAN_FREQ_RANGE_5GL;
- else if (freq <= 5700)
- range = WL_CHAN_FREQ_RANGE_5GM;
- else
- range = WL_CHAN_FREQ_RANGE_5GH;
-
- return range;
-}
-
-int wlc_phy_chanspec_bandrange_get(struct brcms_phy *pi, u16 chanspec)
-{
- int range = -1;
- uint channel = CHSPEC_CHANNEL(chanspec);
- uint freq = wlc_phy_channel2freq(channel);
-
- if (ISNPHY(pi))
- range = wlc_phy_get_chan_freq_range_nphy(pi, channel);
- else if (ISLCNPHY(pi))
- range = wlc_phy_chanspec_freq2bandrange_lpssn(freq);
-
- return range;
-}
-
void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
bool wide_filter)
{
@@ -1254,50 +1169,6 @@ wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
}
}
-u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
- uint i;
- uint channel;
- u16 chspec;
-
- for (i = 0; i < ARRAY_SIZE(chan_info_all); i++) {
- channel = chan_info_all[i].chan;
-
- if (ISNPHY(pi) && pi->bw == WL_CHANSPEC_BW_40) {
- uint j;
-
- for (j = 0; j < ARRAY_SIZE(chan_info_all); j++) {
- if (chan_info_all[j].chan ==
- channel + CH_10MHZ_APART)
- break;
- }
-
- if (j == ARRAY_SIZE(chan_info_all))
- continue;
-
- channel = upper_20_sb(channel);
- chspec = channel | WL_CHANSPEC_BW_40 |
- WL_CHANSPEC_CTL_SB_LOWER;
- if (band == BRCM_BAND_2G)
- chspec |= WL_CHANSPEC_BAND_2G;
- else
- chspec |= WL_CHANSPEC_BAND_5G;
- } else
- chspec = ch20mhz_chspec(channel);
-
- if ((pi->a_band_high_disable) && (channel >= FIRST_REF5_CHANNUM)
- && (channel <= LAST_REF5_CHANNUM))
- continue;
-
- if ((band == BRCM_BAND_2G && channel <= CH_MAX_2G_CHANNEL) ||
- (band == BRCM_BAND_5G && channel > CH_MAX_2G_CHANNEL))
- return chspec;
- }
-
- return (u16) INVCHANSPEC;
-}
-
int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override)
{
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@@ -1308,56 +1179,6 @@ int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override)
return 0;
}
-void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
- struct txpwr_limits *txpwr)
-{
- bool mac_enabled = false;
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
-
- memcpy(&pi->tx_user_target[TXP_FIRST_CCK],
- &txpwr->cck[0], BRCMS_NUM_RATES_CCK);
-
- memcpy(&pi->tx_user_target[TXP_FIRST_OFDM],
- &txpwr->ofdm[0], BRCMS_NUM_RATES_OFDM);
- memcpy(&pi->tx_user_target[TXP_FIRST_OFDM_20_CDD],
- &txpwr->ofdm_cdd[0], BRCMS_NUM_RATES_OFDM);
-
- memcpy(&pi->tx_user_target[TXP_FIRST_OFDM_40_SISO],
- &txpwr->ofdm_40_siso[0], BRCMS_NUM_RATES_OFDM);
- memcpy(&pi->tx_user_target[TXP_FIRST_OFDM_40_CDD],
- &txpwr->ofdm_40_cdd[0], BRCMS_NUM_RATES_OFDM);
-
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_SISO],
- &txpwr->mcs_20_siso[0], BRCMS_NUM_RATES_MCS_1_STREAM);
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_CDD],
- &txpwr->mcs_20_cdd[0], BRCMS_NUM_RATES_MCS_1_STREAM);
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_STBC],
- &txpwr->mcs_20_stbc[0], BRCMS_NUM_RATES_MCS_1_STREAM);
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_SDM],
- &txpwr->mcs_20_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
-
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SISO],
- &txpwr->mcs_40_siso[0], BRCMS_NUM_RATES_MCS_1_STREAM);
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_CDD],
- &txpwr->mcs_40_cdd[0], BRCMS_NUM_RATES_MCS_1_STREAM);
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_STBC],
- &txpwr->mcs_40_stbc[0], BRCMS_NUM_RATES_MCS_1_STREAM);
- memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
- &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
-
- if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
- mac_enabled = true;
-
- if (mac_enabled)
- wlapi_suspend_mac_and_wait(pi->sh->physhim);
-
- wlc_phy_txpower_recalc_target(pi);
- wlc_phy_cal_txpower_recalc_sw(pi);
-
- if (mac_enabled)
- wlapi_enable_mac(pi->sh->physhim);
-}
-
int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
{
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@@ -1441,59 +1262,6 @@ wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint channel, u8 *min_pwr,
}
}
-void
-wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
- u8 *max_txpwr, u8 *min_txpwr)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
- u8 tx_pwr_max = 0;
- u8 tx_pwr_min = 255;
- u8 max_num_rate;
- u8 maxtxpwr, mintxpwr, rate, pactrl;
-
- pactrl = 0;
-
- max_num_rate = ISNPHY(pi) ? TXP_NUM_RATES :
- ISLCNPHY(pi) ? (TXP_LAST_SISO_MCS_20 +
- 1) : (TXP_LAST_OFDM + 1);
-
- for (rate = 0; rate < max_num_rate; rate++) {
-
- wlc_phy_txpower_sromlimit(ppi, chan, &mintxpwr, &maxtxpwr,
- rate);
-
- maxtxpwr = (maxtxpwr > pactrl) ? (maxtxpwr - pactrl) : 0;
-
- maxtxpwr = (maxtxpwr > 6) ? (maxtxpwr - 6) : 0;
-
- tx_pwr_max = max(tx_pwr_max, maxtxpwr);
- tx_pwr_min = min(tx_pwr_min, maxtxpwr);
- }
- *max_txpwr = tx_pwr_max;
- *min_txpwr = tx_pwr_min;
-}
-
-void
-wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint bandunit,
- s32 *max_pwr, s32 *min_pwr, u32 *step_pwr)
-{
- return;
-}
-
-u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
-
- return pi->tx_power_min;
-}
-
-u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
-
- return pi->tx_power_max;
-}
-
static s8 wlc_phy_env_measure_vbat(struct brcms_phy *pi)
{
if (ISLCNPHY(pi))
@@ -1797,13 +1565,6 @@ wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr,
}
}
-void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
-
- pi->txpwr_percent = txpwr_percent;
-}
-
void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap)
{
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@@ -1811,35 +1572,6 @@ void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap)
pi->sh->machwcap = machwcap;
}
-void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
- u16 rxc;
- rxc = 0;
-
- if (start_end == ON) {
- if (!ISNPHY(pi))
- return;
-
- if (NREV_IS(pi->pubpi.phy_rev, 3)
- || NREV_IS(pi->pubpi.phy_rev, 4)) {
- bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
- 0xa0);
- bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
- 0x1 << 15);
- }
- } else {
- if (NREV_IS(pi->pubpi.phy_rev, 3)
- || NREV_IS(pi->pubpi.phy_rev, 4)) {
- bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
- 0xa0);
- bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
- }
-
- wlc_phy_por_inform(ppi);
- }
-}
-
void
wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *txpwr,
u16 chanspec)
@@ -1940,37 +1672,6 @@ bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi)
return pi->hwpwrctrl;
}
-void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
- bool suspend;
-
- if (!pi->hwpwrctrl_capable)
- return;
-
- pi->hwpwrctrl = hwpwrctrl;
- pi->nphy_txpwrctrl = hwpwrctrl;
- pi->txpwrctrl = hwpwrctrl;
-
- if (ISNPHY(pi)) {
- suspend = (0 == (bcma_read32(pi->d11core,
- D11REGOFFS(maccontrol)) &
- MCTL_EN_MAC));
- if (!suspend)
- wlapi_suspend_mac_and_wait(pi->sh->physhim);
-
- wlc_phy_txpwrctrl_enable_nphy(pi, pi->nphy_txpwrctrl);
- if (pi->nphy_txpwrctrl == PHY_TPC_HW_OFF)
- wlc_phy_txpwr_fixpower_nphy(pi);
- else
- mod_phy_reg(pi, 0x1e7, (0x7f << 0),
- pi->saved_txpwr_idx);
-
- if (!suspend)
- wlapi_enable_mac(pi->sh->physhim);
- }
-}
-
void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi)
{
@@ -2128,13 +1829,6 @@ void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type)
pi->antsel_type = antsel_type;
}
-bool wlc_phy_test_ison(struct brcms_phy_pub *ppi)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
-
- return pi->phytest_on;
-}
-
void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
{
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
@@ -2448,15 +2142,6 @@ done:
}
-void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *pih)
-{
- u8 channel;
-
- channel = CHSPEC_CHANNEL(wlc_phy_chanspec_get(pih));
-
- wlc_phy_noise_sample_request(pih, PHY_NOISE_SAMPLE_EXTERNAL, channel);
-}
-
static const s8 lcnphy_gain_index_offset_for_pkt_rssi[] = {
8,
8,
@@ -2555,27 +2240,6 @@ end:
return rssi;
}
-void wlc_phy_freqtrack_start(struct brcms_phy_pub *pih)
-{
- return;
-}
-
-void wlc_phy_freqtrack_end(struct brcms_phy_pub *pih)
-{
- return;
-}
-
-void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag)
-{
- struct brcms_phy *pi;
- pi = (struct brcms_phy *) ppi;
-
- if (ISLCNPHY(pi))
- wlc_lcnphy_deaf_mode(pi, true);
- else if (ISNPHY(pi))
- wlc_nphy_deaf_mode(pi, true);
-}
-
void wlc_phy_watchdog(struct brcms_phy_pub *pih)
{
struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
@@ -2636,28 +2300,6 @@ void wlc_phy_watchdog(struct brcms_phy_pub *pih)
}
}
-void wlc_phy_BSSinit(struct brcms_phy_pub *pih, bool bonlyap, int rssi)
-{
- struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
- uint i;
- uint k;
-
- for (i = 0; i < MA_WINDOW_SZ; i++)
- pi->sh->phy_noise_window[i] = (s8) (rssi & 0xff);
- if (ISLCNPHY(pi)) {
- for (i = 0; i < MA_WINDOW_SZ; i++)
- pi->sh->phy_noise_window[i] =
- PHY_NOISE_FIXED_VAL_LCNPHY;
- }
- pi->sh->phy_noise_index = 0;
-
- for (i = 0; i < PHY_NOISE_WINDOW_SZ; i++) {
- for (k = WL_ANT_IDX_1; k < WL_ANT_RX_MAX; k++)
- pi->nphy_noise_win[k][i] = PHY_NOISE_FIXED_VAL_NPHY;
- }
- pi->nphy_noise_index = 0;
-}
-
void
wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag)
{
@@ -2812,14 +2454,6 @@ void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain)
pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain);
}
-void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain)
-{
- struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
-
- *txchain = pi->sh->phytxchain;
- *rxchain = pi->sh->phyrxchain;
-}
-
u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih)
{
s16 nphy_currtemp;
@@ -2852,89 +2486,12 @@ u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih)
return active_bitmap;
}
-s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec)
-{
- struct brcms_phy *pi = container_of(pih, struct brcms_phy, pubpi_ro);
- u8 siso_mcs_id, cdd_mcs_id;
-
- siso_mcs_id =
- (CHSPEC_IS40(chanspec)) ? TXP_FIRST_MCS_40_SISO :
- TXP_FIRST_MCS_20_SISO;
- cdd_mcs_id =
- (CHSPEC_IS40(chanspec)) ? TXP_FIRST_MCS_40_CDD :
- TXP_FIRST_MCS_20_CDD;
-
- if (pi->tx_power_target[siso_mcs_id] >
- (pi->tx_power_target[cdd_mcs_id] + 12))
- return PHY_TXC1_MODE_SISO;
- else
- return PHY_TXC1_MODE_CDD;
-}
-
const u8 *wlc_phy_get_ofdm_rate_lookup(void)
{
return ofdm_rate_lookup;
}
-void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
-{
- if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) &&
- (pi->sh->boardflags & BFL_FEM)) {
- if (mode) {
- u16 txant = 0;
- txant = wlapi_bmac_get_txant(pi->sh->physhim);
- if (txant == 1) {
- mod_phy_reg(pi, 0x44d, (0x1 << 2), (1) << 2);
-
- mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
-
- }
-
- bcma_chipco_gpio_control(&pi->d11core->bus->drv_cc,
- 0x0, 0x0);
- bcma_chipco_gpio_out(&pi->d11core->bus->drv_cc,
- ~0x40, 0x40);
- bcma_chipco_gpio_outen(&pi->d11core->bus->drv_cc,
- ~0x40, 0x40);
- } else {
- mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
-
- mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
-
- bcma_chipco_gpio_out(&pi->d11core->bus->drv_cc,
- ~0x40, 0x00);
- bcma_chipco_gpio_outen(&pi->d11core->bus->drv_cc,
- ~0x40, 0x00);
- bcma_chipco_gpio_control(&pi->d11core->bus->drv_cc,
- 0x0, 0x40);
- }
- }
-}
-
void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool ldpc)
{
return;
}
-
-void
-wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset, s8 *ofdmoffset)
-{
- *cckoffset = 0;
- *ofdmoffset = 0;
-}
-
-s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec)
-{
-
- return rssi;
-}
-
-bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *ppi)
-{
- struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
-
- if (ISNPHY(pi))
- return wlc_phy_n_txpower_ipa_ison(pi);
- else
- return false;
-}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h
index 1efc92fd1671..cab08f0669d1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h
@@ -186,7 +186,6 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
-u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
@@ -194,26 +193,16 @@ void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
-void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
-
void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
-
-void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
-
void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
bool wide_filter);
void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
struct brcms_chanvec *channels);
-u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
u8 *_max_, int rate);
-void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
- u8 *_max_, u8 *_min_);
-void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
- s32 *, s32 *, u32 *);
void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
u16 chanspec);
int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
@@ -221,25 +210,16 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
struct txpwr_limits *);
bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
-u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
-void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
@@ -249,17 +229,10 @@ void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
struct tx_power *power, uint channel);
void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
-void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
-
-void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
-
const u8 *wlc_phy_get_ofdm_rate_lookup(void);
s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
index 70a9ec050717..4e80f4d27949 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
@@ -908,8 +908,6 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
void wlc_phyreg_enter(struct brcms_phy_pub *pih);
void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-void wlc_radioreg_exit(struct brcms_phy_pub *pih);
void wlc_phy_read_table(struct brcms_phy *pi,
const struct phytbl_info *ptbl_info,
@@ -921,7 +919,6 @@ void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-void write_phy_channel_reg(struct brcms_phy *pi, uint val);
void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
u8 wlc_phy_nbits(s32 value);
@@ -985,7 +982,6 @@ s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
void wlc_2064_vco_cal(struct brcms_phy *pi);
void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
@@ -1031,7 +1027,6 @@ struct phy_iq_est {
};
void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
-void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_phy_write_table_nphy(pi, pti) \
wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1115,10 +1110,4 @@ int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
#define NPHY_TESTPATTERN_BPHY_RFCS 1
void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
-
-void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
- s8 *ofdmoffset);
-s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
-
-bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
#endif /* _BRCM_PHY_INT_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index d0faba240561..b4bba67a45ec 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -919,7 +919,7 @@ void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti)
static void
wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id,
- const u16 *tbl_ptr, u32 tbl_len,
+ u16 *tbl_ptr, u32 tbl_len,
u32 tbl_width, u32 tbl_offset)
{
struct phytbl_info tab;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index d362c4337616..b03d5a1f1a93 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -19713,11 +19713,6 @@ u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih)
return (u8) rxen_bits;
}
-bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pi)
-{
- return PHY_IPA(pi);
-}
-
void wlc_phy_cal_init_nphy(struct brcms_phy *pi)
{
}
@@ -25825,10 +25820,8 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
if (mphase) {
cal_cnt = pi->mphase_txcal_cmdidx;
- if ((cal_cnt + pi->mphase_txcal_numcmds) < max_cal_cmds)
- num_cals = cal_cnt + pi->mphase_txcal_numcmds;
- else
- num_cals = max_cal_cmds;
+ num_cals = min(cal_cnt + pi->mphase_txcal_numcmds,
+ max_cal_cmds);
} else {
cal_cnt = 0;
num_cals = max_cal_cmds;
@@ -28577,17 +28570,3 @@ void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable)
}
}
}
-
-void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode)
-{
- wlapi_suspend_mac_and_wait(pi->sh->physhim);
-
- if (mode) {
- if (pi->nphy_deaf_count == 0)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
- } else if (pi->nphy_deaf_count > 0) {
- wlc_phy_stay_in_carriersearch_nphy(pi, false);
- }
-
- wlapi_enable_mac(pi->sh->physhim);
-}
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index c1e22c589d85..b39c5c1ee18b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -52,6 +52,7 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
+#define BRCM_CC_43751_CHIP_ID 43751
#define BRCM_CC_43752_CHIP_ID 43752
#define BRCM_CC_4377_CHIP_ID 0x4377
#define BRCM_CC_4378_CHIP_ID 0x4378
@@ -99,6 +100,7 @@
#define BRCM_PCIE_4377_DEVICE_ID 0x4488
#define BRCM_PCIE_4378_DEVICE_ID 0x4425
#define BRCM_PCIE_4387_DEVICE_ID 0x4433
+#define CY_PCIE_54591_DEVICE_ID 0x4417
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 4a9fa8b83f0f..b61b8f377702 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -2229,7 +2229,7 @@ struct il_spectrum_notification {
u8 channel;
u8 type; /* see enum il_measurement_type */
u8 reserved1;
- /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only
+ /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
* valid if applicable for measurement type requested. */
__le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
__le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 7b70640abf53..6d4a3bce49b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -27,8 +27,6 @@
#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0"
#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0"
-#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
-#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
static const struct iwl_family_base_params iwl_sc_base = {
.num_of_queues = 512,
@@ -101,5 +99,3 @@ IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2F_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2F_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index f46c65d75962..9d99374ee093 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -388,7 +388,7 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
/**
* iwl_parse_eeprom_data - parse EEPROM data and return values
*
- * @trans: ransport we're parsing for, for debug only
+ * @trans: transport we're parsing for, for debug only
* @cfg: device configuration for parsing and overrides
* @eeprom: the EEPROM data
* @eeprom_size: length of the EEPROM data
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index ee822a87c42c..b1c6ee8ae2df 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -91,12 +91,6 @@ enum iwl_data_path_subcmd_ids {
SEC_KEY_CMD = 0x18,
/**
- * @OMI_SEND_STATUS_NOTIF: notification after OMI was sent
- * uses &struct iwl_omi_send_status_notif
- */
- OMI_SEND_STATUS_NOTIF = 0xF2,
-
- /**
* @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
* uses &struct iwl_esr_mode_notif or &struct iwl_esr_mode_notif_v1
*/
@@ -699,24 +693,4 @@ struct iwl_sec_key_cmd {
} __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */
} __packed; /* SEC_KEY_CMD_API_S_VER_1 */
-/**
- * struct iwl_omi_send_status_notif_v1 - OMI status notification
- * @success: indicates that the OMI was sent successfully
- * (currently always set)
- */
-struct iwl_omi_send_status_notif_v1 {
- __le32 success;
-} __packed; /* OMI_SEND_STATUS_NTFY_API_S_VER_1 */
-
-/**
- * struct iwl_omi_send_status_notif - OMI status notification
- * @success: indicates that the OMI was sent successfully
- * (currently always set)
- * @sta_id: sta_id to which the OMI was sent
- */
-struct iwl_omi_send_status_notif {
- __le32 success;
- __le32 sta_id;
-} __packed; /* OMI_SEND_STATUS_NTFY_API_S_VER_2 */
-
#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index ab84aac6605d..786b3bf4b448 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -571,7 +571,8 @@ enum iwl_ppag_flags {
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: command version 1 structure.
- * @v2: command version 5 structure.
+ * @v2: command versions 2 through 6 share this structure,
+ * but the format of the flags bitmap differs between them
* @v3: command version 7 structure.
* @v1.flags: values from &enum iwl_ppag_flags
* @v1.gain: table of antenna gain values per chain and sub-band
@@ -592,7 +593,9 @@ union iwl_ppag_table_cmd {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_5 */
+ } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_2, VER3, VER4,
+ * VER5, VER6
+ */
struct {
struct bios_value_u32 ppag_config_info;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
@@ -600,11 +603,20 @@ union iwl_ppag_table_cmd {
} __packed v3; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
} __packed;
-#define IWL_PPAG_CMD_V1_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
-#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V1_MASK | \
+#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
IWL_PPAG_ETSI_LPI_UHB_MASK | \
IWL_PPAG_USA_LPI_UHB_MASK)
+#define IWL_PPAG_CMD_V6_MASK (IWL_PPAG_CMD_V5_MASK | \
+ IWL_PPAG_ETSI_VLP_UHB_MASK | \
+ IWL_PPAG_ETSI_SP_UHB_MASK | \
+ IWL_PPAG_USA_VLP_UHB_MASK | \
+ IWL_PPAG_USA_SP_UHB_MASK | \
+ IWL_PPAG_CANADA_LPI_UHB_MASK | \
+ IWL_PPAG_CANADA_VLP_UHB_MASK | \
+ IWL_PPAG_CANADA_SP_UHB_MASK)
+
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
index 58d5a6ef633e..08edd1d99992 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -50,7 +50,7 @@ struct iwl_tdls_channel_switch_timing {
*/
struct iwl_tdls_channel_switch_frame {
__le32 switch_time_offset;
- struct iwl_tx_cmd_v6 tx_cmd;
+ struct iwl_tx_cmd_v6_params tx_cmd;
u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
@@ -131,7 +131,7 @@ struct iwl_tdls_config_cmd {
struct iwl_tdls_sta_info sta_info[IWL_TDLS_STA_COUNT];
__le32 pti_req_data_offset;
- struct iwl_tx_cmd_v6 pti_req_tx_cmd;
+ struct iwl_tx_cmd_v6_params pti_req_tx_cmd;
u8 pti_req_template[];
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index f586379d66dd..46d35ef4751e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -452,7 +452,7 @@ struct iwl_roc_notif {
* listen mode. Will be fragmented. Valid only on the P2P Device MAC.
* Valid only on the P2P Device MAC. The firmware will take into account
* the duration, the interval and the repetition count.
- * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be be
+ * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
* able to run the GO Negotiation. Will not be fragmented and not
* repetitive. Valid only on the P2P Device MAC. Only the duration will
* be taken into account.
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 62bd35a8f680..26d2013905ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -181,8 +181,8 @@ enum iwl_tx_offload_assist_flags_pos {
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
- * struct iwl_tx_cmd_v6 - TX command struct to FW
- * ( TX_CMD = 0x1c )
+ * struct iwl_tx_cmd_v6_params - parameters of the TX
+ *
* @len: in bytes of the payload, see below for details
* @offload_assist: TX offload configuration
* @tx_flags: combination of TX_CMD_FLG_*, see &enum iwl_tx_flags
@@ -205,8 +205,6 @@ enum iwl_tx_offload_assist_flags_pos {
* @tid_tspec: TID/tspec
* @pm_frame_timeout: PM TX frame timeout
* @reserved4: reserved
- * @payload: payload (same as @hdr)
- * @hdr: 802.11 header (same as @payload)
*
* The byte count (both len and next_frame_len) includes MAC header
* (24/26/30/32 bytes)
@@ -217,11 +215,8 @@ enum iwl_tx_offload_assist_flags_pos {
* It does not include post-MAC padding, i.e.,
* MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
* Range of len: 14-2342 bytes.
- *
- * After the struct fields the MAC header is placed, plus any padding,
- * and then the actial payload.
*/
-struct iwl_tx_cmd_v6 {
+struct iwl_tx_cmd_v6_params {
__le16 len;
__le16 offload_assist;
__le32 tx_flags;
@@ -245,10 +240,20 @@ struct iwl_tx_cmd_v6 {
u8 tid_tspec;
__le16 pm_frame_timeout;
__le16 reserved4;
- union {
- DECLARE_FLEX_ARRAY(u8, payload);
- DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
- };
+} __packed; /* TX_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_tx_cmd_v6 - TX command struct to FW
+ * ( TX_CMD = 0x1c )
+ * @params: parameters of the TX, see &struct iwl_tx_cmd_v6_params
+ * @hdr: 802.11 header
+ *
+ * After &params, the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */
+struct iwl_tx_cmd_v6 {
+ struct iwl_tx_cmd_v6_params params;
+ struct ieee80211_hdr hdr[];
} __packed; /* TX_CMD_API_S_VER_6 */
struct iwl_dram_sec_info {
@@ -748,7 +753,7 @@ struct iwl_compressed_ba_notif {
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd_v6 {
- struct iwl_tx_cmd_v6 tx;
+ struct iwl_tx_cmd_v6_params tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
@@ -767,7 +772,7 @@ struct iwl_mac_beacon_cmd_v6 {
* @frame: the template of the beacon frame
*/
struct iwl_mac_beacon_cmd_v7 {
- struct iwl_tx_cmd_v6 tx;
+ struct iwl_tx_cmd_v6_params tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 80d8373fccfc..3d6d1a85bb51 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -344,18 +344,18 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK);
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
if (fwrt->ppag_bios_rev >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_bios_rev);
}
- } else if (cmd_ver == 5) {
+ } else if (cmd_ver >= 2 && cmd_ver <= 6) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v2.gain[0];
*cmd_size = sizeof(cmd->v2);
- cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK);
+ cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags);
if (fwrt->ppag_bios_rev == 0) {
/* in this case FW supports revisions 1,2 or 3 */
IWL_DEBUG_RADIO(fwrt,
@@ -378,9 +378,17 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
"PPAG MODE bits were read from bios: %d\n",
fwrt->ppag_flags);
- if (cmd_ver == 1 &&
- !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) {
+ if (cmd_ver == 6)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK);
+ else if (cmd_ver == 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
+ else if (cmd_ver < 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
+
+ if ((cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
+ (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
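A condensed sketch of the version gating the hunk above implements: the raw BIOS flags are first narrowed to the bits each command version understands, then the China bit is additionally cleared for the cases that cannot use it. The numeric mask values below are illustrative placeholders for the IWL_PPAG_CMD_V*_MASK defines added in power.h; the helper itself is not driver code.

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Illustrative stand-ins for the masks defined in fw/api/power.h. */
	#define PPAG_MASK_V4	0x0003
	#define PPAG_MASK_V5	0x000f
	#define PPAG_MASK_V6	0x07ff

	static __le32 ppag_flags_for_version(u32 bios_flags, int cmd_ver)
	{
		u32 flags = bios_flags;

		if (cmd_ver >= 6)
			flags &= PPAG_MASK_V6;
		else if (cmd_ver == 5)
			flags &= PPAG_MASK_V5;
		else
			flags &= PPAG_MASK_V4;	/* versions 1..4 */

		return cpu_to_le32(flags);
	}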
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 6d983fe2ee44..28aad975434b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -236,10 +236,9 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
mac = "sc";
break;
case IWL_CFG_MAC_TYPE_SC2:
- mac = "sc2";
- break;
+ /* Uses the same firmware as SC2 */
case IWL_CFG_MAC_TYPE_SC2F:
- mac = "sc2f";
+ mac = "sc2";
break;
case IWL_CFG_MAC_TYPE_BR:
mac = "br";
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index ad857a05d3c3..5e483a55a4ba 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -75,7 +75,6 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
/* return as if we have a HW timeout/failure */
return 0x5a5a5a5a;
}
-IWL_EXPORT_SYMBOL(iwl_read_direct32);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
@@ -93,7 +92,6 @@ void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value)
iwl_trans_release_nic_access(trans);
}
}
-IWL_EXPORT_SYMBOL(iwl_write_direct64);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
@@ -109,7 +107,6 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
return -ETIMEDOUT;
}
-IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs)
{
@@ -117,14 +114,12 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs)
trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
return val;
}
-IWL_EXPORT_SYMBOL(iwl_read_prph_no_grab);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
iwl_trans_write_prph(trans, ofs, val);
}
-IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab);
void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val)
{
@@ -132,7 +127,6 @@ void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val)
iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}
-IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 4424443d2328..a67b9572aac3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/types.h>
+#include <linux/fips.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
@@ -543,16 +544,22 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
else
vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+ /*
+ * With fips_enabled crypto is done by software, so the HW cannot
+ * split up A-MSDUs and the real limit that was set applies.
+ * Note that EHT doesn't honour this (HE copies the VHT value),
+ * but EHT is also entirely disabled for fips_enabled.
+ */
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
- if (trans->mac_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported && !fips_enabled)
vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
case IWL_AMSDU_2K:
- if (trans->mac_cfg->mq_rx_supported)
+ if (trans->mac_cfg->mq_rx_supported && !fips_enabled)
vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
else
@@ -909,7 +916,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
bool slow_pcie = (!trans->mac_cfg->integrated &&
trans->info.pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB);
- if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
+ /* EHT needs WPA3/MFP so cannot do it for fips_enabled */
+ if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be ||
+ fips_enabled)
iftype_data->eht_cap.has_eht = false;
/* Advertise an A-MPDU exponent extension based on
@@ -1197,11 +1206,19 @@ static void iwl_init_sbands(struct iwl_trans *trans,
n_used += iwl_init_sband_channels(data, sband, n_channels,
NL80211_BAND_6GHZ);
- if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ /*
+ * 6 GHz requires WPA3 which requires MFP, which FW cannot do
+ * when fips_enabled, so don't advertise any 6 GHz channels to
+ * avoid spending time on scanning those channels and perhaps
+ * even finding APs there that cannot be used.
+ */
+ if (!fips_enabled && data->sku_cap_11ax_enable &&
+ !iwlwifi_mod_params.disable_11ax)
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
fw);
else
sband->n_channels = 0;
+
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
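The fips_enabled flag used above is the global from <linux/fips.h>; when the kernel runs in FIPS mode the driver keeps crypto in software and therefore trims the advertised capabilities. A minimal sketch of this gating pattern follows; the capability structure and field names are placeholders, not iwlwifi types.

	#include <linux/fips.h>
	#include <linux/types.h>

	struct wifi_caps {
		bool large_amsdu;	/* e.g. 11454-byte A-MSDU */
		bool he_6ghz;
		bool eht;
	};

	static void trim_caps_for_fips(struct wifi_caps *caps)
	{
		if (!fips_enabled)
			return;

		/* software crypto: hardware cannot split large A-MSDUs */
		caps->large_amsdu = false;
		/* 6 GHz needs WPA3/MFP, which firmware cannot do in FIPS mode */
		caps->he_6ghz = false;
		/* EHT likewise requires MFP, so it is disabled entirely */
		caps->eht = false;
	}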
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 810923053053..3694b41d6621 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -437,31 +437,26 @@ void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
iwl_trans_pcie_write8(trans, ofs, val);
}
-IWL_EXPORT_SYMBOL(iwl_trans_write8);
void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
iwl_trans_pcie_write32(trans, ofs, val);
}
-IWL_EXPORT_SYMBOL(iwl_trans_write32);
u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
return iwl_trans_pcie_read32(trans, ofs);
}
-IWL_EXPORT_SYMBOL(iwl_trans_read32);
u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
return iwl_trans_pcie_read_prph(trans, ofs);
}
-IWL_EXPORT_SYMBOL(iwl_trans_read_prph);
void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
return iwl_trans_pcie_write_prph(trans, ofs, val);
}
-IWL_EXPORT_SYMBOL(iwl_trans_write_prph);
int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords)
@@ -502,7 +497,6 @@ int iwl_trans_sw_reset(struct iwl_trans *trans)
{
return iwl_trans_pcie_sw_reset(trans, true);
}
-IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);
struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
@@ -512,7 +506,6 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
return iwl_trans_pcie_dump_data(trans, dump_mask,
sanitize_ops, sanitize_ctx);
}
-IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
@@ -548,20 +541,17 @@ void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
iwl_trans_pci_interrupts(trans, enable);
}
-IWL_EXPORT_SYMBOL(iwl_trans_interrupts);
void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
iwl_trans_pcie_sync_nmi(trans);
}
-IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);
int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
u64 src_addr, u32 byte_cnt)
{
return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
}
-IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);
void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
u32 mask, u32 value)
@@ -575,7 +565,6 @@ int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
{
return iwl_trans_pcie_read_config32(trans, ofs, val);
}
-IWL_EXPORT_SYMBOL(iwl_trans_read_config32);
bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
{
@@ -771,7 +760,6 @@ void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
{
iwl_trans_pcie_debugfs_cleanup(trans);
}
-IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
#endif
void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
@@ -809,7 +797,6 @@ int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
{
return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
}
-IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);
int iwl_trans_load_pnvm(struct iwl_trans *trans,
const struct iwl_pnvm_image *pnvm_data,
@@ -824,7 +811,6 @@ void iwl_trans_set_pnvm(struct iwl_trans *trans,
{
iwl_trans_pcie_ctx_info_v2_set_pnvm(trans, capa);
}
-IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
int iwl_trans_load_reduce_power(struct iwl_trans *trans,
const struct iwl_pnvm_image *payloads,
@@ -833,11 +819,9 @@ int iwl_trans_load_reduce_power(struct iwl_trans *trans,
return iwl_trans_pcie_ctx_info_v2_load_reduce_power(trans, payloads,
capa);
}
-IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
void iwl_trans_set_reduce_power(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa)
{
iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}
-IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
index 6b349270481d..3bf36f8f6874 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/agg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
@@ -305,10 +305,15 @@ iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
* already ahead and it will be dropped.
* If the last sub-frame is not on this queue - we will get frame
* release notification with up to date NSSN.
+ * If this is the first frame that is stored in the buffer, the head_sn
+ * may be outdated. Update it based on the last NSSN to make sure it
+ * will be released when the frame release notification arrives.
*/
if (!amsdu || last_subframe)
iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
buffer, nssn);
+ else if (buffer->num_stored == 1)
+ buffer->head_sn = nssn;
return IWL_MLD_BUFFERED_SKB;
}
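For orientation, a simplified sketch of the reorder-buffer rule the hunk above adds: buffered frames are released up to the NSSN, and when the very first buffered frame is an A-MSDU subframe that is not the last one, head_sn is caught up to the latest NSSN so the later frame-release notification can still flush it. ieee80211_sn_less() is the standard 12-bit sequence-number comparison helper; the structure here is a stand-in, not the mld reorder buffer.

	#include <linux/ieee80211.h>
	#include <linux/types.h>

	struct reorder_buf {
		u16 head_sn;		/* next sequence number expected to be released */
		int num_stored;		/* frames currently held in the buffer */
	};

	static void note_buffered_frame(struct reorder_buf *buf, u16 nssn,
					bool amsdu, bool last_subframe)
	{
		if (!amsdu || last_subframe) {
			/* release everything up to nssn (release loop elided) */
			if (ieee80211_sn_less(buf->head_sn, nssn))
				buf->head_sn = nssn;
		} else if (buf->num_stored == 1) {
			/* first buffered subframe: head_sn may be stale, catch it up */
			buf->head_sn = nssn;
		}
	}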
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/constants.h b/drivers/net/wireless/intel/iwlwifi/mld/constants.h
index 2a59b29b75cb..49accf96f44b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/constants.h
@@ -40,15 +40,6 @@
#define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ)
-/* OMI reduced BW thresholds (channel load percentage) */
-#define IWL_MLD_OMI_ENTER_CHAN_LOAD 10
-#define IWL_MLD_OMI_EXIT_CHAN_LOAD_160 20
-#define IWL_MLD_OMI_EXIT_CHAN_LOAD_320 30
-/* time (in milliseconds) to let AP "settle" the OMI */
-#define IWL_MLD_OMI_AP_SETTLE_DELAY 27
-/* time (in milliseconds) to not enter OMI reduced BW after leaving */
-#define IWL_MLD_OMI_EXIT_PROTECTION 5000
-
#define IWL_MLD_DIS_RANDOM_FW_ID false
#define IWL_MLD_D3_DEBUG false
#define IWL_MLD_NON_TRANSMITTING_AP false
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index 26255246a320..ed0a0f76f1c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -762,6 +762,7 @@ iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
.conf.keyidx = key_data->id,
};
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+ u8 key[WOWLAN_KEY_MAX_SIZE];
BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
@@ -803,7 +804,11 @@ iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
}
memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
- key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+
+ memcpy(key, key_data->key, sizeof(key_data->key));
+
+ key_config = ieee80211_gtk_rekey_add(vif, key_data->id, key,
+ sizeof(key), link_id);
if (IS_ERR(key_config))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index c48cc3909637..782fc41aa1c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -242,27 +242,9 @@ static bool iwl_mld_fill_mu_edca(struct iwl_mld *mld,
return true;
}
-static u8 iwl_mld_sta_rx_bw_to_fw(enum ieee80211_sta_rx_bandwidth bw)
-{
- switch (bw) {
- default: /* potential future values not supported by this hw/driver */
- case IEEE80211_STA_RX_BW_20:
- return IWL_LINK_MODIFY_BW_20;
- case IEEE80211_STA_RX_BW_40:
- return IWL_LINK_MODIFY_BW_40;
- case IEEE80211_STA_RX_BW_80:
- return IWL_LINK_MODIFY_BW_80;
- case IEEE80211_STA_RX_BW_160:
- return IWL_LINK_MODIFY_BW_160;
- case IEEE80211_STA_RX_BW_320:
- return IWL_LINK_MODIFY_BW_320;
- }
-}
-
-static int _iwl_mld_change_link_in_fw(struct iwl_mld *mld,
- struct ieee80211_bss_conf *link,
- enum ieee80211_sta_rx_bandwidth bw,
- u32 changes)
+int
+iwl_mld_change_link_in_fw(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
+ u32 changes)
{
struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
struct ieee80211_vif *vif = link->vif;
@@ -318,9 +300,6 @@ static int _iwl_mld_change_link_in_fw(struct iwl_mld *mld,
cmd.bi = cpu_to_le32(link->beacon_int);
cmd.dtim_interval = cpu_to_le32(link->beacon_int * link->dtim_period);
- if (changes & LINK_CONTEXT_MODIFY_BANDWIDTH)
- cmd.modify_bandwidth = iwl_mld_sta_rx_bw_to_fw(bw);
-
/* Configure HE parameters only if HE is supported, and only after
* the parameters are set in mac80211 (meaning after assoc)
*/
@@ -382,29 +361,11 @@ send_cmd:
return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_MODIFY);
}
-int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
- struct ieee80211_bss_conf *link,
- u32 changes)
-{
- if (WARN_ON(changes & LINK_CONTEXT_MODIFY_BANDWIDTH))
- changes &= ~LINK_CONTEXT_MODIFY_BANDWIDTH;
-
- return _iwl_mld_change_link_in_fw(mld, link, 0, changes);
-}
-
-int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
- struct ieee80211_bss_conf *link,
- enum ieee80211_sta_rx_bandwidth bw)
-{
- return _iwl_mld_change_link_in_fw(mld, link, bw,
- LINK_CONTEXT_MODIFY_BANDWIDTH);
-}
-
int iwl_mld_activate_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *link)
{
struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_link->vif);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(link->vif);
int ret;
lockdep_assert_wiphy(mld->wiphy);
@@ -412,7 +373,6 @@ int iwl_mld_activate_link(struct iwl_mld *mld,
if (WARN_ON(!mld_link || mld_link->active))
return -EINVAL;
- mld_link->rx_omi.exit_ts = jiffies;
mld_link->active = true;
ret = iwl_mld_change_link_in_fw(mld, link,
@@ -477,331 +437,6 @@ iwl_mld_rm_link_from_fw(struct iwl_mld *mld, struct ieee80211_bss_conf *link)
iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_REMOVE);
}
-static void iwl_mld_omi_bw_update(struct iwl_mld *mld,
- struct ieee80211_bss_conf *link_conf,
- struct iwl_mld_link *mld_link,
- struct ieee80211_link_sta *link_sta,
- enum ieee80211_sta_rx_bandwidth bw,
- bool ap_update)
-{
- enum ieee80211_sta_rx_bandwidth apply_bw;
-
- mld_link->rx_omi.desired_bw = bw;
-
- /* Can't update OMI while already in progress, desired_bw was
- * set so on FW notification the worker will see the change
- * and apply new the new desired bw.
- */
- if (mld_link->rx_omi.bw_in_progress)
- return;
-
- if (bw == IEEE80211_STA_RX_BW_MAX)
- apply_bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
- else
- apply_bw = bw;
-
- if (!ap_update) {
- /* The update isn't due to AP tracking after leaving OMI,
- * where the AP could increase BW and then we must tell
- * it that we can do the increased BW as well, if we did
- * update the chandef.
- * In this case, if we want MAX, then we will need to send
- * a new OMI to the AP if it increases its own bandwidth as
- * we can (due to internal and FW limitations, and being
- * worried the AP might break) only send to what we're doing
- * at the moment. In this case, set last_max_bw; otherwise
- * if we really want to decrease our bandwidth set it to 0
- * to indicate no updates are needed if the AP changes.
- */
- if (bw != IEEE80211_STA_RX_BW_MAX)
- mld_link->rx_omi.last_max_bw = apply_bw;
- else
- mld_link->rx_omi.last_max_bw = 0;
- } else {
- /* Otherwise, if we're already trying to do maximum and
- * the AP is changing, set last_max_bw to the new max the
- * AP is using, we'll only get to this code path if the
- * new bandwidth of the AP is bigger than what we sent it
- * previously. This avoids repeatedly sending updates if
- * it changes bandwidth, only doing it once on an increase.
- */
- mld_link->rx_omi.last_max_bw = apply_bw;
- }
-
- if (ieee80211_prepare_rx_omi_bw(link_sta, bw)) {
- mld_link->rx_omi.bw_in_progress = apply_bw;
- iwl_mld_change_link_omi_bw(mld, link_conf, apply_bw);
- }
-}
-
-static void iwl_mld_omi_bw_finished_work(struct wiphy *wiphy,
- struct wiphy_work *work)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
- struct iwl_mld_link *mld_link =
- container_of(work, typeof(*mld_link), rx_omi.finished_work.work);
- enum ieee80211_sta_rx_bandwidth desired_bw, switched_to_bw;
- struct ieee80211_vif *vif = mld_link->vif;
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- struct ieee80211_bss_conf *link_conf;
- struct ieee80211_link_sta *link_sta;
-
- if (!mld_vif->ap_sta)
- return;
-
- link_sta = wiphy_dereference(mld->wiphy,
- mld_vif->ap_sta->link[mld_link->link_id]);
- if (WARN_ON_ONCE(!link_sta))
- return;
-
- link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
- if (WARN_ON_ONCE(!link_conf))
- return;
-
- if (WARN_ON(!mld_link->rx_omi.bw_in_progress))
- return;
-
- desired_bw = mld_link->rx_omi.desired_bw;
- switched_to_bw = mld_link->rx_omi.bw_in_progress;
-
- ieee80211_finalize_rx_omi_bw(link_sta);
- mld_link->rx_omi.bw_in_progress = 0;
-
- if (desired_bw != switched_to_bw)
- iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
- desired_bw, false);
-}
-
-static struct ieee80211_vif *
-iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld,
- struct ieee80211_link_sta **link_sta,
- struct iwl_mld_link **mld_link)
-{
- struct iwl_mld_vif *mld_vif;
- struct ieee80211_vif *vif;
- int n_link_stas = 0;
-
- *link_sta = NULL;
-
- if (mld->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
- return NULL;
-
- vif = iwl_mld_get_bss_vif(mld);
- if (!vif)
- return NULL;
-
- for (int i = 0; i < ARRAY_SIZE(mld->fw_id_to_link_sta); i++) {
- struct ieee80211_link_sta *tmp;
-
- tmp = wiphy_dereference(mld->wiphy, mld->fw_id_to_link_sta[i]);
- if (IS_ERR_OR_NULL(tmp))
- continue;
-
- n_link_stas++;
- *link_sta = tmp;
- }
-
- /* can't do anything if we have TDLS peers or EMLSR */
- if (n_link_stas != 1)
- return NULL;
-
- mld_vif = iwl_mld_vif_from_mac80211(vif);
- *mld_link = iwl_mld_link_dereference_check(mld_vif,
- (*link_sta)->link_id);
- if (WARN_ON(!*mld_link))
- return NULL;
-
- return vif;
-}
-
-void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
- struct ieee80211_bss_conf *link_conf,
- enum ieee80211_sta_rx_bandwidth bw)
-{
- struct ieee80211_link_sta *link_sta;
- struct iwl_mld_link *mld_link;
- struct ieee80211_vif *vif;
-
- vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
- if (!vif)
- return;
-
- if (WARN_ON(link_conf->vif != vif))
- return;
-
- /* This is 0 if we requested an OMI BW reduction and don't want to
- * be sending an OMI when the AP's bandwidth changes.
- */
- if (!mld_link->rx_omi.last_max_bw)
- return;
-
- /* We only need to tell the AP if it increases BW over what we last
- * told it we were using, if it reduces then our last OMI to it will
- * not get used anyway (e.g. we said we want 160 but it's doing 80.)
- */
- if (bw < mld_link->rx_omi.last_max_bw)
- return;
-
- iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, true);
-}
-
-void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
- struct iwl_rx_packet *pkt)
-{
- int ver = iwl_fw_lookup_notif_ver(mld->fw, DATA_PATH_GROUP,
- OMI_SEND_STATUS_NOTIF, 1);
- struct ieee80211_link_sta *link_sta;
- struct iwl_mld_link *mld_link;
- struct ieee80211_vif *vif;
-
- if (ver == 2) {
- const struct iwl_omi_send_status_notif *notif =
- (const void *)pkt->data;
- u32 sta_id = le32_to_cpu(notif->sta_id);
- struct iwl_mld_vif *mld_vif;
-
- if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
- "Invalid station %d\n", sta_id))
- return;
-
- link_sta = wiphy_dereference(mld->wiphy,
- mld->fw_id_to_link_sta[sta_id]);
- if (IWL_FW_CHECK(mld, !link_sta, "Station does not exist\n"))
- return;
-
- vif = iwl_mld_sta_from_mac80211(link_sta->sta)->vif;
- mld_vif = iwl_mld_vif_from_mac80211(vif);
-
- mld_link = iwl_mld_link_dereference_check(mld_vif,
- link_sta->link_id);
- if (WARN(!mld_link, "Link %d does not exist\n",
- link_sta->link_id))
- return;
- } else {
- vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta,
- &mld_link);
- }
- if (IWL_FW_CHECK(mld, !vif, "unexpected OMI notification\n"))
- return;
-
- if (IWL_FW_CHECK(mld, !mld_link->rx_omi.bw_in_progress,
- "OMI notification when not requested\n"))
- return;
-
- wiphy_delayed_work_queue(mld->hw->wiphy,
- &mld_link->rx_omi.finished_work,
- msecs_to_jiffies(IWL_MLD_OMI_AP_SETTLE_DELAY));
-}
-
-void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld)
-{
- struct ieee80211_bss_conf *link_conf;
- struct ieee80211_link_sta *link_sta;
- struct iwl_mld_link *mld_link;
- struct ieee80211_vif *vif;
-
- vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
- if (!vif)
- return;
-
- link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
- if (WARN_ON_ONCE(!link_conf))
- return;
-
- if (!link_conf->he_support)
- return;
-
- mld_link->rx_omi.exit_ts = jiffies;
-
- iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
- IEEE80211_STA_RX_BW_MAX, false);
-}
-
-void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld)
-{
- enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_MAX;
- struct ieee80211_chanctx_conf *chanctx;
- struct ieee80211_bss_conf *link_conf;
- struct ieee80211_link_sta *link_sta;
- struct cfg80211_chan_def chandef;
- struct iwl_mld_link *mld_link;
- struct iwl_mld_vif *mld_vif;
- struct ieee80211_vif *vif;
- struct iwl_mld_phy *phy;
- u16 punctured;
- int exit_thr;
-
- /* not allowed in CAM mode */
- if (iwlmld_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
- return;
-
- /* must have one BSS connection (no P2P), no TDLS, nor EMLSR */
- vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
- if (!vif)
- return;
-
- link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
- if (WARN_ON_ONCE(!link_conf))
- return;
-
- if (!link_conf->he_support)
- return;
-
- chanctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
- if (WARN_ON(!chanctx))
- return;
-
- mld_vif = iwl_mld_vif_from_mac80211(vif);
- if (!mld_vif->authorized)
- goto apply;
-
- /* must not be in low-latency mode */
- if (iwl_mld_vif_low_latency(mld_vif))
- goto apply;
-
- chandef = link_conf->chanreq.oper;
-
- switch (chandef.width) {
- case NL80211_CHAN_WIDTH_320:
- exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_320;
- break;
- case NL80211_CHAN_WIDTH_160:
- exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_160;
- break;
- default:
- /* since we reduce to 80 MHz, must have more to start with */
- goto apply;
- }
-
- /* not to be done if primary 80 MHz is punctured */
- if (cfg80211_chandef_primary(&chandef, NL80211_CHAN_WIDTH_80,
- &punctured) < 0 ||
- punctured != 0)
- goto apply;
-
- phy = iwl_mld_phy_from_mac80211(chanctx);
-
- if (phy->channel_load_by_us > exit_thr) {
- /* send OMI for max bandwidth */
- goto apply;
- }
-
- if (phy->channel_load_by_us > IWL_MLD_OMI_ENTER_CHAN_LOAD) {
- /* no changes between enter/exit thresholds */
- return;
- }
-
- if (time_is_after_jiffies(mld_link->rx_omi.exit_ts +
- msecs_to_jiffies(IWL_MLD_OMI_EXIT_PROTECTION)))
- return;
-
- /* reduce bandwidth to 80 MHz to save power */
- bw = IEEE80211_STA_RX_BW_80;
-apply:
- iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, false);
-}
-
IWL_MLD_ALLOC_FN(link, bss_conf)
/* Constructor function for struct iwl_mld_link */
@@ -809,18 +444,12 @@ static int
iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
struct iwl_mld_link *mld_link)
{
- mld_link->vif = link->vif;
- mld_link->link_id = link->link_id;
mld_link->average_beacon_energy = 0;
iwl_mld_init_internal_sta(&mld_link->bcast_sta);
iwl_mld_init_internal_sta(&mld_link->mcast_sta);
iwl_mld_init_internal_sta(&mld_link->mon_sta);
- if (!mld->fw_status.in_hw_restart)
- wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
- iwl_mld_omi_bw_finished_work);
-
return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
}
@@ -884,8 +513,6 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
- wiphy_delayed_work_cancel(mld->wiphy, &link->rx_omi.finished_work);
-
if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
return;
@@ -897,21 +524,23 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
{
const struct iwl_missed_beacons_notif *notif = (const void *)pkt->data;
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
- u32 link_id = le32_to_cpu(notif->link_id);
+ u32 fw_link_id = le32_to_cpu(notif->link_id);
u32 missed_bcon = le32_to_cpu(notif->consec_missed_beacons);
u32 missed_bcon_since_rx =
le32_to_cpu(notif->consec_missed_beacons_since_last_rx);
u32 scnd_lnk_bcn_lost =
le32_to_cpu(notif->consec_missed_beacons_other_link);
struct ieee80211_bss_conf *link_conf =
- iwl_mld_fw_id_to_link_conf(mld, link_id);
+ iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
u32 bss_param_ch_cnt_link_id;
struct ieee80211_vif *vif;
+ u8 link_id;
if (WARN_ON(!link_conf))
return;
vif = link_conf->vif;
+ link_id = link_conf->link_id;
bss_param_ch_cnt_link_id = link_conf->bss_param_ch_cnt_link_id;
IWL_DEBUG_INFO(mld,
@@ -923,7 +552,7 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
mld->trans->dbg.dump_file_name_ext_valid = true;
snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "LinkId_%d_MacType_%d", link_id,
+ "LinkId_%d_MacType_%d", fw_link_id,
iwl_mld_mac80211_iftype_to_fw(vif));
iwl_dbg_tlv_time_point(&mld->fwrt,
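
The missed-beacon handler above now keeps the firmware's link index (fw_link_id, taken from the notification and used for debug strings) separate from the mac80211 link ID (resolved from the link_conf). A condensed sketch of that split, reusing the driver helpers shown in the hunk:

/* Condensed from the hunk above; helpers and types are the driver's own. */
static int missed_beacon_ids_sketch(struct iwl_mld *mld,
				    const struct iwl_missed_beacons_notif *notif)
{
	/* The notification carries the firmware's link index... */
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *link_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	if (WARN_ON(!link_conf))
		return -EINVAL;

	/* ...while mac80211 (and anything keyed on the vif's links) wants
	 * the over-the-air link ID resolved from the link_conf.  The two
	 * index spaces are not the same.
	 */
	return link_conf->link_id;
}
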
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
index 881823be07ba..cad2c9426349 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -36,11 +36,9 @@ struct iwl_probe_resp_data {
* @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
* @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
* This tracks the one IGTK that currently exists in FW.
- * @vif: the vif this link belongs to
* @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
* @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
* @mon_sta: station used for TX injection in monitor interface.
- * @link_id: over the air link ID
* @average_beacon_energy: average beacon energy for beacons received during
* client connections
* @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
@@ -49,14 +47,6 @@ struct iwl_probe_resp_data {
* @silent_deactivation: next deactivation needs to be silent.
* @probe_resp_data: data from FW notification to store NOA related data to be
* inserted into probe response.
- * @rx_omi: data for BW reduction with OMI
- * @rx_omi.bw_in_progress: update is in progress (indicates target BW)
- * @rx_omi.exit_ts: timestamp of last exit
- * @rx_omi.finished_work: work for the delayed reaction to the firmware saying
- * the change was applied, and for then applying a new mode if it was
- * updated while waiting for firmware/AP settle delay.
- * @rx_omi.desired_bw: desired bandwidth
- * @rx_omi.last_max_bw: last maximum BW used by firmware, for AP BW changes
*/
struct iwl_mld_link {
struct rcu_head rcu_head;
@@ -71,19 +61,9 @@ struct iwl_mld_link {
struct ieee80211_key_conf *igtk;
);
/* And here fields that survive a fw restart */
- struct ieee80211_vif *vif;
struct iwl_mld_int_sta bcast_sta;
struct iwl_mld_int_sta mcast_sta;
struct iwl_mld_int_sta mon_sta;
- u8 link_id;
-
- struct {
- struct wiphy_delayed_work finished_work;
- unsigned long exit_ts;
- enum ieee80211_sta_rx_bandwidth bw_in_progress,
- desired_bw,
- last_max_bw;
- } rx_omi;
/* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */
struct ieee80211_key_conf *ap_early_keys[6];
@@ -123,9 +103,6 @@ int iwl_mld_activate_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *link);
void iwl_mld_deactivate_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *link);
-int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
- struct ieee80211_bss_conf *link,
- enum ieee80211_sta_rx_bandwidth bw);
int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
struct ieee80211_bss_conf *link, u32 changes);
void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
@@ -145,13 +122,6 @@ unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf,
bool expect_active_link);
-void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
- struct iwl_rx_packet *pkt);
-void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld);
-void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld);
-void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
- struct ieee80211_bss_conf *link_conf,
- enum ieee80211_sta_rx_bandwidth bw);
void iwl_mld_handle_beacon_filter_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
index f7faa87b8ba6..23362867b400 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
@@ -224,9 +224,6 @@ void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
return;
}
- if (low_latency)
- iwl_mld_leave_omi_bw_reduction(mld);
-
if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT)
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index 59be9923c3b2..b0bd01914a91 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -4,6 +4,7 @@
*/
#include <net/mac80211.h>
+#include <linux/fips.h>
#include <linux/ip.h>
#include "mld.h"
@@ -156,6 +157,9 @@ static void iwl_mld_hw_set_security(struct iwl_mld *mld)
WLAN_CIPHER_SUITE_BIP_GMAC_256
};
+ if (fips_enabled)
+ return;
+
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mld_ciphers);
hw->wiphy->cipher_suites = mld_ciphers;
@@ -180,6 +184,9 @@ static void iwl_mld_hw_set_pm(struct iwl_mld *mld)
if (!device_can_wakeup(mld->trans->dev))
return;
+ if (fips_enabled)
+ return;
+
mld->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
@@ -284,9 +291,11 @@ static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+ /* For fips_enabled, don't support WiFi7 due to WPA3/MFP requirements */
if (mld->nvm_data->sku_cap_11be_enable &&
!iwlwifi_mod_params.disable_11ax &&
- !iwlwifi_mod_params.disable_11be)
+ !iwlwifi_mod_params.disable_11be &&
+ !fips_enabled)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
/* the firmware uses u8 for num of iterations, but 0xff is saved for
@@ -1006,8 +1015,6 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
if (n_active > 1) {
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- iwl_mld_leave_omi_bw_reduction(mld);
-
/* Indicate to mac80211 that EML is enabled */
vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
mld_vif->emlsr.last_entry_ts = jiffies;
@@ -1203,20 +1210,6 @@ iwl_mld_mac80211_link_info_changed_sta(struct iwl_mld *mld,
if (changes & (BSS_CHANGED_CQM | BSS_CHANGED_BEACON_INFO))
iwl_mld_enable_beacon_filter(mld, link_conf, false);
- /* If we have used OMI before to reduce bandwidth to 80 MHz and then
- * increased to 160 MHz again, and then the AP changes to 320 MHz, it
- * will think that we're limited to 160 MHz right now. Update it by
- * requesting a new OMI bandwidth.
- */
- if (changes & BSS_CHANGED_BANDWIDTH) {
- enum ieee80211_sta_rx_bandwidth bw;
-
- bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
-
- iwl_mld_omi_ap_changed_bw(mld, link_conf, bw);
-
- }
-
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mld_retry_emlsr(mld, vif);
}
@@ -1420,30 +1413,6 @@ iwl_mld_mac80211_sched_scan_stop(struct ieee80211_hw *hw,
}
static void
-iwl_mld_restart_complete_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- struct ieee80211_bss_conf *link_conf;
- struct iwl_mld *mld = data;
- int link_id;
-
- for_each_vif_active_link(vif, link_conf, link_id) {
- enum ieee80211_sta_rx_bandwidth bw;
- struct iwl_mld_link *mld_link;
-
- mld_link = wiphy_dereference(mld->wiphy,
- mld_vif->link[link_id]);
-
- if (WARN_ON_ONCE(!mld_link))
- continue;
-
- bw = mld_link->rx_omi.bw_in_progress;
- if (bw)
- iwl_mld_change_link_omi_bw(mld, link_conf, bw);
- }
-}
-
-static void
iwl_mld_mac80211_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
@@ -1453,11 +1422,6 @@ iwl_mld_mac80211_reconfig_complete(struct ieee80211_hw *hw,
case IEEE80211_RECONFIG_TYPE_RESTART:
mld->fw_status.in_hw_restart = false;
iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_END_OF_RECOVERY);
-
- ieee80211_iterate_interfaces(mld->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mld_restart_complete_vif, mld);
-
iwl_trans_finish_sw_reset(mld->trans);
/* no need to lock, adding in parallel would schedule too */
if (!list_empty(&mld->txqs_to_add))
@@ -1681,18 +1645,6 @@ static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
return -EBUSY;
}
- /*
- * If this is the first STA (i.e. the AP) it won't do
- * anything, otherwise must leave for any new STA on
- * any other interface, or for TDLS, etc.
- * Need to call this _before_ adding the STA so it can
- * look up the one STA to use to ask mac80211 to leave
- * OMI; in the unlikely event that adding the new STA
- * then fails we'll just re-enter OMI later (via the
- * statistics notification handling.)
- */
- iwl_mld_leave_omi_bw_reduction(mld);
-
ret = iwl_mld_add_sta(mld, sta, vif, STATION_TYPE_PEER);
if (ret)
return ret;
@@ -1918,6 +1870,10 @@ iwl_mld_mac80211_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
+ if (!iwl_enable_rx_ampdu()) {
+ ret = -EINVAL;
+ break;
+ }
ret = iwl_mld_ampdu_rx_start(mld, sta, tid, ssn, buf_size,
timeout);
break;
@@ -2671,6 +2627,7 @@ const struct ieee80211_ops iwl_mld_hw_ops = {
.mgd_complete_tx = iwl_mld_mac_mgd_complete_tx,
.sta_state = iwl_mld_mac80211_sta_state,
.sta_statistics = iwl_mld_mac80211_sta_statistics,
+ .get_survey = iwl_mld_mac80211_get_survey,
.flush = iwl_mld_mac80211_flush,
.flush_sta = iwl_mld_mac80211_flush_sta,
.ampdu_action = iwl_mld_mac80211_ampdu_action,
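
Several hunks in this file (and in mvm/mac80211.c further down) gate security-related features on the kernel's FIPS mode via fips_enabled from <linux/fips.h>. A minimal sketch of the pattern, using a hypothetical feature struct rather than the driver's real wiphy setup:

#include <linux/fips.h>
#include <linux/types.h>

/* Hypothetical container, for illustration only. */
struct crypto_features_sketch {
	bool cipher_suites;
	bool wowlan;
	bool mfp;
	bool mlo;
};

static void setup_crypto_features_sketch(struct crypto_features_sketch *f)
{
	/* Anything that relies on firmware-offloaded crypto (driver cipher
	 * suites, WoWLAN rekeying, MFP/beacon protection, MLO) stays off
	 * when the kernel runs in FIPS mode.
	 */
	if (fips_enabled)
		return;

	f->cipher_suites = true;
	f->wowlan = true;
	f->mfp = true;
	f->mlo = true;
}
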
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index e7cbfb9009af..7b46ccc306ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -251,7 +251,6 @@ static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
- HCMD_NAME(OMI_SEND_STATUS_NOTIF),
HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
@@ -262,6 +261,13 @@ static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mld_scan_names[] = {
+ HCMD_NAME(CHANNEL_SURVEY_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mld_location_names[] = {
HCMD_NAME(TOF_RANGE_REQ_CMD),
HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
@@ -310,6 +316,7 @@ const struct iwl_hcmd_arr iwl_mld_groups[] = {
[SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names),
[MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names),
+ [SCAN_GROUP] = HCMD_ARR(iwl_mld_scan_names),
[LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names),
[REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names),
[DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names),
@@ -507,6 +514,7 @@ iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
kfree(mld->nvm_data);
kfree(mld->scan.cmd);
+ kfree(mld->channel_survey);
kfree(mld->error_recovery_buf);
kfree(mld->mcast_filter_cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
index 8bc4749599ca..94dc9da6360d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -160,6 +160,7 @@
* device
* @addresses: device MAC addresses.
* @scan: instance of the scan object
+ * @channel_survey: channel survey information collected during scan
* @wowlan: WoWLAN support data.
* @debug_max_sleep: maximum sleep time in D3 (for debug purposes)
* @led: the led device
@@ -253,6 +254,7 @@ struct iwl_mld {
struct mac_address addresses[IWL_MLD_MAX_ADDRESSES];
struct iwl_mld_scan scan;
+ struct iwl_mld_survey *channel_survey;
#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
u32 debug_max_sleep;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
index 262d8e25e62a..f17aeca4fae6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -78,13 +78,6 @@ static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld, \
u8: (notif)->id_member); \
}
-static bool iwl_mld_always_cancel(struct iwl_mld *mld,
- struct iwl_rx_packet *pkt,
- u32 obj_id)
-{
- return true;
-}
-
/* Currently only defined for the RX_HANDLER_SIZES options. Use this for
* notifications that belong to a specific object, and that should be
* canceled when the object is removed
@@ -295,6 +288,8 @@ CMD_VERSIONS(scan_complete_notif,
CMD_VER_ENTRY(1, iwl_umac_scan_complete))
CMD_VERSIONS(scan_iter_complete_notif,
CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
+CMD_VERSIONS(channel_survey_notif,
+ CMD_VER_ENTRY(1, iwl_umac_scan_channel_survey_notif))
CMD_VERSIONS(mfuart_notif,
CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
CMD_VERSIONS(update_mcc,
@@ -348,9 +343,6 @@ CMD_VERSIONS(time_msmt_notif,
CMD_VER_ENTRY(1, iwl_time_msmt_notify))
CMD_VERSIONS(time_sync_confirm_notif,
CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
-CMD_VERSIONS(omi_status_notif,
- CMD_VER_ENTRY(1, iwl_omi_send_status_notif_v1)
- CMD_VER_ENTRY(2, iwl_omi_send_status_notif))
CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(10, iwl_tof_range_rsp_ntfy))
CMD_VERSIONS(beacon_filter_notif, CMD_VER_ENTRY(2, iwl_beacon_filter_notif))
@@ -368,7 +360,6 @@ DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif,
mac_id)
DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif,
mac_id)
-#define iwl_mld_cancel_omi_status_notif iwl_mld_always_cancel
DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id)
DEFINE_SIMPLE_CANCELLATION(beacon_filter, iwl_beacon_filter_notif, link_id)
@@ -415,6 +406,10 @@ const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION,
match_found_notif, RX_HANDLER_SYNC)
+ RX_HANDLER_NO_OBJECT(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ channel_survey_notif,
+ RX_HANDLER_ASYNC)
+
RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
stats_oper_notif, RX_HANDLER_ASYNC)
RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
@@ -461,8 +456,6 @@ const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
time_sync_confirm_notif, RX_HANDLER_ASYNC)
- RX_HANDLER_OF_LINK(DATA_PATH_GROUP, OMI_SEND_STATUS_NOTIF,
- omi_status_notif)
RX_HANDLER_OF_LINK(DATA_PATH_GROUP, BEACON_FILTER_IN_NOTIF,
beacon_filter_notif)
RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
index 3d19cec3f696..b6dedd1ecd4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -1089,6 +1089,15 @@ static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb,
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
+ /* update aggregation data for monitor sake on default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
+ (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->data0 &
+ cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
iwl_mld_decode_eht_phy_data(mld, phy_data, rx_status, eht, usig);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index 479a76a94aa8..62f97a18a16c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -4,6 +4,8 @@
*/
#include <linux/crc32.h>
+#include "iwl-utils.h"
+
#include "mld.h"
#include "scan.h"
#include "hcmd.h"
@@ -482,7 +484,9 @@ iwl_mld_scan_get_cmd_gen_flags(struct iwl_mld *mld,
static u8
iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
struct iwl_mld_scan_params *params,
- struct ieee80211_vif *vif, u16 gen_flags)
+ struct ieee80211_vif *vif,
+ enum iwl_mld_scan_status scan_status,
+ u16 gen_flags)
{
u8 flags = 0;
@@ -494,6 +498,17 @@ iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
if (params->scan_6ghz)
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+ /* For AP interfaces, request survey data for regular scans, provided
+ * the firmware supports it. For non-AP interfaces, EBS will be enabled
+ * and the results may be missing information for some channels.
+ */
+ if (scan_status == IWL_MLD_SCAN_REGULAR &&
+ ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
+ gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
+ iwl_fw_lookup_notif_ver(mld->fw, SCAN_GROUP,
+ CHANNEL_SURVEY_NOTIF, 0) >= 1)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
+
return flags;
}
@@ -544,6 +559,7 @@ iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld,
u16 gen_flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
scan_status);
u8 gen_flags2 = iwl_mld_scan_get_cmd_gen_flags2(mld, params, vif,
+ scan_status,
gen_flags);
IWL_DEBUG_SCAN(mld, "General: flags=0x%x, flags2=0x%x\n",
@@ -1752,6 +1768,11 @@ int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
{
+ /* Clear survey data when starting the first part of a regular scan */
+ if (req->first_part && mld->channel_survey)
+ memset(mld->channel_survey->channels, 0,
+ sizeof(mld->channel_survey->channels[0]) *
+ mld->channel_survey->n_channels);
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
iwl_mld_emlsr_block_tmp_non_bss(mld);
@@ -2025,3 +2046,136 @@ int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld)
return 0;
}
+
+static int iwl_mld_chanidx_from_phy(struct iwl_mld *mld,
+ enum nl80211_band band,
+ u16 phy_chan_num)
+{
+ struct ieee80211_supported_band *sband = mld->wiphy->bands[band];
+
+ if (WARN_ON_ONCE(!sband))
+ return -EINVAL;
+
+ for (int chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct ieee80211_channel *channel = &sband->channels[chan_idx];
+
+ if (channel->hw_value == phy_chan_num)
+ return chan_idx;
+ }
+
+ return -EINVAL;
+}
+
+void iwl_mld_handle_channel_survey_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_umac_scan_channel_survey_notif *notif =
+ (void *)pkt->data;
+ struct iwl_mld_survey_channel *info;
+ enum nl80211_band band;
+ int chan_idx;
+
+ if (!mld->channel_survey) {
+ size_t n_channels = 0;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mld->wiphy->bands[band])
+ continue;
+
+ n_channels += mld->wiphy->bands[band]->n_channels;
+ }
+
+ mld->channel_survey = kzalloc(struct_size(mld->channel_survey,
+ channels, n_channels),
+ GFP_KERNEL);
+
+ if (!mld->channel_survey)
+ return;
+
+ mld->channel_survey->n_channels = n_channels;
+ n_channels = 0;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mld->wiphy->bands[band])
+ continue;
+
+ mld->channel_survey->bands[band] =
+ &mld->channel_survey->channels[n_channels];
+ n_channels += mld->wiphy->bands[band]->n_channels;
+ }
+ }
+
+ band = iwl_mld_phy_band_to_nl80211(le32_to_cpu(notif->band));
+ chan_idx = iwl_mld_chanidx_from_phy(mld, band,
+ le32_to_cpu(notif->channel));
+ if (WARN_ON_ONCE(chan_idx < 0))
+ return;
+
+ IWL_DEBUG_SCAN(mld, "channel survey received for freq %d\n",
+ mld->wiphy->bands[band]->channels[chan_idx].center_freq);
+
+ info = &mld->channel_survey->bands[band][chan_idx];
+
+ /* Times are all in ms */
+ info->time = le32_to_cpu(notif->active_time);
+ info->time_busy = le32_to_cpu(notif->busy_time);
+ info->noise =
+ iwl_average_neg_dbm(notif->noise, ARRAY_SIZE(notif->noise));
+}
+
+int iwl_mld_mac80211_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int curr_idx = 0;
+
+ if (!mld->channel_survey)
+ return -ENOENT;
+
+ /* Iterate bands/channels to find the requested index.
+ * Logically this returns the entry with index "idx" from a flattened
+ * survey result array that only contains channels with information.
+ * The current index into this flattened array is tracked in curr_idx.
+ */
+ for (enum nl80211_band band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ mld->wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ for (int per_band_idx = 0;
+ per_band_idx < sband->n_channels;
+ per_band_idx++) {
+ struct iwl_mld_survey_channel *info =
+ &mld->channel_survey->bands[band][per_band_idx];
+
+ /* Skip the entry entirely; it was not reported/scanned, so do
+ * not increase curr_idx for it.
+ */
+ if (!info->time)
+ continue;
+
+ /* Search did not reach the requested entry yet,
+ * increment curr_idx and continue.
+ */
+ if (idx != curr_idx) {
+ curr_idx++;
+ continue;
+ }
+
+ /* Found (the next) channel to report */
+ survey->channel = &sband->channels[per_band_idx];
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = info->time;
+ survey->time_busy = info->time_busy;
+ survey->noise = info->noise;
+ if (survey->noise < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
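
For context on the new get_survey op: cfg80211 retrieves survey data by calling it repeatedly with an increasing index until the driver returns -ENOENT, which is why the implementation above only counts channels that actually carry measurements. A purely illustrative caller-style loop (not a real kernel user):

/* Illustrative only: walks the flattened survey entries the way a
 * dump-style caller would.  iwl_mld_mac80211_get_survey() is the op
 * added above; the ieee80211_hw pointer is assumed to exist.
 */
static void dump_survey_sketch(struct ieee80211_hw *hw)
{
	struct survey_info survey;

	for (int idx = 0; ; idx++) {
		memset(&survey, 0, sizeof(survey));

		if (iwl_mld_mac80211_get_survey(hw, idx, &survey))
			break;	/* -ENOENT: no more measured channels */

		/* survey.time / survey.time_busy are in ms; survey.noise
		 * is only meaningful when SURVEY_INFO_NOISE_DBM is set.
		 */
	}
}
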
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.h b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
index 4044cac3f086..69110f0cfc8e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
@@ -30,6 +30,12 @@ void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt);
+int iwl_mld_mac80211_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey);
+
+void iwl_mld_handle_channel_survey_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
#define WFA_TPC_IE_LEN 9
static inline int iwl_mld_scan_max_template_size(void)
@@ -133,4 +139,35 @@ struct iwl_mld_scan {
u64 last_mlo_scan_time;
};
+/**
+ * struct iwl_mld_survey_channel - per-channel survey information
+ *
+ * Driver version of &struct survey_info with just the data we want to report.
+ *
+ * @time: time in ms the radio was on the channel
+ * @time_busy: time in ms the channel was sensed busy
+ * @noise: channel noise in dBm
+ */
+struct iwl_mld_survey_channel {
+ u32 time;
+ u32 time_busy;
+ s8 noise;
+};
+
+/**
+ * struct iwl_mld_survey - survey information
+ *
+ * Survey information for all available channels.
+ *
+ * @bands: per-band array for per-channel survey data, points into @channels
+ * @n_channels: Number of @channels entries that are allocated
+ * @channels: per-channel information
+ */
+struct iwl_mld_survey {
+ struct iwl_mld_survey_channel *bands[NUM_NL80211_BANDS];
+
+ int n_channels;
+ struct iwl_mld_survey_channel channels[] __counted_by(n_channels);
+};
+
#endif /* __iwl_mld_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
index f633cb1cf510..cbc64db5eab6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/stats.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -508,8 +508,6 @@ void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
iwl_mld_process_per_link_stats(mld, stats->per_link, curr_ts_usec);
iwl_mld_process_per_sta_stats(mld, stats->per_sta);
iwl_mld_process_per_phy_stats(mld, stats->per_phy);
-
- iwl_mld_check_omi_bw_reduction(mld);
}
void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index ef9bab042902..029c846a236f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -250,7 +250,7 @@ static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw,
/*
* For non-QoS this relies on the fact that both the uCode and
- * mac80211 use TID 0 (as they need to to avoid replay attacks)
+ * mac80211 use TID 0 (as they need to avoid replay attacks)
* for checking the IV in the frames.
*/
for (i = 0; i < IWL_NUM_RSC; i++) {
@@ -386,7 +386,7 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
/*
* For non-QoS this relies on the fact that both the uCode and
- * mac80211 use TID 0 (as they need to to avoid replay attacks)
+ * mac80211 use TID 0 (as they need to avoid replay attacks)
* for checking the IV in the frames.
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
@@ -494,7 +494,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
if (data.have_rsc_tsc)
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
CMD_ASYNC,
- sizeof(data.rsc_tsc),
+ sizeof(*data.rsc_tsc),
data.rsc_tsc);
else
ret = 0;
@@ -1954,6 +1954,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
WOWLAN_KEY_MAX_SIZE);
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+ u8 key_data[WOWLAN_KEY_MAX_SIZE];
conf->cipher = gtk_cipher;
@@ -1988,8 +1989,10 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
conf->cipher, conf->keyidx);
memcpy(conf->key, status->gtk[i].key,
sizeof(status->gtk[i].key));
+ memcpy(key_data, status->gtk[i].key, sizeof(status->gtk[i].key));
- key = ieee80211_gtk_rekey_add(vif, conf, link_id);
+ key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id, key_data,
+ sizeof(key_data), link_id);
if (IS_ERR(key)) {
/* FW may send also the old keys */
if (PTR_ERR(key) == -EALREADY)
@@ -2021,6 +2024,7 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
struct ieee80211_key_conf *key_config;
struct ieee80211_key_seq seq;
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+ u8 key[WOWLAN_KEY_MAX_SIZE];
s8 keyidx = key_data->id;
conf->cipher = cipher;
@@ -2050,7 +2054,10 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(key_data->key));
memcpy(conf->key, key_data->key, conf->keylen);
- key_config = ieee80211_gtk_rekey_add(vif, conf, link_id);
+ memcpy(key, key_data->key, sizeof(key_data->key));
+
+ key_config = ieee80211_gtk_rekey_add(vif, keyidx, key, sizeof(key),
+ link_id);
if (IS_ERR(key_config)) {
/* FW may send also the old keys */
return PTR_ERR(key_config) == -EALREADY;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index d9d61e25807a..8805ab344895 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -301,7 +301,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_init_link(&mvmvif->deflink);
- /* No need to allocate data queues to P2P Device MAC and NAN.*/
+ /* No need to allocate data queues to P2P Device MAC */
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
return 0;
@@ -976,7 +976,7 @@ u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm,
static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon,
- struct iwl_tx_cmd_v6 *tx)
+ struct iwl_tx_cmd_v6_params *tx_params)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_tx_info *info;
@@ -986,30 +986,30 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
info = IEEE80211_SKB_CB(beacon);
/* Set up TX command fields */
- tx->len = cpu_to_le16((u16)beacon->len);
- tx->sta_id = mvmvif->deflink.bcast_sta.sta_id;
- tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_params->len = cpu_to_le16((u16)beacon->len);
+ tx_params->sta_id = mvmvif->deflink.bcast_sta.sta_id;
+ tx_params->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
tx_flags |=
iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
TX_CMD_FLG_BT_PRIO_POS;
- tx->tx_flags = cpu_to_le32(tx_flags);
+ tx_params->tx_flags = cpu_to_le32(tx_flags);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
- tx->rate_n_flags =
+ tx_params->rate_n_flags =
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
RATE_MCS_ANT_POS);
}
rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
- tx->rate_n_flags |=
+ tx_params->rate_n_flags |=
cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate));
if (rate == IWL_FIRST_CCK_RATE)
- tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1);
+ tx_params->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index fa9d5e0b6609..4ad3d32683d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/kernel.h>
+#include <linux/fips.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -461,7 +462,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_ERR(mvm,
"iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n");
- ieee80211_hw_set(hw, MFP_CAPABLE);
+ if (!fips_enabled)
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->n_cipher_suites++;
if (iwl_mvm_has_new_rx_api(mvm)) {
@@ -485,12 +488,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
}
- if (sec_key_ver &&
+ /*
+ * Beacon protection must be handled by the firmware, so it
+ * cannot be offered when fips_enabled is set.
+ */
+ if (!fips_enabled && sec_key_ver &&
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT))
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_BEACON_PROTECTION);
- else if (fw_has_capa(&mvm->fw->ucode_capa,
+ else if (!fips_enabled &&
+ fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);
@@ -730,7 +738,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
- device_can_wakeup(mvm->trans->dev)) {
+ device_can_wakeup(mvm->trans->dev) && !fips_enabled) {
mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_EAP_IDENTITY_REQ |
@@ -1825,12 +1833,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
- /* Currently not much to do for NAN */
- if (vif->type == NL80211_IFTYPE_NAN) {
- ret = 0;
- goto out;
- }
-
/*
* The AP binding flow can be done only after the beacon
* template is configured (which happens only in the mac80211
@@ -3330,7 +3332,7 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
* us to stop a hw_scan when it's already stopped. This can
* happen, for instance, if we stopped the scan ourselves,
* called ieee80211_scan_completed() and the userspace called
- * cancel scan scan before ieee80211_scan_work() could run.
+ * cancel scan before ieee80211_scan_work() could run.
* To handle that, simply return if the scan is not running.
*/
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
@@ -4343,7 +4345,7 @@ int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
* us to stop a sched_scan when it's already stopped. This
* can happen, for instance, if we stopped the scan ourselves,
* called ieee80211_sched_scan_stopped() and the userspace called
- * stop sched scan scan before ieee80211_sched_scan_stopped_work()
+ * stop sched scan before ieee80211_sched_scan_stopped_work()
* could run. To handle this, simply return if the scan is
* not running.
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index 3f8b840871d3..2d116a41913c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -282,9 +282,6 @@ int iwl_mvm_mld_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN))
- return -EOPNOTSUPP;
-
if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
vif->addr, ieee80211_vif_type_p2p(vif)))
return -EIO;
@@ -307,9 +304,6 @@ int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN))
- return -EOPNOTSUPP;
-
if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
vif->addr, ieee80211_vif_type_p2p(vif)))
return -EIO;
@@ -327,9 +321,6 @@ int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
};
int ret;
- if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN))
- return -EOPNOTSUPP;
-
if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
vif->addr, ieee80211_vif_type_p2p(vif)))
return -EIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 9e83c671f4e2..fdaeefa305e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1325,7 +1325,6 @@ struct iwl_mvm {
u8 range_resp;
} cmd_ver;
- struct ieee80211_vif *nan_vif;
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
/*
@@ -1821,11 +1820,12 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta);
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd_v6 *tx_cmd,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, __le16 fc);
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc);
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
@@ -1854,12 +1854,12 @@ int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm,
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
- struct iwl_tx_cmd_v6 *tx_cmd)
+ struct iwl_tx_cmd_v6_params *tx_cmd_params)
{
struct ieee80211_key_conf *keyconf = info->control.hw_key;
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ tx_cmd_params->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd_params->key, keyconf->key, keyconf->keylen);
}
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 0057fddf88f0..610de29b7be0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -231,7 +231,6 @@ static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_NAN:
data->allow_uapsd = false;
break;
case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 4b57ca56e1f6..62e76a79f621 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -905,10 +905,15 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* already ahead and it will be dropped.
* If the last sub-frame is not on this queue - we will get frame
* release notification with up to date NSSN.
+ * If this is the first frame that is stored in the buffer, the head_sn
+ * may be outdated. Update it based on the last NSSN to make sure it
+ * will be released when the frame release notification arrives.
*/
if (!amsdu || last_subframe)
iwl_mvm_release_frames(mvm, sta, napi, baid_data,
buffer, nssn);
+ else if (buffer->num_stored == 1)
+ buffer->head_sn = nssn;
spin_unlock_bh(&buffer->lock);
return true;
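
The same head_sn fix appears in both the mld and mvm reorder paths (see the first hunk of this mail as well). A condensed sketch of the decision, keeping only the control flow and the two buffer fields touched above:

/* Control-flow sketch of the tail of the reorder path above; the buffer
 * type is the driver's, release_frames_up_to() is a placeholder for the
 * real release helper.
 */
static void reorder_tail_sketch(struct iwl_mvm_reorder_buffer *buffer,
				bool amsdu, bool last_subframe, u16 nssn)
{
	if (!amsdu || last_subframe) {
		/* Normal case: release every frame up to the new NSSN. */
		release_frames_up_to(buffer, nssn);	/* placeholder */
		return;
	}

	/* A non-final A-MSDU subframe was buffered.  If it is the only
	 * stored frame, head_sn may still hold a stale value, so resync
	 * it to the latest NSSN; the pending frame is then flushed by the
	 * frame release notification instead of being stuck forever.
	 */
	if (buffer->num_stored == 1)
		buffer->head_sn = nssn;
}
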
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 5f6797598998..11c6b86db4ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2461,7 +2461,7 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
}
-/* Send the FW a request to remove the station from it's internal data
+/* Send the FW a request to remove the station from its internal data
* structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -2524,7 +2524,7 @@ void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
/*
- * Send the FW a request to remove the station from it's internal data
+ * Send the FW a request to remove the station from its internal data
* structures, and in addition remove it from the local data structure.
*/
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2677,7 +2677,7 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
}
/*
- * Send the FW a request to remove the station from it's internal data
+ * Send the FW a request to remove the station from its internal data
* structures, and in addition remove it from the local data structure.
*/
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index ac2cf1b8ce23..25d1a882a6a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -148,12 +148,12 @@ out:
* Sets most of the Tx cmd's fields
*/
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_tx_cmd_v6 *tx_cmd,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
struct ieee80211_tx_info *info, u8 sta_id)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
- u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
+ u32 tx_flags = le32_to_cpu(tx_cmd_params->tx_flags);
u32 len = skb->len + FCS_LEN;
bool amsdu = false;
u8 ac;
@@ -173,7 +173,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_data_qos(fc)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_cmd_params->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
} else if (ieee80211_is_back_req(fc)) {
@@ -182,17 +182,17 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 ssn = le16_to_cpu(bar->start_seq_num);
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
- tx_cmd->tid_tspec = (control &
+ tx_cmd_params->tid_tspec = (control &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
- WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
- iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+ WARN_ON_ONCE(tx_cmd_params->tid_tspec >= IWL_MAX_TID_COUNT);
+ iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd_params->tid_tspec,
ssn);
} else {
if (ieee80211_is_data(fc))
- tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+ tx_cmd_params->tid_tspec = IWL_TID_NON_QOS;
else
- tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
+ tx_cmd_params->tid_tspec = IWL_MAX_TID_COUNT;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
tx_flags |= TX_CMD_FLG_SEQ_CTL;
@@ -201,8 +201,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
}
/* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */
- if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
- ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ if (tx_cmd_params->tid_tspec < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tx_cmd_params->tid_tspec];
else
ac = tid_to_mac80211_ac[0];
@@ -211,20 +211,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_mgmt(fc)) {
if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
else if (ieee80211_is_action(fc))
- tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
else
- tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
/* The spec allows Action frames in A-MPDU, we don't support
* it
*/
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
- tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
} else {
- tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+ tx_cmd_params->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
}
if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
@@ -236,13 +236,13 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
ieee80211_action_contains_tpc(skb))
tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
- tx_cmd->tx_flags = cpu_to_le32(tx_flags);
+ tx_cmd_params->tx_flags = cpu_to_le32(tx_flags);
/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
- tx_cmd->len = cpu_to_le16((u16)skb->len);
- tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
- tx_cmd->sta_id = sta_id;
+ tx_cmd_params->len = cpu_to_le16((u16)skb->len);
+ tx_cmd_params->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ tx_cmd_params->sta_id = sta_id;
- tx_cmd->offload_assist =
+ tx_cmd_params->offload_assist =
cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, info, amsdu));
}
@@ -395,22 +395,23 @@ static __le32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
/*
* Sets the fields in the Tx cmd that are rate related
*/
-void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta, __le16 fc)
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
{
/* Set retry limit on RTS packets */
- tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+ tx_cmd_params->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
/* Set retry limit on DATA packets and Probe Responses*/
if (ieee80211_is_probe_resp(fc)) {
- tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
- tx_cmd->rts_retry_limit =
- min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
+ tx_cmd_params->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+ tx_cmd_params->rts_retry_limit =
+ min(tx_cmd_params->data_retry_limit, tx_cmd_params->rts_retry_limit);
} else if (ieee80211_is_back_req(fc)) {
- tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
+ tx_cmd_params->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
} else {
- tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+ tx_cmd_params->data_retry_limit = IWL_DEFAULT_TX_RETRY;
}
/*
@@ -423,17 +424,17 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd_v6 *tx_cmd,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ tx_cmd_params->initial_rate_index = 0;
+ tx_cmd_params->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
}
} else if (ieee80211_is_back_req(fc)) {
- tx_cmd->tx_flags |=
+ tx_cmd_params->tx_flags |=
cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
}
/* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc);
+ tx_cmd_params->rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc);
}
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -458,7 +459,7 @@ static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
*/
static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
- struct iwl_tx_cmd_v6 *tx_cmd,
+ struct iwl_tx_cmd_v6_params *tx_cmd_params,
struct sk_buff *skb_frag,
int hdrlen)
{
@@ -469,26 +470,26 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
- iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+ iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd_params);
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
case WLAN_CIPHER_SUITE_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ tx_cmd_params->sec_ctl = TX_CMD_SEC_TKIP;
pn = atomic64_inc_return(&keyconf->tx_pn);
ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
- ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd_params->key);
break;
case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ tx_cmd_params->sec_ctl |= TX_CMD_SEC_KEY128;
fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+ tx_cmd_params->sec_ctl |= TX_CMD_SEC_WEP |
((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
TX_CMD_SEC_WEP_KEY_IDX_MSK);
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+ memcpy(&tx_cmd_params->key[3], keyconf->key, keyconf->keylen);
break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
@@ -501,12 +502,12 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
* one.
* Need to handle this.
*/
- tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
- tx_cmd->key[0] = keyconf->hw_key_idx;
+ tx_cmd_params->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd_params->key[0] = keyconf->hw_key_idx;
iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
default:
- tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
+ tx_cmd_params->sec_ctl |= TX_CMD_SEC_EXT;
}
}
@@ -636,11 +637,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd = (struct iwl_tx_cmd_v6 *)dev_cmd->payload;
if (info->control.hw_key)
- iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
+ iwl_mvm_set_tx_cmd_crypto(mvm, info, &tx_cmd->params, skb, hdrlen);
- iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
+ iwl_mvm_set_tx_cmd(mvm, skb, &tx_cmd->params, info, sta_id);
- iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+ iwl_mvm_set_tx_cmd_rate(mvm, &tx_cmd->params, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
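The renames above are mechanical: the length, crypto and rate fields now live in an embedded params block, so helpers that only touch those fields can be shared by any command layout embedding the same block. A generic sketch of that split, with illustrative names rather than the real iwlwifi layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_tx_params {
        __le16 len;
        u8 sta_id;
        u8 sec_ctl;
};

struct example_tx_cmd {
        struct example_tx_params params;        /* shared parameter block */
        u8 hdr[26];                             /* command-specific tail */
};

/* Helpers take only the shared block, so every command embedding it can reuse them. */
static void example_set_len(struct example_tx_params *p, u16 len)
{
        p->len = cpu_to_le16(len);
}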
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
index 10a12938d8f8..84a05cc1c27a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
@@ -1963,7 +1963,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
* have in the MPDU by themselves, but that we duplicate into
* all the different MSDUs inside the A-MSDU.
*/
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+ le16_add_cpu(&tx_cmd->params.len, -snap_ip_tcp_hdrlen);
tso_start(skb, &tso);
@@ -2006,7 +2006,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
hdr_tb_phys, hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
+ le16_add_cpu(&tx_cmd->params.len, pos_hdr - subf_hdrs_start);
/* prepare the start_hdr for the next subframe */
start_hdr = pos_hdr;
@@ -2074,11 +2074,11 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
__le16 bc_ent;
struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
struct iwl_tx_cmd_v6 *tx_cmd = (void *)dev_cmd->payload;
- u8 sta_id = tx_cmd->sta_id;
+ u8 sta_id = tx_cmd->params.sta_id;
scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
- sec_ctl = tx_cmd->sec_ctl;
+ sec_ctl = tx_cmd->params.sec_ctl;
switch (sec_ctl & TX_CMD_SEC_MSK) {
case TX_CMD_SEC_CCM:
@@ -2185,10 +2185,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd_v6, scratch);
+ offsetof(struct iwl_tx_cmd_v6_params, scratch);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+ tx_cmd->params.dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->params.dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[txq->write_ptr].meta;
@@ -2210,7 +2210,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_len = ALIGN(len, 4);
/* Tell NIC about any 2-byte padding after MAC header */
if (tb1_len != len)
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
+ tx_cmd->params.tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
} else {
tb1_len = len;
}
@@ -2225,7 +2225,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* there must be data left over for TB1 or this code must be changed */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v6) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_v6, scratch) >
+ offsetofend(struct iwl_tx_cmd_v6_params, scratch) >
IWL_FIRST_TB_SIZE);
/* map the data for TB1 */
@@ -2271,7 +2271,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
- iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->params.len),
iwl_txq_gen1_tfd_get_num_tbs(tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
@@ -2323,7 +2323,7 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
if (txq_id != trans->conf.cmd_queue)
- sta_id = tx_cmd->sta_id;
+ sta_id = tx_cmd->params.sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index 4d660cef3de9..c31bbd4e7a4a 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -238,6 +238,33 @@ static void devinfo_no_mac_cfg_dups(struct kunit *test)
}
}
+static void devinfo_api_range(struct kunit *test)
+{
+ /* Check that all iwl_mac_cfg's have either both min and max set, or neither */
+ for (int i = 0; iwl_hw_card_ids[i].vendor; i++) {
+ const struct iwl_mac_cfg *mac_cfg =
+ (void *)iwl_hw_card_ids[i].driver_data;
+ const struct iwl_family_base_params *base = mac_cfg->base;
+
+ KUNIT_EXPECT_EQ_MSG(test, !!base->ucode_api_min,
+ !!base->ucode_api_max,
+ "%ps: ucode_api_min (%u) and ucode_api_min (%u) should be both set or neither.\n",
+ base, base->ucode_api_min,
+ base->ucode_api_max);
+ }
+
+ /* Check the same for the iwl_rf_cfg's */
+ for (int i = 0; i < iwl_dev_info_table_size; i++) {
+ const struct iwl_rf_cfg *rf_cfg = iwl_dev_info_table[i].cfg;
+
+ KUNIT_EXPECT_EQ_MSG(test, !!rf_cfg->ucode_api_min,
+ !!rf_cfg->ucode_api_max,
+ "%ps: ucode_api_min (%u) and ucode_api_min (%u) should be both set or neither.\n",
+ rf_cfg, rf_cfg->ucode_api_min,
+ rf_cfg->ucode_api_max);
+ }
+}
+
static struct kunit_case devinfo_test_cases[] = {
KUNIT_CASE(devinfo_table_order),
KUNIT_CASE(devinfo_discrete_match),
@@ -248,6 +275,7 @@ static struct kunit_case devinfo_test_cases[] = {
KUNIT_CASE(devinfo_check_killer_subdev),
KUNIT_CASE(devinfo_pci_ids),
KUNIT_CASE(devinfo_no_mac_cfg_dups),
+ KUNIT_CASE(devinfo_api_range),
{}
};
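The new test case leans on the `!!x` normalisation so that "both limits set or neither set" collapses into a single equality check. The same invariant as a standalone sketch (struct and field names are illustrative):

#include <linux/types.h>

struct example_api_range {
        u32 api_min;
        u32 api_max;
};

/* '!!' folds each field to 0 or 1, so the pairing rule is one comparison. */
static bool example_api_range_consistent(const struct example_api_range *r)
{
        return !!r->api_min == !!r->api_max;
}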
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index 27f44a9f0bc1..9d66dcae54e0 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -32,7 +32,7 @@ MODULE_FIRMWARE("3826.eeprom");
/* gpios should be handled in board files and provided via platform data,
* but because it's currently impossible for p54spi to have a header file
- * in include/linux, let's use module paramaters for now
+ * in include/linux, let's use module parameters for now
*/
static int p54spi_gpio_power = 97;
@@ -155,7 +155,7 @@ static int p54spi_request_firmware(struct ieee80211_hw *dev)
struct p54s_priv *priv = dev->priv;
int ret;
- /* FIXME: should driver use it's own struct device? */
+ /* FIXME: should driver use its own struct device? */
ret = request_firmware(&priv->firmware, "3826.arm", &priv->spi->dev);
if (ret < 0) {
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 2e2c193716d9..94dd488becaf 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -2146,8 +2146,8 @@ static void lbs_reg_notifier(struct wiphy *wiphy,
}
/*
- * This function get's called after lbs_setup_firmware() determined the
- * firmware capabities. So we can setup the wiphy according to our
+ * This function gets called after lbs_setup_firmware() determined the
+ * firmware capabilities. So we can setup the wiphy according to our
* hardware/firmware.
*/
int lbs_cfg_register(struct lbs_private *priv)
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 286378770e9e..3498743d5ec0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -4783,10 +4783,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
wiphy->n_iface_combinations = 1;
- if (adapter->max_sta_conn > adapter->max_p2p_conn)
- wiphy->max_ap_assoc_sta = adapter->max_sta_conn;
- else
- wiphy->max_ap_assoc_sta = adapter->max_p2p_conn;
+ wiphy->max_ap_assoc_sta = max_t(typeof(wiphy->max_ap_assoc_sta),
+ adapter->max_sta_conn,
+ adapter->max_p2p_conn);
/* Initialize cipher suits */
wiphy->cipher_suites = mwifiex_cipher_suites;
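max_t() is used here rather than an open-coded comparison because the two connection-limit fields need not share a type: the macro casts both operands to the named type before comparing, so mixed widths are handled explicitly. A minimal sketch with made-up widths:

#include <linux/minmax.h>
#include <linux/types.h>

/* Cast both limits to the destination type, then take the larger one. */
static u32 example_assoc_limit(u16 sta_limit, u8 p2p_limit)
{
        return max_t(u32, sta_limit, p2p_limit);
}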
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 91458f3bd14a..e9e896606912 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -1119,7 +1119,7 @@ struct host_cmd_ds_get_hw_spec {
__le32 fw_cap_info;
__le32 dot_11n_dev_cap;
u8 dev_mcs_support;
- __le16 mp_end_port; /* SDIO only, reserved for other interfacces */
+ __le16 mp_end_port; /* SDIO only, reserved for other interfaces */
__le16 mgmt_buf_count; /* mgmt IE buffer count */
__le32 reserved_5;
__le32 reserved_6;
@@ -1739,7 +1739,7 @@ struct host_cmd_ds_11n_cfg {
struct host_cmd_ds_txbuf_cfg {
__le16 action;
__le16 buff_size;
- __le16 mp_end_port; /* SDIO only, reserved for other interfacces */
+ __le16 mp_end_port; /* SDIO only, reserved for other interfaces */
__le16 reserved3;
} __packed;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 2912568612bc..8dd5c29fb75b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1810,7 +1810,8 @@ static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
struct page *page = virt_to_head_page(buf);
- page_pool_put_full_page(page->pp, page, allow_direct);
+ page_pool_put_full_page(pp_page_to_nmdesc(page)->pp, page,
+ allow_direct);
}
static inline void *
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 9d80adc45d6b..fedc7d59216a 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -1287,10 +1287,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
offset += 8;
while (((int)size) && (offset < buffer_size)) {
- if (size <= blksz)
- size2 = size;
- else
- size2 = blksz;
+ size2 = min(size, blksz);
memcpy(dma_buffer, &buffer[offset], size2);
ret = wilc->hif_func->hif_block_tx(wilc, addr,
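This is one of several hunks in the series that replace an open-coded if/else clamp with min()/max() from <linux/minmax.h>. The block-download loop it lands in follows a common shape; a self-contained sketch with illustrative names:

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

/* Stage 'size' bytes from 'src' through a bounce buffer, at most 'blksz'
 * bytes per iteration; names are illustrative, not the wilc1000 ones. */
static void example_copy_in_blocks(u8 *bounce, const u8 *src, u32 size, u32 blksz)
{
        u32 offset = 0;

        while (size) {
                u32 chunk = min(size, blksz);

                memcpy(bounce, src + offset, chunk);
                /* ...hand 'chunk' bytes to the hardware here... */
                offset += chunk;
                size -= chunk;
        }
}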
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index d375ad60167f..a900421753ac 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -99,11 +99,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
return r;
}
-void plfxlc_mac_release(struct plfxlc_mac *mac)
-{
- plfxlc_chip_release(&mac->chip);
-}
-
int plfxlc_op_start(struct ieee80211_hw *hw)
{
plfxlc_hw_mac(hw)->chip.usb.initialized = 1;
@@ -756,3 +751,9 @@ struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf)
SET_IEEE80211_DEV(hw, &intf->dev);
return hw;
}
+
+void plfxlc_mac_release_hw(struct ieee80211_hw *hw)
+{
+ plfxlc_chip_release(&plfxlc_hw_mac(hw)->chip);
+ ieee80211_free_hw(hw);
+}
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h
index 9384acddcf26..56da502999c1 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.h
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.h
@@ -168,7 +168,7 @@ static inline u8 *plfxlc_mac_get_perm_addr(struct plfxlc_mac *mac)
}
struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf);
-void plfxlc_mac_release(struct plfxlc_mac *mac);
+void plfxlc_mac_release_hw(struct ieee80211_hw *hw);
int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address);
int plfxlc_mac_init_hw(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index d8b0b79dea1a..711902a809db 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -604,7 +604,7 @@ static int probe(struct usb_interface *intf,
r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number);
if (r) {
dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r);
- goto error;
+ goto error_free_hw;
}
chip->unit_type = STA;
@@ -613,13 +613,13 @@ static int probe(struct usb_interface *intf,
r = plfxlc_mac_preinit_hw(hw, hw_address);
if (r) {
dev_err(&intf->dev, "Init mac failed (%d)\n", r);
- goto error;
+ goto error_free_hw;
}
r = ieee80211_register_hw(hw);
if (r) {
dev_err(&intf->dev, "Register device failed (%d)\n", r);
- goto error;
+ goto error_free_hw;
}
if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) ==
@@ -632,7 +632,7 @@ static int probe(struct usb_interface *intf,
}
if (r != 0) {
dev_err(&intf->dev, "FPGA download failed (%d)\n", r);
- goto error;
+ goto error_unreg_hw;
}
tx->mac_fifo_full = 0;
@@ -642,21 +642,21 @@ static int probe(struct usb_interface *intf,
r = plfxlc_usb_init_hw(usb);
if (r < 0) {
dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r);
- goto error;
+ goto error_unreg_hw;
}
msleep(PLF_MSLEEP_TIME);
r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON);
if (r < 0) {
dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r);
- goto error;
+ goto error_unreg_hw;
}
msleep(PLF_MSLEEP_TIME);
r = plfxlc_chip_set_rate(chip, 8);
if (r < 0) {
dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r);
- goto error;
+ goto error_unreg_hw;
}
msleep(PLF_MSLEEP_TIME);
@@ -664,7 +664,7 @@ static int probe(struct usb_interface *intf,
hw_address, ETH_ALEN, USB_REQ_MAC_WR);
if (r < 0) {
dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r);
- goto error;
+ goto error_unreg_hw;
}
plfxlc_chip_enable_rxtx(chip);
@@ -691,12 +691,12 @@ static int probe(struct usb_interface *intf,
plfxlc_mac_init_hw(hw);
usb->initialized = true;
return 0;
+
+error_unreg_hw:
+ ieee80211_unregister_hw(hw);
+error_free_hw:
+ plfxlc_mac_release_hw(hw);
error:
- if (hw) {
- plfxlc_mac_release(plfxlc_hw_mac(hw));
- ieee80211_unregister_hw(hw);
- ieee80211_free_hw(hw);
- }
dev_err(&intf->dev, "pureLifi:Device error");
return r;
}
@@ -730,8 +730,7 @@ static void disconnect(struct usb_interface *intf)
*/
usb_reset_device(interface_to_usbdev(intf));
- plfxlc_mac_release(mac);
- ieee80211_free_hw(hw);
+ plfxlc_mac_release_hw(hw);
}
static void plfxlc_usb_resume(struct plfxlc_usb *usb)
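The probe() rework above replaces a single catch-all error label with ordered unwind labels, so each failure point undoes exactly what has already been set up, in reverse order. A schematic sketch of that idiom; the helpers are placeholders, not plfxlc functions:

#include <linux/device.h>

int example_alloc(struct device *dev);           /* placeholder */
int example_register(struct device *dev);        /* placeholder */
int example_start(struct device *dev);           /* placeholder */
void example_unregister(struct device *dev);     /* placeholder */
void example_free(struct device *dev);           /* placeholder */

static int example_probe(struct device *dev)
{
        int err;

        err = example_alloc(dev);
        if (err)
                return err;

        err = example_register(dev);
        if (err)
                goto err_free;

        err = example_start(dev);
        if (err)
                goto err_unregister;

        return 0;

err_unregister:
        example_unregister(dev);
err_free:
        example_free(dev);
        return err;
}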
diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig
index d1fd66d44a7e..4d98b7723c56 100644
--- a/drivers/net/wireless/ralink/rt2x00/Kconfig
+++ b/drivers/net/wireless/ralink/rt2x00/Kconfig
@@ -202,8 +202,7 @@ endif
config RT2800SOC
tristate "Ralink WiSoC support"
- depends on SOC_RT288X || SOC_RT305X || SOC_MT7620
- select RT2X00_LIB_SOC
+ depends on OF && (SOC_RT288X || SOC_RT305X || SOC_MT7620 || COMPILE_TEST)
select RT2X00_LIB_MMIO
select RT2X00_LIB_CRYPTO
select RT2X00_LIB_FIRMWARE
@@ -231,10 +230,6 @@ config RT2X00_LIB_PCI
tristate
select RT2X00_LIB
-config RT2X00_LIB_SOC
- tristate
- select RT2X00_LIB
-
config RT2X00_LIB_USB
tristate
select RT2X00_LIB
diff --git a/drivers/net/wireless/ralink/rt2x00/Makefile b/drivers/net/wireless/ralink/rt2x00/Makefile
index de030ebcdf6e..48d84d243606 100644
--- a/drivers/net/wireless/ralink/rt2x00/Makefile
+++ b/drivers/net/wireless/ralink/rt2x00/Makefile
@@ -12,7 +12,6 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
-obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
obj-$(CONFIG_RT2800_LIB) += rt2800lib.o
obj-$(CONFIG_RT2800_LIB_MMIO) += rt2800mmio.o
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 4b5a7c9b6499..b264ed0af923 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -639,7 +639,7 @@ static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
/*
* Use the crc ccitt algorithm.
* This will return the same value as the legacy driver which
- * used bit ordering reversion on the both the firmware bytes
+ * used bit ordering reversion on both the firmware bytes
* before input input as well as on the final output.
* Obviously using crc ccitt directly is much more efficient.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 701ba54bf3e5..8f510a84e7f1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -18,11 +18,11 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include "rt2x00.h"
#include "rt2x00mmio.h"
-#include "rt2x00soc.h"
#include "rt2800.h"
#include "rt2800lib.h"
#include "rt2800mmio.h"
@@ -131,6 +131,24 @@ static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
return 0;
}
+#ifdef CONFIG_PM
+static int rt2800soc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+
+ return rt2x00lib_suspend(rt2x00dev);
+}
+
+static int rt2800soc_resume(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+
+ return rt2x00lib_resume(rt2x00dev);
+}
+#endif /* CONFIG_PM */
+
static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.add_chanctx = ieee80211_emulate_add_chanctx,
.remove_chanctx = ieee80211_emulate_remove_chanctx,
@@ -238,20 +256,102 @@ static const struct rt2x00_ops rt2800soc_ops = {
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};
+static int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
+{
+ struct rt2x00_dev *rt2x00dev;
+ struct ieee80211_hw *hw;
+ void __iomem *mem;
+ struct clk *clk;
+ __le16 *eeprom;
+ int retval;
+ u32 *rf;
+ int irq;
+
+ mem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ eeprom = devm_kzalloc(&pdev->dev, ops->eeprom_size, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ rf = devm_kzalloc(&pdev->dev, ops->rf_size, GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
+ if (!hw)
+ return dev_err_probe(&pdev->dev, -ENOMEM, "Failed to allocate hardware");
+
+ platform_set_drvdata(pdev, hw);
+
+ rt2x00dev = hw->priv;
+ rt2x00dev->dev = &pdev->dev;
+ rt2x00dev->ops = ops;
+ rt2x00dev->hw = hw;
+ rt2x00dev->irq = irq;
+ rt2x00dev->clk = clk;
+ rt2x00dev->eeprom = eeprom;
+ rt2x00dev->rf = rf;
+ rt2x00dev->name = pdev->dev.driver->name;
+ rt2x00dev->csr.base = mem;
+
+ rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
+
+ retval = rt2x00lib_probe_dev(rt2x00dev);
+ if (retval)
+ goto exit_free_device;
+
+ return 0;
+
+exit_free_device:
+ ieee80211_free_hw(hw);
+
+ return retval;
+}
+
static int rt2800soc_probe(struct platform_device *pdev)
{
return rt2x00soc_probe(pdev, &rt2800soc_ops);
}
+static void rt2800soc_remove(struct platform_device *pdev)
+{
+ struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+
+ /*
+ * Free all allocated data.
+ */
+ rt2x00lib_remove_dev(rt2x00dev);
+ ieee80211_free_hw(hw);
+}
+
+static const struct of_device_id rt2880_wmac_match[] = {
+ { .compatible = "ralink,rt2880-wifi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt2880_wmac_match);
+
static struct platform_driver rt2800soc_driver = {
.driver = {
.name = "rt2800_wmac",
- .mod_name = KBUILD_MODNAME,
+ .of_match_table = rt2880_wmac_match,
},
.probe = rt2800soc_probe,
- .remove = rt2x00soc_remove,
- .suspend = rt2x00soc_suspend,
- .resume = rt2x00soc_resume,
+ .remove = rt2800soc_remove,
+#ifdef CONFIG_PM
+ .suspend = rt2800soc_suspend,
+ .resume = rt2800soc_resume,
+#endif
};
module_platform_driver(rt2800soc_driver);
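The folded-in probe relies on devm_* helpers, which is why the explicit iounmap/kfree paths from the deleted rt2x00soc.c need no replacement: device-managed resources are released automatically, in reverse order, on probe failure or after remove(). A minimal sketch of the resource-acquisition part (driver names are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_soc_probe(struct platform_device *pdev)
{
        void __iomem *regs;
        struct clk *clk;
        int irq;

        /* Unmapped automatically when the device is unbound. */
        regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* NULL when the DT describes no clock; only real errors propagate. */
        clk = devm_clk_get_optional(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return 0;
}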
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 432ddfac2c33..7db29e90eb4f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -357,7 +357,7 @@ static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev,
}
/*
- * Every single frame has it's own tx status, hence report
+ * Every single frame has its own tx status, hence report
* every frame as ampdu of size 1.
*
* TODO: if we can find out how many frames were aggregated
@@ -496,7 +496,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
/*
* If the IV/EIV data was stripped from the frame before it was
* passed to the hardware, we should now reinsert it again because
- * mac80211 will expect the same data to be present it the
+ * mac80211 will expect the same data to be present in the
* frame as it was passed to us.
*/
if (rt2x00_has_cap_hw_crypto(rt2x00dev))
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 013003777fee..13e48b1e7356 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -45,7 +45,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
/*
* For IV/EIV/ICV assembly we must make sure there is
- * at least 8 bytes bytes available in headroom for IV/EIV
+ * at least 8 bytes available in headroom for IV/EIV
* and 8 bytes for ICV data as tailroon.
*/
if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
deleted file mode 100644
index f7f3a2340c39..000000000000
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
- Copyright (C) 2004 - 2009 Felix Fietkau <nbd@openwrt.org>
- <http://rt2x00.serialmonkey.com>
-
- */
-
-/*
- Module: rt2x00soc
- Abstract: rt2x00 generic soc device routines.
- */
-
-#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "rt2x00.h"
-#include "rt2x00soc.h"
-
-static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
-{
- kfree(rt2x00dev->rf);
- rt2x00dev->rf = NULL;
-
- kfree(rt2x00dev->eeprom);
- rt2x00dev->eeprom = NULL;
-
- iounmap(rt2x00dev->csr.base);
-}
-
-static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
-{
- struct platform_device *pdev = to_platform_device(rt2x00dev->dev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- rt2x00dev->csr.base = ioremap(res->start, resource_size(res));
- if (!rt2x00dev->csr.base)
- return -ENOMEM;
-
- rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
- if (!rt2x00dev->eeprom)
- goto exit;
-
- rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
- if (!rt2x00dev->rf)
- goto exit;
-
- return 0;
-
-exit:
- rt2x00_probe_err("Failed to allocate registers\n");
- rt2x00soc_free_reg(rt2x00dev);
-
- return -ENOMEM;
-}
-
-int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
-{
- struct ieee80211_hw *hw;
- struct rt2x00_dev *rt2x00dev;
- int retval;
-
- hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
- if (!hw) {
- rt2x00_probe_err("Failed to allocate hardware\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, hw);
-
- rt2x00dev = hw->priv;
- rt2x00dev->dev = &pdev->dev;
- rt2x00dev->ops = ops;
- rt2x00dev->hw = hw;
- rt2x00dev->irq = platform_get_irq(pdev, 0);
- rt2x00dev->name = pdev->dev.driver->name;
-
- rt2x00dev->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(rt2x00dev->clk))
- rt2x00dev->clk = NULL;
-
- rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
-
- retval = rt2x00soc_alloc_reg(rt2x00dev);
- if (retval)
- goto exit_free_device;
-
- retval = rt2x00lib_probe_dev(rt2x00dev);
- if (retval)
- goto exit_free_reg;
-
- return 0;
-
-exit_free_reg:
- rt2x00soc_free_reg(rt2x00dev);
-
-exit_free_device:
- ieee80211_free_hw(hw);
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(rt2x00soc_probe);
-
-void rt2x00soc_remove(struct platform_device *pdev)
-{
- struct ieee80211_hw *hw = platform_get_drvdata(pdev);
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- /*
- * Free all allocated data.
- */
- rt2x00lib_remove_dev(rt2x00dev);
- rt2x00soc_free_reg(rt2x00dev);
- ieee80211_free_hw(hw);
-}
-EXPORT_SYMBOL_GPL(rt2x00soc_remove);
-
-#ifdef CONFIG_PM
-int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct ieee80211_hw *hw = platform_get_drvdata(pdev);
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00lib_suspend(rt2x00dev);
-}
-EXPORT_SYMBOL_GPL(rt2x00soc_suspend);
-
-int rt2x00soc_resume(struct platform_device *pdev)
-{
- struct ieee80211_hw *hw = platform_get_drvdata(pdev);
- struct rt2x00_dev *rt2x00dev = hw->priv;
-
- return rt2x00lib_resume(rt2x00dev);
-}
-EXPORT_SYMBOL_GPL(rt2x00soc_resume);
-#endif /* CONFIG_PM */
-
-/*
- * rt2x00soc module information.
- */
-MODULE_AUTHOR(DRV_PROJECT);
-MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 soc library");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h
deleted file mode 100644
index d6226b8a10e0..000000000000
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
- <http://rt2x00.serialmonkey.com>
-
- */
-
-/*
- Module: rt2x00soc
- Abstract: Data structures for the rt2x00soc module.
- */
-
-#ifndef RT2X00SOC_H
-#define RT2X00SOC_H
-
-/*
- * SoC driver handlers.
- */
-int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops);
-void rt2x00soc_remove(struct platform_device *pdev);
-#ifdef CONFIG_PM
-int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
-int rt2x00soc_resume(struct platform_device *pdev);
-#else
-#define rt2x00soc_suspend NULL
-#define rt2x00soc_resume NULL
-#endif /* CONFIG_PM */
-
-#endif /* RT2X00SOC_H */
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 496836f716aa..831b5025c634 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -1163,7 +1163,7 @@ static void rtl8xxxu_start_tx_beacon(struct rtl8xxxu_priv *priv)
/*
- * The rtl8723a has 3 channel groups for it's efuse settings. It only
+ * The rtl8723a has 3 channel groups for its efuse settings. It only
* supports the 2.4GHz band, so channels 1 - 14:
* group 0: channels 1 - 3
* group 1: channels 4 - 9
@@ -6618,7 +6618,7 @@ static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
skb_size = fops->rx_agg_buf_size;
skb_size += (rx_desc_sz + sizeof(struct rtl8723au_phy_stats));
} else {
- skb_size = IEEE80211_MAX_FRAME_LEN;
+ skb_size = IEEE80211_MAX_FRAME_LEN + rx_desc_sz;
}
skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 5ca6b49e73c7..4354ae67a379 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1487,22 +1487,9 @@ _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
rtlefuse->
eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
-
- if ((rtlefuse->
- eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
- rtlefuse->
- eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
- > 0) {
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
- rtlefuse->
- eeprom_chnlarea_txpwr_ht40_1s[rf_path]
- [index] -
- rtlefuse->
- eprom_chnl_txpwr_ht40_2sdf[rf_path]
- [index];
- } else {
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
- }
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+ max(rtlefuse->eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path][index], 0);
}
for (i = 0; i < 14; i++) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index ec5d558609fe..989e7cff8e20 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -163,20 +163,9 @@ _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
rtlefuse->
eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
- if ((rtlefuse->
- eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
- rtlefuse->
- eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
- > 0) {
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
- rtlefuse->
- eeprom_chnlarea_txpwr_ht40_1s[rf_path]
- [index] - rtlefuse->
- eprom_chnl_txpwr_ht40_2sdf[rf_path]
- [index];
- } else {
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
- }
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+ max(rtlefuse->eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path][index], 0);
}
for (i = 0; i < 14; i++) {
RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index eb7d8b070cc7..494b2706abee 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -84,7 +84,7 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
rtlphy->num_total_rfpath = 2;
/* Single phy mode: use radio_a radio_b config path_A path_B */
- /* seperately by MAC0, and MAC1 needn't configure RF; */
+ /* separately by MAC0, and MAC1 needn't configure RF; */
/* Dual PHY mode:MAC0 use radio_a config 1st phy path_A, */
/* MAC1 use radio_b config 2nd PHY path_A. */
/* DMDP,MAC0 on G band,MAC1 on A band. */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 17486e3f322c..0108850bb9e5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -223,10 +223,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (bfirstconnect) {
- if (dm_dig->rssi_val_min <= dig_maxofmin)
- current_igi = dm_dig->rssi_val_min;
- else
- current_igi = dig_maxofmin;
+ current_igi = min(dm_dig->rssi_val_min, dig_maxofmin);
dm_dig->large_fa_hit = 0;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index 5a493602aaf2..17d29249a711 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -438,7 +438,7 @@ bool rtl92s_phy_rf6052_config(struct ieee80211_hw *hw)
rtl92s_phy_set_bb_reg(hw, pphyreg->rfhssi_para2,
B3WIRE_DATALENGTH, 0x0);
- /* Initialize RF fom connfiguration file */
+ /* Initialize RF from configuration file */
switch (rfpath) {
case RF90_PATH_A:
rtstatus = rtl92s_phy_config_rf(hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 21b827f519b6..bd45d9bd40bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1449,18 +1449,9 @@ _rtl8723e_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
[rf_path][index];
- if ((rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
- [rf_path][index] -
- rtlefuse->eprom_chnl_txpwr_ht40_2sdf
- [rf_path][index]) > 0) {
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
- rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
- [rf_path][index] -
- rtlefuse->eprom_chnl_txpwr_ht40_2sdf
- [rf_path][index];
- } else {
- rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
- }
+ rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
+ max(rtlefuse->eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
+ rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path][index], 0);
}
for (i = 0; i < 14; i++) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
index c53f95144812..c65d14fb914f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
@@ -468,10 +468,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (bfirstconnect) {
- if (dm_digtable->rssi_val_min <= dig_maxofmin)
- current_igi = dm_digtable->rssi_val_min;
- else
- current_igi = dig_maxofmin;
+ current_igi = min(dm_digtable->rssi_val_min, dig_maxofmin);
dm_digtable->large_fa_hit = 0;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 76b5395539d0..f8b159c74658 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -756,10 +756,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
"DIG AfterLink\n");
if (first_connect) {
- if (dm_digtable->rssi_val_min <= dig_max_of_min)
- current_igi = dm_digtable->rssi_val_min;
- else
- current_igi = dig_max_of_min;
+ current_igi = min(dm_digtable->rssi_val_min, dig_max_of_min);
rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
"First Connect\n");
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index a5a34b5edcfd..3a4a33476205 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1026,7 +1026,7 @@ static void _rtl8821ae_hw_configure(struct ieee80211_hw *hw)
/*Set retry limit*/
rtl_write_word(rtlpriv, REG_RL, 0x0707);
- /* Set Data / Response auto rate fallack retry count*/
+ /* Set Data / Response auto rate fallback retry count*/
rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
@@ -1295,12 +1295,12 @@ static bool _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, REG_CR, 0xFF);
/* We should init LLT & RQPN and
- * prepare Tx/Rx descrptor address later
+ * prepare Tx/Rx descriptor address later
* because MAC function is reset.*/
}
/* 7. Restore PCIe autoload down bit */
- /* 8812AE does not has the defination. */
+ /* 8812AE does not have the definition. */
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
/* write 0xF8 bit[17] = 1'b1 */
tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2);
@@ -1308,7 +1308,7 @@ static bool _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL + 2, tmp);
}
- /* In MAC power on state, BB and RF maybe in ON state,
+ /* In MAC power on state, BB and RF may be in ON state,
* if we release TRx DMA here.
* it will cause packets to be started to Tx/Rx,
* so we release Tx/Rx DMA later.*/
@@ -1713,7 +1713,7 @@ static bool _rtl8821ae_wowlan_initialize_adapter(struct ieee80211_hw *hw)
_rtl8821ae_get_wakeup_reason(hw);
/* Patch Pcie Rx DMA hang after S3/S4 several times.
- * The root cause has not be found. */
+ * The root cause has not been found. */
if (_rtl8821ae_check_pcie_dma_hang(hw))
_rtl8821ae_reset_pcie_interface_dma(hw, true, false);
@@ -1926,7 +1926,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
rtl8821ae_phy_mac_config(hw);
/* because last function modify RCR, so we update
* rcr var here, or TP will unstable for receive_config
- * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx
+ * is wrong, RX RCR_ACRC32 will cause TP unstable & Rx
* RCR_APP_ICV will cause mac80211 unassoc for cisco 1252
rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
@@ -2332,7 +2332,7 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
if (_rtl8821ae_dynamic_rqpn(hw, 0xE0, 0x3, 0x80c20d0d))
rtlhal->re_init_llt_table = true;
- /* 3 <2> Set Fw releted H2C cmd. */
+ /* 3 <2> Set Fw related H2C cmd. */
/* Set WoWLAN related security information. */
rtl8821ae_set_fw_global_info_cmd(hw);
@@ -2357,8 +2357,8 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
/* 3 <3> Hw Configutations */
- /* Wait untill Rx DMA Finished before host sleep.
- * FW Pause Rx DMA may happens when received packet doing dma.
+ /* Wait until Rx DMA Finished before host sleep.
+ * FW Pause Rx DMA may happen when received packet doing DMA.
*/
rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, BIT(2));
@@ -3927,7 +3927,7 @@ void rtl8821ae_resume(struct ieee80211_hw *hw)
{
}
-/* Turn on AAP (RCR:bit 0) for promicuous mode. */
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
bool allow_all_da, bool write_into_reg)
{
@@ -3964,7 +3964,7 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
/* RX page size = 128 byte */
offset = MAX_RX_DMA_BUFFER_SIZE_8812 / 128;
- /* We should start from the boundry */
+ /* We should start from the boundary */
cam_start = offset * 128;
/* Enable Rx packet buffer access. */
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 64904278ddad..b4dc6ff2c175 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -1501,28 +1501,28 @@ static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev)
algorithm = COEX_ALGO_HFP;
break;
case BPM_HID:
- case BPM_HFP + BPM_HID:
+ case BPM_HFP | BPM_HID:
algorithm = COEX_ALGO_HID;
break;
- case BPM_HFP + BPM_A2DP:
- case BPM_HID + BPM_A2DP:
- case BPM_HFP + BPM_HID + BPM_A2DP:
+ case BPM_HFP | BPM_A2DP:
+ case BPM_HID | BPM_A2DP:
+ case BPM_HFP | BPM_HID | BPM_A2DP:
algorithm = COEX_ALGO_A2DP_HID;
break;
- case BPM_HFP + BPM_PAN:
- case BPM_HID + BPM_PAN:
- case BPM_HFP + BPM_HID + BPM_PAN:
+ case BPM_HFP | BPM_PAN:
+ case BPM_HID | BPM_PAN:
+ case BPM_HFP | BPM_HID | BPM_PAN:
algorithm = COEX_ALGO_PAN_HID;
break;
- case BPM_HFP + BPM_A2DP + BPM_PAN:
- case BPM_HID + BPM_A2DP + BPM_PAN:
- case BPM_HFP + BPM_HID + BPM_A2DP + BPM_PAN:
+ case BPM_HFP | BPM_A2DP | BPM_PAN:
+ case BPM_HID | BPM_A2DP | BPM_PAN:
+ case BPM_HFP | BPM_HID | BPM_A2DP | BPM_PAN:
algorithm = COEX_ALGO_A2DP_PAN_HID;
break;
case BPM_PAN:
algorithm = COEX_ALGO_PAN;
break;
- case BPM_A2DP + BPM_PAN:
+ case BPM_A2DP | BPM_PAN:
algorithm = COEX_ALGO_A2DP_PAN;
break;
case BPM_A2DP:
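The coex change is behaviour-neutral for disjoint single-bit flags (BPM_HFP + BPM_HID and BPM_HFP | BPM_HID yield the same constant), but '|' states the intent and stays correct even if a flag ever carries more than one bit. The switch-on-bitmask shape, reduced to a sketch with made-up flags:

#include <linux/bits.h>
#include <linux/types.h>

#define EX_HFP  BIT(0)
#define EX_HID  BIT(1)
#define EX_A2DP BIT(2)

/* Combinations of single-bit flags are spelled with '|', not '+'. */
static int example_classify(u8 profile)
{
        switch (profile) {
        case EX_HFP | EX_HID:
                return 1;
        case EX_HFP | EX_A2DP:
        case EX_HID | EX_A2DP:
        case EX_HFP | EX_HID | EX_A2DP:
                return 2;
        default:
                return 0;
        }
}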
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 011b81c82f3b..eaa928bab240 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -1409,3 +1409,13 @@ int rtw_mac_init(struct rtw_dev *rtwdev)
return 0;
}
+
+int rtw_mac_postinit(struct rtw_dev *rtwdev)
+{
+ const struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->mac_postinit)
+ return 0;
+
+ return chip->ops->mac_postinit(rtwdev);
+}
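rtw_mac_postinit() follows the driver's usual pattern for optional per-chip callbacks: a NULL op means there is nothing extra to do and the wrapper just returns success, which is why several chips below set .mac_postinit = NULL. A generic sketch of that dispatch (names are illustrative):

struct example_chip_ops {
        int (*post_init)(void *priv);
};

/* Optional hook: chips that need no extra MAC setup leave it NULL. */
static int example_post_init(const struct example_chip_ops *ops, void *priv)
{
        if (!ops->post_init)
                return 0;

        return ops->post_init(priv);
}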
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
index e92b1483728d..b73af90ee1d7 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.h
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -38,6 +38,7 @@ void rtw_write_firmware_page(struct rtw_dev *rtwdev, u32 page,
const u8 *data, u32 size);
int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
int rtw_mac_init(struct rtw_dev *rtwdev);
+int rtw_mac_postinit(struct rtw_dev *rtwdev);
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
int rtw_set_trx_fifo_info(struct rtw_dev *rtwdev);
int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 97756bdf57b2..fa0ed39cb199 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -349,7 +349,7 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
int i;
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
si->mac_id = rtwvif->mac_id;
} else {
si->mac_id = rtw_acquire_macid(rtwdev);
@@ -386,7 +386,7 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
cancel_work_sync(&si->rc_work);
- if (vif->type != NL80211_IFTYPE_STATION)
+ if (vif->type != NL80211_IFTYPE_STATION || sta->tdls)
rtw_release_macid(rtwdev, si->mac_id);
if (fw_exist)
rtw_fw_media_status_report(rtwdev, si->mac_id, false);
@@ -1412,6 +1412,12 @@ int rtw_power_on(struct rtw_dev *rtwdev)
chip->ops->phy_set_param(rtwdev);
+ ret = rtw_mac_postinit(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to configure mac in postinit\n");
+ goto err_off;
+ }
+
ret = rtw_hci_start(rtwdev);
if (ret) {
rtw_err(rtwdev, "failed to start hci\n");
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index b42538cce359..43ed6d6b4291 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -858,6 +858,7 @@ struct rtw_chip_ops {
int (*power_on)(struct rtw_dev *rtwdev);
void (*power_off)(struct rtw_dev *rtwdev);
int (*mac_init)(struct rtw_dev *rtwdev);
+ int (*mac_postinit)(struct rtw_dev *rtwdev);
int (*dump_fw_crash)(struct rtw_dev *rtwdev);
void (*shutdown)(struct rtw_dev *rtwdev);
int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index 03475af973b5..821c28d9cb5d 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -1832,6 +1832,7 @@ static const struct rtw_chip_ops rtw8703b_ops = {
.power_on = rtw_power_on,
.power_off = rtw_power_off,
.mac_init = rtw8723x_mac_init,
+ .mac_postinit = rtw8723x_mac_postinit,
.dump_fw_crash = NULL,
.shutdown = NULL,
.read_efuse = rtw8703b_read_efuse,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index bf69f5b06ce2..8715e0435f17 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -1397,6 +1397,7 @@ static const struct rtw_chip_ops rtw8723d_ops = {
.query_phy_status = query_phy_status,
.set_channel = rtw8723d_set_channel,
.mac_init = rtw8723x_mac_init,
+ .mac_postinit = rtw8723x_mac_postinit,
.shutdown = rtw8723d_shutdown,
.read_rf = rtw_phy_read_rf_sipi,
.write_rf = rtw_phy_write_rf_reg_sipi,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
index 4c77963fdd37..3f3e9b0c44e8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
@@ -353,7 +353,6 @@ static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
static int __rtw8723x_mac_init(struct rtw_dev *rtwdev)
{
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
rtw_write32(rtwdev, REG_TCR, BIT_TCR_CFG);
rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
@@ -370,6 +369,13 @@ static int __rtw8723x_mac_init(struct rtw_dev *rtwdev)
return 0;
}
+static int __rtw8723x_mac_postinit(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+
+ return 0;
+}
+
static void __rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
u8 ldo_pwr;
@@ -760,6 +766,7 @@ const struct rtw8723x_common rtw8723x_common = {
.lck = __rtw8723x_lck,
.read_efuse = __rtw8723x_read_efuse,
.mac_init = __rtw8723x_mac_init,
+ .mac_postinit = __rtw8723x_mac_postinit,
.cfg_ldo25 = __rtw8723x_cfg_ldo25,
.set_tx_power_index = __rtw8723x_set_tx_power_index,
.efuse_grant = __rtw8723x_efuse_grant,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
index a99af527c92c..0fc70dfdfc8b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723x.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
@@ -137,6 +137,7 @@ struct rtw8723x_common {
void (*lck)(struct rtw_dev *rtwdev);
int (*read_efuse)(struct rtw_dev *rtwdev, u8 *log_map);
int (*mac_init)(struct rtw_dev *rtwdev);
+ int (*mac_postinit)(struct rtw_dev *rtwdev);
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
void (*set_tx_power_index)(struct rtw_dev *rtwdev);
void (*efuse_grant)(struct rtw_dev *rtwdev, bool on);
@@ -383,6 +384,11 @@ static inline int rtw8723x_mac_init(struct rtw_dev *rtwdev)
return rtw8723x_common.mac_init(rtwdev);
}
+static inline int rtw8723x_mac_postinit(struct rtw_dev *rtwdev)
+{
+ return rtw8723x_common.mac_postinit(rtwdev);
+}
+
static inline void rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
{
rtw8723x_common.cfg_ldo25(rtwdev, enable);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
index 03b441639611..2078eb6e3628 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
@@ -919,6 +919,7 @@ static const struct rtw_chip_ops rtw8812a_ops = {
.query_phy_status = rtw8812a_query_phy_status,
.set_channel = rtw88xxa_set_channel,
.mac_init = NULL,
+ .mac_postinit = NULL,
.read_rf = rtw88xxa_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_sipi,
.set_antenna = NULL,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
index 4a1f850d05c8..ca1079e12023 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
@@ -2055,6 +2055,7 @@ static const struct rtw_chip_ops rtw8814a_ops = {
.query_phy_status = rtw8814a_query_phy_status,
.set_channel = rtw8814a_set_channel,
.mac_init = rtw8814a_mac_init,
+ .mac_postinit = NULL,
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_sipi,
.set_tx_power_index = rtw8814a_set_tx_power_index,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
index 1d02ea400b2e..414b77eef07c 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
@@ -865,6 +865,7 @@ static const struct rtw_chip_ops rtw8821a_ops = {
.query_phy_status = rtw8821a_query_phy_status,
.set_channel = rtw88xxa_set_channel,
.mac_init = NULL,
+ .mac_postinit = NULL,
.read_rf = rtw88xxa_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_sipi,
.set_antenna = NULL,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index a2a358d6033f..2078b067562e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1663,6 +1663,7 @@ static const struct rtw_chip_ops rtw8821c_ops = {
.query_phy_status = query_phy_status,
.set_channel = rtw8821c_set_channel,
.mac_init = rtw8821c_mac_init,
+ .mac_postinit = NULL,
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_sipi,
.set_antenna = NULL,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index bb5c41905afe..89b6485b229a 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2154,6 +2154,7 @@ static const struct rtw_chip_ops rtw8822b_ops = {
.query_phy_status = query_phy_status,
.set_channel = rtw8822b_set_channel,
.mac_init = rtw8822b_mac_init,
+ .mac_postinit = NULL,
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_sipi,
.set_tx_power_index = rtw8822b_set_tx_power_index,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 58c1958e6170..28c121cf1e68 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -4964,6 +4964,7 @@ static const struct rtw_chip_ops rtw8822c_ops = {
.query_phy_status = query_phy_status,
.set_channel = rtw8822c_set_channel,
.mac_init = rtw8822c_mac_init,
+ .mac_postinit = NULL,
.dump_fw_crash = rtw8822c_dump_fw_crash,
.read_rf = rtw_phy_read_rf,
.write_rf = rtw_phy_write_rf_reg_mix,
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 205d7ecca7d7..4288c30b400a 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -17,6 +17,9 @@ config RTW89_CORE
config RTW89_PCI
tristate
+config RTW89_USB
+ tristate
+
config RTW89_8851B
tristate
@@ -49,6 +52,17 @@ config RTW89_8851BE
802.11ax PCIe wireless network (Wi-Fi 6) adapter
+config RTW89_8851BU
+ tristate "Realtek 8851BU USB wireless network (Wi-Fi 6) adapter"
+ depends on USB
+ select RTW89_CORE
+ select RTW89_USB
+ select RTW89_8851B
+ help
+ Select this option will enable support for 8851BU chipset
+
+ 802.11ax USB wireless network (Wi-Fi 6) adapter
+
config RTW89_8852AE
tristate "Realtek 8852AE PCI wireless network (Wi-Fi 6) adapter"
depends on PCI
@@ -72,6 +86,18 @@ config RTW89_8852BE
802.11ax PCIe wireless network (Wi-Fi 6) adapter
+config RTW89_8852BU
+ tristate "Realtek 8852BU USB wireless network (Wi-Fi 6) adapter"
+ depends on USB
+ select RTW89_CORE
+ select RTW89_USB
+ select RTW89_8852B
+ select RTW89_8852B_COMMON
+ help
+ Select this option will enable support for 8852BU chipset
+
+ 802.11ax USB wireless network (Wi-Fi 6) adapter
+
config RTW89_8852BTE
tristate "Realtek 8852BE-VT PCI wireless network (Wi-Fi 6) adapter"
depends on PCI
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index c751013e811e..23e43c444f69 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -31,6 +31,9 @@ rtw89_8851b-objs := rtw8851b.o \
obj-$(CONFIG_RTW89_8851BE) += rtw89_8851be.o
rtw89_8851be-objs := rtw8851be.o
+obj-$(CONFIG_RTW89_8851BU) += rtw89_8851bu.o
+rtw89_8851bu-objs := rtw8851bu.o
+
obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
rtw89_8852a-objs := rtw8852a.o \
rtw8852a_table.o \
@@ -52,6 +55,9 @@ rtw89_8852b-objs := rtw8852b.o \
obj-$(CONFIG_RTW89_8852BE) += rtw89_8852be.o
rtw89_8852be-objs := rtw8852be.o
+obj-$(CONFIG_RTW89_8852BU) += rtw89_8852bu.o
+rtw89_8852bu-objs := rtw8852bu.o
+
obj-$(CONFIG_RTW89_8852BT) += rtw89_8852bt.o
rtw89_8852bt-objs := rtw8852bt.o \
rtw8852bt_rfk.o \
@@ -81,3 +87,6 @@ rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
rtw89_pci-y := pci.o pci_be.o
+obj-$(CONFIG_RTW89_USB) += rtw89_usb.o
+rtw89_usb-y := usb.o
+
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index 581d6d4154d3..f1e758a5f32b 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -236,6 +236,50 @@ int rtw89_acpi_dsm_get_policy_6ghz_sp(struct rtw89_dev *rtwdev,
return 0;
}
+static bool chk_acpi_policy_6ghz_vlp_sig(const struct rtw89_acpi_policy_6ghz_vlp *p)
+{
+ return p->signature[0] == 0x52 &&
+ p->signature[1] == 0x54 &&
+ p->signature[2] == 0x4B &&
+ p->signature[3] == 0x0B;
+}
+
+static
+int rtw89_acpi_dsm_get_policy_6ghz_vlp(struct rtw89_dev *rtwdev,
+ union acpi_object *obj,
+ struct rtw89_acpi_policy_6ghz_vlp **policy)
+{
+ const struct rtw89_acpi_policy_6ghz_vlp *ptr;
+ u32 buf_len;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ return -EINVAL;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len < sizeof(*ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ return -EINVAL;
+ }
+
+ ptr = (typeof(ptr))obj->buffer.pointer;
+ if (!chk_acpi_policy_6ghz_vlp_sig(ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__);
+ return -EINVAL;
+ }
+
+ *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL);
+ if (!*policy)
+ return -ENOMEM;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_6ghz_vlp: ", *policy,
+ sizeof(*ptr));
+ return 0;
+}
+
static bool chk_acpi_policy_tas_sig(const struct rtw89_acpi_policy_tas *p)
{
return p->signature[0] == 0x52 &&
@@ -279,6 +323,51 @@ static int rtw89_acpi_dsm_get_policy_tas(struct rtw89_dev *rtwdev,
return 0;
}
+static
+bool chk_acpi_policy_reg_rules_sig(const struct rtw89_acpi_policy_reg_rules *p)
+{
+ return p->signature[0] == 0x52 &&
+ p->signature[1] == 0x54 &&
+ p->signature[2] == 0x4B &&
+ p->signature[3] == 0x0A;
+}
+
+static
+int rtw89_acpi_dsm_get_policy_reg_rules(struct rtw89_dev *rtwdev,
+ union acpi_object *obj,
+ struct rtw89_acpi_policy_reg_rules **policy)
+{
+ const struct rtw89_acpi_policy_reg_rules *ptr;
+ u32 buf_len;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+ "acpi: expect buffer but type: %d\n", obj->type);
+ return -EINVAL;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len < sizeof(*ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ return -EINVAL;
+ }
+
+ ptr = (typeof(ptr))obj->buffer.pointer;
+ if (!chk_acpi_policy_reg_rules_sig(ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__);
+ return -EINVAL;
+ }
+
+ *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL);
+ if (!*policy)
+ return -ENOMEM;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_reg_rules: ", *policy,
+ sizeof(*ptr));
+ return 0;
+}
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res)
@@ -300,8 +389,14 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP)
ret = rtw89_acpi_dsm_get_policy_6ghz_sp(rtwdev, obj,
&res->u.policy_6ghz_sp);
+ else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_VLP_SUP)
+ ret = rtw89_acpi_dsm_get_policy_6ghz_vlp(rtwdev, obj,
+ &res->u.policy_6ghz_vlp);
else if (func == RTW89_ACPI_DSM_FUNC_TAS_EN)
ret = rtw89_acpi_dsm_get_policy_tas(rtwdev, obj, &res->u.policy_tas);
+ else if (func == RTW89_ACPI_DSM_FUNC_REG_RULES_EN)
+ ret = rtw89_acpi_dsm_get_policy_reg_rules(rtwdev, obj,
+ &res->u.policy_reg_rules);
else
ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value);
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index 8c918ee02d2e..48a46f2005b1 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -19,11 +19,13 @@ enum rtw89_acpi_dsm_func {
RTW89_ACPI_DSM_FUNC_TAS_EN = 5,
RTW89_ACPI_DSM_FUNC_UNII4_SUP = 6,
RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP = 7,
+ RTW89_ACPI_DSM_FUNC_REG_RULES_EN = 10,
+ RTW89_ACPI_DSM_FUNC_6GHZ_VLP_SUP = 11,
};
enum rtw89_acpi_conf_unii4 {
- RTW89_ACPI_CONF_UNII4_FCC = BIT(0),
- RTW89_ACPI_CONF_UNII4_IC = BIT(1),
+ RTW89_ACPI_CONF_UNII4_US = BIT(0),
+ RTW89_ACPI_CONF_UNII4_CA = BIT(1),
};
enum rtw89_acpi_policy_mode {
@@ -56,6 +58,7 @@ struct rtw89_acpi_policy_6ghz {
enum rtw89_acpi_conf_6ghz_sp {
RTW89_ACPI_CONF_6GHZ_SP_US = BIT(0),
+ RTW89_ACPI_CONF_6GHZ_SP_CA = BIT(1),
};
struct rtw89_acpi_policy_6ghz_sp {
@@ -66,6 +69,19 @@ struct rtw89_acpi_policy_6ghz_sp {
u8 rsvd;
} __packed;
+enum rtw89_acpi_conf_6ghz_vlp {
+ RTW89_ACPI_CONF_6GHZ_VLP_US = BIT(0),
+ RTW89_ACPI_CONF_6GHZ_VLP_CA = BIT(1),
+};
+
+struct rtw89_acpi_policy_6ghz_vlp {
+ u8 signature[4];
+ u8 revision;
+ u8 override;
+ u8 conf;
+ u8 rsvd;
+} __packed;
+
struct rtw89_acpi_policy_tas {
u8 signature[4];
u8 revision;
@@ -74,13 +90,26 @@ struct rtw89_acpi_policy_tas {
u8 rsvd[3];
} __packed;
+enum rtw89_acpi_conf_reg_rules {
+ RTW89_ACPI_CONF_REG_RULE_REGD_UK = BIT(0),
+};
+
+struct rtw89_acpi_policy_reg_rules {
+ u8 signature[4];
+ u8 revision;
+ u8 conf;
+ u8 rsvd[3];
+} __packed;
+
struct rtw89_acpi_dsm_result {
union {
u8 value;
/* caller needs to free it after using */
struct rtw89_acpi_policy_6ghz *policy_6ghz;
struct rtw89_acpi_policy_6ghz_sp *policy_6ghz_sp;
+ struct rtw89_acpi_policy_6ghz_vlp *policy_6ghz_vlp;
struct rtw89_acpi_policy_tas *policy_tas;
+ struct rtw89_acpi_policy_reg_rules *policy_reg_rules;
} u;
};
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 6f10235647a1..f7d1c5d3b92e 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -7,6 +7,7 @@
#include "debug.h"
#include "fw.h"
#include "mac.h"
+#include "phy.h"
#include "ps.h"
#include "sar.h"
#include "util.h"
@@ -128,6 +129,48 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
bandwidth);
}
+static void _rtw89_chan_update_punctured(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ const struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_bss_conf *bss_conf;
+
+ if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
+ return;
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ if (!bss_conf->eht_support) {
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_read_unlock();
+
+ rtw89_chip_h2c_punctured_cmac_tbl(rtwdev, rtwvif_link, chandef->punctured);
+}
+
+static void rtw89_chan_update_punctured(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx idx,
+ const struct cfg80211_chan_def *chandef)
+{
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_vif *rtwvif;
+ unsigned int link_id;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ if (!rtwvif_link->chanctx_assigned ||
+ rtwvif_link->chanctx_idx != idx)
+ continue;
+
+ _rtw89_chan_update_punctured(rtwdev, rtwvif_link, chandef);
+ }
+ }
+}
+
bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx idx,
const struct rtw89_chan *new)
@@ -999,6 +1042,11 @@ static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
*pattern = *new;
memset(&pattern->courtesy, 0, sizeof(pattern->courtesy));
+ if (RTW89_MCC_REQ_COURTESY(pattern, aux) && aux->is_gc)
+ aux->ignore_bcn = true;
+ else
+ aux->ignore_bcn = false;
+
if (RTW89_MCC_REQ_COURTESY(pattern, aux) && rtw89_mcc_can_courtesy(ref, aux)) {
crtz = &pattern->courtesy.ref;
ref->crtz = crtz;
@@ -1013,6 +1061,11 @@ static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
ref->crtz = NULL;
}
+ if (RTW89_MCC_REQ_COURTESY(pattern, ref) && ref->is_gc)
+ ref->ignore_bcn = true;
+ else
+ ref->ignore_bcn = false;
+
if (RTW89_MCC_REQ_COURTESY(pattern, ref) && rtw89_mcc_can_courtesy(aux, ref)) {
crtz = &pattern->courtesy.aux;
aux->crtz = crtz;
@@ -1115,7 +1168,7 @@ static int __rtw89_mcc_calc_pattern_strict(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config *config = &mcc->config;
- u16 min_tob = RTW89_MCC_EARLY_RX_BCN_TIME;
+ u16 min_tob = RTW89_MCC_EARLY_RX_BCN_TIME + RTW89_MCC_SWITCH_CH_TIME;
u16 min_toa = RTW89_MCC_MIN_RX_BCN_TIME;
u16 bcn_ofst = config->beacon_offset;
s16 upper_toa_ref, lower_toa_ref;
@@ -1271,11 +1324,11 @@ static int __rtw89_mcc_calc_pattern_anchor(struct rtw89_dev *rtwdev,
u16 bcn_ofst = config->beacon_offset;
bool small_bcn_ofst;
- if (bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
+ if (bcn_ofst < RTW89_MCC_MIN_RX_BCN_WITH_SWITCH_CH_TIME)
small_bcn_ofst = true;
else if (bcn_ofst < aux->duration - aux->limit.max_toa)
small_bcn_ofst = true;
- else if (mcc_intvl - bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
+ else if (mcc_intvl - bcn_ofst < RTW89_MCC_MIN_RX_BCN_WITH_SWITCH_CH_TIME)
small_bcn_ofst = false;
else
return -EPERM;
@@ -2170,6 +2223,7 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
rtw89_p2p_noa_renew(rtwvif_go);
if (enable) {
+ duration += RTW89_MCC_SWITCH_CH_TIME;
noa_desc.start_time = cpu_to_le32(start_time);
noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(interval));
noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(duration));
@@ -2253,15 +2307,6 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
else
mcc->mode = RTW89_MCC_MODE_GC_STA;
- if (rtw89_mcc_ignore_bcn(rtwdev, ref)) {
- rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, aux->rtwvif_link, false);
- } else if (rtw89_mcc_ignore_bcn(rtwdev, aux)) {
- rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, ref->rtwvif_link, false);
- } else {
- rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, ref->rtwvif_link, true);
- rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, aux->rtwvif_link, true);
- }
-
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC sel mode: %d\n", mcc->mode);
mcc->group = RTW89_MCC_DFLT_GROUP;
@@ -2270,6 +2315,15 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ if (rtw89_mcc_ignore_bcn(rtwdev, ref) || aux->ignore_bcn) {
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, aux->rtwvif_link, false);
+ } else if (rtw89_mcc_ignore_bcn(rtwdev, aux) || ref->ignore_bcn) {
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, ref->rtwvif_link, false);
+ } else {
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, ref->rtwvif_link, true);
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, aux->rtwvif_link, true);
+ }
+
if (rtw89_concurrent_via_mrc(rtwdev))
ret = __mrc_fw_start(rtwdev, false);
else
@@ -2281,6 +2335,7 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_START);
rtw89_mcc_start_beacon_noa(rtwdev);
+ rtw89_phy_dig_suspend(rtwdev);
rtw89_mcc_prepare(rtwdev, true);
return 0;
@@ -2333,9 +2388,11 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev,
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_stop_sel sel = {
.hint.target = pause ? pause->trigger : NULL,
};
+ bool rsn_scan;
int ret;
if (!pause) {
@@ -2343,6 +2400,12 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev,
bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
}
+ rsn_scan = pause && pause->rsn == RTW89_CHANCTX_PAUSE_REASON_HW_SCAN;
+ if (rsn_scan && ref->is_go)
+ sel.hint.target = ref->rtwvif_link;
+ else if (rsn_scan && aux->is_go)
+ sel.hint.target = aux->rtwvif_link;
+
/* by default, stop at ref */
rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_stop_sel_iterator, &sel);
if (!sel.filled)
@@ -2371,6 +2434,8 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev,
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
rtw89_mcc_stop_beacon_noa(rtwdev);
+ rtw89_fw_h2c_mcc_dig(rtwdev, RTW89_CHANCTX_0, 0, 0, false);
+ rtw89_phy_dig_resume(rtwdev, true);
rtw89_mcc_prepare(rtwdev, false);
}
@@ -2378,7 +2443,11 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev,
static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ bool old_ref_ignore_bcn = mcc->role_ref.ignore_bcn;
+ bool old_aux_ignore_bcn = mcc->role_aux.ignore_bcn;
struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mcc_config old_cfg = *config;
bool courtesy_changed;
bool sync_changed;
@@ -2393,6 +2462,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ if (old_ref_ignore_bcn != ref->ignore_bcn)
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, ref->rtwvif_link, !ref->ignore_bcn);
+ else if (old_aux_ignore_bcn != aux->ignore_bcn)
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, aux->rtwvif_link, !aux->ignore_bcn);
+
if (memcmp(&old_cfg.pattern.courtesy, &config->pattern.courtesy,
sizeof(old_cfg.pattern.courtesy)) == 0)
courtesy_changed = false;
@@ -2428,10 +2502,105 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mcc_search_gc_iterator(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *mcc_role,
+ unsigned int ordered_idx,
+ void *data)
+{
+ struct rtw89_mcc_role **role = data;
+
+ if (mcc_role->is_gc)
+ *role = mcc_role;
+
+ return 0;
+}
+
+static struct rtw89_mcc_role *rtw89_mcc_get_gc_role(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *role = NULL;
+
+ if (mcc->mode != RTW89_MCC_MODE_GC_STA)
+ return NULL;
+
+ rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_search_gc_iterator, &role);
+
+ return role;
+}
+
+void rtw89_mcc_gc_detect_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
+ mcc_gc_detect_beacon_work.work);
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ enum rtw89_entity_mode mode;
+ struct rtw89_dev *rtwdev;
+
+ lockdep_assert_wiphy(wiphy);
+
+ rtwdev = rtwvif_link->rtwvif->rtwdev;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ if (mode != RTW89_ENTITY_MODE_MCC)
+ return;
+
+ if (READ_ONCE(rtwvif_link->sync_bcn_tsf) > rtwvif_link->last_sync_bcn_tsf)
+ rtwvif_link->detect_bcn_count = 0;
+ else
+ rtwvif_link->detect_bcn_count++;
+
+ if (rtwvif_link->detect_bcn_count < RTW89_MCC_DETECT_BCN_MAX_TRIES)
+ rtw89_chanctx_proceed(rtwdev, NULL);
+ else
+ ieee80211_connection_loss(vif);
+}
+
+bool rtw89_mcc_detect_go_bcn(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
+ struct rtw89_chanctx_pause_parm pause_parm = {
+ .rsn = RTW89_CHANCTX_PAUSE_REASON_GC_BCN_LOSS,
+ .trigger = rtwvif_link,
+ };
+ struct ieee80211_bss_conf *bss_conf;
+ struct rtw89_mcc_role *role;
+ u16 bcn_int;
+
+ if (mode != RTW89_ENTITY_MODE_MCC)
+ return false;
+
+ role = rtw89_mcc_get_gc_role(rtwdev);
+ if (!role)
+ return false;
+
+ if (role->rtwvif_link != rtwvif_link)
+ return false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC GC beacon loss, pause MCC to detect GO beacon\n");
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ bcn_int = bss_conf->beacon_int;
+
+ rcu_read_unlock();
+
+ rtw89_chanctx_pause(rtwdev, &pause_parm);
+ rtwvif_link->last_sync_bcn_tsf = READ_ONCE(rtwvif_link->sync_bcn_tsf);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwvif_link->mcc_gc_detect_beacon_work,
+ usecs_to_jiffies(ieee80211_tu_to_usec(bcn_int)));
+
+ return true;
+}
+
static void rtw89_mcc_detect_connection(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *role)
{
struct ieee80211_vif *vif;
+ bool start_detect;
int ret;
ret = rtw89_core_send_nullfunc(rtwdev, role->rtwvif_link, true, false,
@@ -2445,7 +2614,12 @@ static void rtw89_mcc_detect_connection(struct rtw89_dev *rtwdev,
return;
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC <macid %d> can not detect AP\n", role->rtwvif_link->mac_id);
+ "MCC <macid %d> can not detect AP/GO\n", role->rtwvif_link->mac_id);
+
+ start_detect = rtw89_mcc_detect_go_bcn(rtwdev, role->rtwvif_link);
+ if (start_detect)
+ return;
+
vif = rtwvif_link_to_vif(role->rtwvif_link);
ieee80211_connection_loss(vif);
}
@@ -2461,9 +2635,9 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
u16 bcn_ofst;
u16 diff;
- if (rtw89_mcc_ignore_bcn(rtwdev, ref))
+ if (rtw89_mcc_ignore_bcn(rtwdev, ref) || aux->ignore_bcn)
rtw89_mcc_detect_connection(rtwdev, aux);
- else if (rtw89_mcc_ignore_bcn(rtwdev, aux))
+ else if (rtw89_mcc_ignore_bcn(rtwdev, aux) || ref->ignore_bcn)
rtw89_mcc_detect_connection(rtwdev, ref);
if (mcc->mode != RTW89_MCC_MODE_GC_STA)
@@ -2622,6 +2796,30 @@ static void rtw89_mcc_update_limit(struct rtw89_dev *rtwdev)
rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_lmt_iterator, NULL);
}
+static int rtw89_mcc_get_links_iterator(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *mcc_role,
+ unsigned int ordered_idx,
+ void *data)
+{
+ struct rtw89_mcc_links_info *info = data;
+
+ info->links[ordered_idx] = mcc_role->rtwvif_link;
+ return 0;
+}
+
+void rtw89_mcc_get_links(struct rtw89_dev *rtwdev, struct rtw89_mcc_links_info *info)
+{
+ enum rtw89_entity_mode mode;
+
+ memset(info, 0, sizeof(*info));
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ if (unlikely(mode != RTW89_ENTITY_MODE_MCC))
+ return;
+
+ rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_get_links_iterator, info);
+}
+
void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
@@ -2690,6 +2888,7 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
return;
case RTW89_ENTITY_MODE_MCC_PREPARE:
delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC_PREPARE);
+ rtw89_phy_dig_suspend(rtwdev);
break;
case RTW89_ENTITY_MODE_MCC:
delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC);
@@ -3088,6 +3287,9 @@ void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
rtw89_set_channel(rtwdev);
}
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
+ rtw89_chan_update_punctured(rtwdev, idx, &ctx->def);
}
int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
@@ -3231,5 +3433,7 @@ assign:
return ret;
}
+ _rtw89_chan_update_punctured(rtwdev, rtwvif_link, &new_ctx->def);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 57355cb3d765..b1175419f92b 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -18,16 +18,22 @@
#define RTW89_MCC_EARLY_RX_BCN_TIME 5
#define RTW89_MCC_MIN_RX_BCN_TIME 10
#define RTW89_MCC_DFLT_BCN_OFST_TIME 40
+#define RTW89_MCC_SWITCH_CH_TIME 3
#define RTW89_MCC_PROBE_TIMEOUT 100
#define RTW89_MCC_PROBE_MAX_TRIES 3
+#define RTW89_MCC_DETECT_BCN_MAX_TRIES 2
+
#define RTW89_MCC_MIN_GO_DURATION \
(RTW89_MCC_EARLY_TX_BCN_TIME + RTW89_MCC_MIN_RX_BCN_TIME)
#define RTW89_MCC_MIN_STA_DURATION \
(RTW89_MCC_EARLY_RX_BCN_TIME + RTW89_MCC_MIN_RX_BCN_TIME)
+#define RTW89_MCC_MIN_RX_BCN_WITH_SWITCH_CH_TIME \
+ (RTW89_MCC_MIN_RX_BCN_TIME + RTW89_MCC_SWITCH_CH_TIME)
+
#define RTW89_MCC_DFLT_GROUP 0
#define RTW89_MCC_NEXT_GROUP(cur) (((cur) + 1) % 4)
@@ -90,6 +96,7 @@ struct rtw89_mr_chanctx_info {
enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
RTW89_CHANCTX_PAUSE_REASON_ROC,
+ RTW89_CHANCTX_PAUSE_REASON_GC_BCN_LOSS,
};
struct rtw89_chanctx_pause_parm {
@@ -178,7 +185,15 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
#define rtw89_mgnt_chan_get(rtwdev, link_index) \
__rtw89_mgnt_chan_get(rtwdev, __func__, link_index)
+struct rtw89_mcc_links_info {
+ struct rtw89_vif_link *links[NUM_OF_RTW89_MCC_ROLES];
+};
+
+void rtw89_mcc_get_links(struct rtw89_dev *rtwdev, struct rtw89_mcc_links_info *info);
void rtw89_mcc_prepare_done_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_mcc_gc_detect_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
+bool rtw89_mcc_detect_go_bcn(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 1f5639a5d166..57590f5577a3 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -319,15 +319,25 @@ static const struct ieee80211_supported_band rtw89_sband_6ghz = {
.n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
};
+static void __rtw89_traffic_stats_accu(struct rtw89_traffic_stats *stats,
+ struct sk_buff *skb, bool tx)
+{
+ if (tx) {
+ stats->tx_cnt++;
+ stats->tx_unicast += skb->len;
+ } else {
+ stats->rx_cnt++;
+ stats->rx_unicast += skb->len;
+ }
+}
+
static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
- struct rtw89_traffic_stats *stats,
- struct sk_buff *skb, bool tx)
+ struct rtw89_vif *rtwvif,
+ struct sk_buff *skb,
+ bool accu_dev, bool tx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (tx && ieee80211_is_assoc_req(hdr->frame_control))
- rtw89_wow_parse_akm(rtwdev, skb);
-
if (!ieee80211_is_data(hdr->frame_control))
return;
@@ -335,12 +345,12 @@ static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
is_multicast_ether_addr(hdr->addr1))
return;
- if (tx) {
- stats->tx_cnt++;
- stats->tx_unicast += skb->len;
- } else {
- stats->rx_cnt++;
- stats->rx_unicast += skb->len;
+ if (accu_dev)
+ __rtw89_traffic_stats_accu(&rtwdev->stats, skb, tx);
+
+ if (rtwvif) {
+ __rtw89_traffic_stats_accu(&rtwvif->stats, skb, tx);
+ __rtw89_traffic_stats_accu(&rtwvif->stats_ps, skb, tx);
}
}
@@ -984,13 +994,25 @@ rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw))
return;
- if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
- return;
+ switch (chip->chip_id) {
+ case RTL8852BT:
+ if (test_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
+ goto notify;
+ break;
+ case RTL8852C:
+ if (test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ goto notify;
+ break;
+ default:
+ if (test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags) &&
+ tx_req->tx_type == RTW89_CORE_TX_TYPE_MGMT)
+ goto notify;
+ break;
+ }
- if (chip->chip_id != RTL8852C &&
- tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
- return;
+ return;
+notify:
rtw89_mac_notify_wake(rtwdev);
}
@@ -1150,8 +1172,8 @@ static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
tx_req.rtwsta_link = rtwsta_link;
tx_req.desc_info.sw_mld = sw_mld;
- rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
- rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
+ rtw89_traffic_stats_accu(rtwdev, rtwvif, skb, true, true);
+ rtw89_wow_parse_akm(rtwdev, skb);
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
rtw89_core_tx_wake(rtwdev, &tx_req);
@@ -2267,7 +2289,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
if (desc_info->data_rate < RTW89_HW_RATE_NR)
pkt_stat->rx_rate_cnt[desc_info->data_rate]++;
- rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, false);
+ rtw89_traffic_stats_accu(rtwdev, rtwvif, skb, false, false);
out:
rcu_read_unlock();
@@ -2280,7 +2302,7 @@ static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
{
struct rtw89_vif_rx_stats_iter_data iter_data;
- rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, false);
+ rtw89_traffic_stats_accu(rtwdev, NULL, skb, true, false);
iter_data.rtwdev = rtwdev;
iter_data.phy_ppdu = phy_ppdu;
@@ -2884,6 +2906,9 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE)
+ return RTW89_PS_MODE_NONE;
+
if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
@@ -3567,9 +3592,22 @@ void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
}
static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
- u32 throughput, u64 cnt)
+ u32 throughput, u64 cnt,
+ enum rtw89_tfc_interval interval)
{
- if (cnt < 100)
+ u64 cnt_level;
+
+ switch (interval) {
+ default:
+ case RTW89_TFC_INTERVAL_100MS:
+ cnt_level = 5;
+ break;
+ case RTW89_TFC_INTERVAL_2SEC:
+ cnt_level = 100;
+ break;
+ }
+
+ if (cnt < cnt_level)
return RTW89_TFC_IDLE;
if (throughput > 50)
return RTW89_TFC_HIGH;
@@ -3581,13 +3619,14 @@ static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
}
static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
- struct rtw89_traffic_stats *stats)
+ struct rtw89_traffic_stats *stats,
+ enum rtw89_tfc_interval interval)
{
enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
- stats->tx_throughput_raw = (u32)(stats->tx_unicast >> RTW89_TP_SHIFT);
- stats->rx_throughput_raw = (u32)(stats->rx_unicast >> RTW89_TP_SHIFT);
+ stats->tx_throughput_raw = rtw89_bytes_to_mbps(stats->tx_unicast, interval);
+ stats->rx_throughput_raw = rtw89_bytes_to_mbps(stats->rx_unicast, interval);
ewma_tp_add(&stats->tx_ewma_tp, stats->tx_throughput_raw);
ewma_tp_add(&stats->rx_ewma_tp, stats->rx_throughput_raw);
@@ -3595,9 +3634,9 @@ static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
stats->tx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->tx_throughput,
- stats->tx_cnt);
+ stats->tx_cnt, interval);
stats->rx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->rx_throughput,
- stats->rx_cnt);
+ stats->rx_cnt, interval);
stats->tx_avg_len = stats->tx_cnt ?
DIV_ROUND_DOWN_ULL(stats->tx_unicast, stats->tx_cnt) : 0;
stats->rx_avg_len = stats->rx_cnt ?
@@ -3623,10 +3662,12 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
unsigned int link_id;
bool tfc_changed;
- tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats);
+ tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats,
+ RTW89_TFC_INTERVAL_2SEC);
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats);
+ rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats,
+ RTW89_TFC_INTERVAL_2SEC);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
rtw89_fw_h2c_tp_offload(rtwdev, rtwvif_link);
@@ -3646,8 +3687,8 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
if (rtwvif->offchan)
continue;
- if (rtwvif->stats.tx_tfc_lv != RTW89_TFC_IDLE ||
- rtwvif->stats.rx_tfc_lv != RTW89_TFC_IDLE)
+ if (rtwvif->stats_ps.tx_tfc_lv >= RTW89_TFC_MID ||
+ rtwvif->stats_ps.rx_tfc_lv >= RTW89_TFC_MID)
continue;
vif = rtwvif_to_vif(rtwvif);
@@ -3786,6 +3827,34 @@ static void rtw89_core_mlo_track(struct rtw89_dev *rtwdev)
}
}
+static void rtw89_track_ps_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ track_ps_work.work);
+ struct rtw89_vif *rtwvif;
+
+ lockdep_assert_wiphy(wiphy);
+
+ if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags))
+ return;
+
+ if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+ return;
+
+ wiphy_delayed_work_queue(wiphy, &rtwdev->track_ps_work,
+ RTW89_TRACK_PS_WORK_PERIOD);
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats_ps,
+ RTW89_TFC_INTERVAL_100MS);
+
+ if (rtwdev->scanning)
+ return;
+
+ if (rtwdev->lps_enabled && !rtwdev->btc.lps)
+ rtw89_enter_lps_track(rtwdev);
+}
+
static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
@@ -4025,6 +4094,7 @@ int rtw89_core_sta_link_add(struct rtw89_dev *rtwdev,
&rtwsta_link->tx_retry);
rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false, 60);
}
+ rtw89_phy_dig_suspend(rtwdev);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta_link->mac_id, false);
if (ret) {
@@ -4213,6 +4283,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
if (vif->p2p)
rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false,
rtwsta_link->tx_retry);
+ rtw89_phy_dig_resume(rtwdev, false);
}
rtw89_assoc_link_set(rtwsta_link);
@@ -4872,6 +4943,8 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_work,
RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_ps_work,
+ RTW89_TRACK_PS_WORK_PERIOD);
set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
@@ -4906,6 +4979,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_act1_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_bt_devinfo_work);
@@ -5133,6 +5207,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
wiphy_delayed_work_init(&rtwdev->track_work, rtw89_track_work);
+ wiphy_delayed_work_init(&rtwdev->track_ps_work, rtw89_track_ps_work);
wiphy_delayed_work_init(&rtwdev->chanctx_work, rtw89_chanctx_work);
wiphy_delayed_work_init(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
wiphy_delayed_work_init(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
@@ -5588,6 +5663,9 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
int ret;
int tx_headroom = IEEE80211_HT_CTL_LEN;
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
+ tx_headroom += chip->txwd_body_size + chip->txwd_info_size;
+
hw->vif_data_size = struct_size_t(struct rtw89_vif, links_inst, n);
hw->sta_data_size = struct_size_t(struct rtw89_sta, links_inst, n);
hw->txq_data_size = sizeof(struct rtw89_txq);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index cdacf100a59a..43e10278e14d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -40,6 +40,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define BYPASS_CR_DATA 0xbabecafe
#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
+#define RTW89_TRACK_PS_WORK_PERIOD msecs_to_jiffies(100)
#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
#define CFO_TRACK_MAX_USER 64
#define MAX_RSSI 110
@@ -130,6 +131,17 @@ enum rtw89_hci_type {
RTW89_HCI_TYPE_PCIE,
RTW89_HCI_TYPE_USB,
RTW89_HCI_TYPE_SDIO,
+
+ RTW89_HCI_TYPE_NUM,
+};
+
+enum rtw89_hci_dle_type {
+ RTW89_HCI_DLE_TYPE_PCIE,
+ RTW89_HCI_DLE_TYPE_USB2,
+ RTW89_HCI_DLE_TYPE_USB3,
+ RTW89_HCI_DLE_TYPE_SDIO,
+
+ RTW89_HCI_DLE_TYPE_NUM,
};
enum rtw89_core_chip_id {
@@ -1381,6 +1393,11 @@ struct rtw89_btc_wl_smap {
u32 emlsr: 1;
};
+enum rtw89_tfc_interval {
+ RTW89_TFC_INTERVAL_100MS,
+ RTW89_TFC_INTERVAL_2SEC,
+};
+
enum rtw89_tfc_lv {
RTW89_TFC_IDLE,
RTW89_TFC_ULTRA_LOW,
@@ -1389,7 +1406,6 @@ enum rtw89_tfc_lv {
RTW89_TFC_HIGH,
};
-#define RTW89_TP_SHIFT 18 /* bytes/2s --> Mbps */
DECLARE_EWMA(tp, 10, 2);
struct rtw89_traffic_stats {
@@ -3480,6 +3496,7 @@ struct rtw89_efuse {
u8 addr[ETH_ALEN];
u8 rfe_type;
char country_code[2];
+ u8 adc_td;
};
struct rtw89_phy_rate_pattern {
@@ -3580,6 +3597,7 @@ struct rtw89_vif_link {
u8 hit_rule;
u8 last_noa_nr;
u64 sync_bcn_tsf;
+ u64 last_sync_bcn_tsf;
bool rand_tsf_done;
bool trigger;
bool lsig_txop;
@@ -3603,6 +3621,8 @@ struct rtw89_vif_link {
struct list_head general_pkt_list;
struct rtw89_p2p_noa_setter p2p_noa;
struct rtw89_ps_noa_once_handler noa_once;
+ struct wiphy_delayed_work mcc_gc_detect_beacon_work;
+ u8 detect_bcn_count;
};
enum rtw89_lv1_rcvy_step {
@@ -3659,6 +3679,7 @@ struct rtw89_hci_ops {
struct rtw89_hci_info {
const struct rtw89_hci_ops *ops;
enum rtw89_hci_type type;
+ enum rtw89_hci_dle_type dle_type;
u32 rpwm_addr;
u32 cpwm_addr;
bool paused;
@@ -3758,6 +3779,9 @@ struct rtw89_chip_ops {
struct rtw89_sta_link *rtwsta_link);
int (*h2c_txtime_cmac_tbl)(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
+ int (*h2c_punctured_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 punctured);
int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
@@ -3842,7 +3866,7 @@ struct rtw89_scan_option {
u16 slow_pd;
u16 norm_cy;
u8 opch_end;
- u16 delay;
+ u16 delay; /* in unit of ms */
u64 prohib_chan;
enum rtw89_phy_idx band;
enum rtw89_scan_be_operation operation;
@@ -4357,8 +4381,8 @@ struct rtw89_chip_info {
u16 max_amsdu_limit;
bool dis_2g_40m_ul_ofdma;
u32 rsvd_ple_ofst;
- const struct rtw89_hfc_param_ini *hfc_param_ini;
- const struct rtw89_dle_mem *dle_mem;
+ const struct rtw89_hfc_param_ini *hfc_param_ini[RTW89_HCI_TYPE_NUM];
+ const struct rtw89_dle_mem *dle_mem[RTW89_HCI_DLE_TYPE_NUM];
u8 wde_qempty_acq_grpnum;
u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
@@ -4562,11 +4586,21 @@ enum rtw89_fw_type {
RTW89_FW_LOGFMT = 255,
};
+#define RTW89_FW_FEATURE_GROUP(_grp, _features...) \
+ RTW89_FW_FEATURE_##_grp##_MIN, \
+ __RTW89_FW_FEATURE_##_grp##_S = RTW89_FW_FEATURE_##_grp##_MIN - 1, \
+ _features \
+ __RTW89_FW_FEATURE_##_grp##_E, \
+ RTW89_FW_FEATURE_##_grp##_MAX = __RTW89_FW_FEATURE_##_grp##_E - 1
+
enum rtw89_fw_feature {
RTW89_FW_FEATURE_OLD_HT_RA_FORMAT,
RTW89_FW_FEATURE_SCAN_OFFLOAD,
RTW89_FW_FEATURE_TX_WAKE,
- RTW89_FW_FEATURE_CRASH_TRIGGER,
+ RTW89_FW_FEATURE_GROUP(CRASH_TRIGGER,
+ RTW89_FW_FEATURE_CRASH_TRIGGER_TYPE_0,
+ RTW89_FW_FEATURE_CRASH_TRIGGER_TYPE_1,
+ ),
RTW89_FW_FEATURE_NO_PACKET_DROP,
RTW89_FW_FEATURE_NO_DEEP_PS,
RTW89_FW_FEATURE_NO_LPS_PG,
@@ -4587,6 +4621,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_BEACON_LOSS_COUNT_V1,
RTW89_FW_FEATURE_SCAN_OFFLOAD_EXTRA_OP,
RTW89_FW_FEATURE_RFK_NTFY_MCC_V0,
+ RTW89_FW_FEATURE_LPS_DACK_BY_C2H_REG,
};
struct rtw89_fw_suit {
@@ -4684,6 +4719,10 @@ struct rtw89_fw_info {
#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
(!!((_fw)->feature_map & BIT(RTW89_FW_FEATURE_ ## _feat)))
+#define RTW89_CHK_FW_FEATURE_GROUP(_grp, _fw) \
+ (!!((_fw)->feature_map & GENMASK(RTW89_FW_FEATURE_ ## _grp ## _MAX, \
+ RTW89_FW_FEATURE_ ## _grp ## _MIN)))
+
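Editor's note (not part of the patch): RTW89_CHK_FW_FEATURE_GROUP() relies on the _MIN/_MAX enumerators emitted by RTW89_FW_FEATURE_GROUP() bracketing the group members. A hedged expansion sketch for the CRASH_TRIGGER group declared earlier in this header, assuming the group starts at enum value N:
/*
 * RTW89_FW_FEATURE_CRASH_TRIGGER_MIN     = N
 * __RTW89_FW_FEATURE_CRASH_TRIGGER_S     = N - 1
 * RTW89_FW_FEATURE_CRASH_TRIGGER_TYPE_0  = N         (aliases _MIN)
 * RTW89_FW_FEATURE_CRASH_TRIGGER_TYPE_1  = N + 1
 * __RTW89_FW_FEATURE_CRASH_TRIGGER_E     = N + 2
 * RTW89_FW_FEATURE_CRASH_TRIGGER_MAX     = N + 1     (aliases TYPE_1)
 *
 * so the group check reduces to:
 *   feature_map & GENMASK(N + 1, N)  ==  BIT(TYPE_0) | BIT(TYPE_1)
 */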
#define RTW89_SET_FW_FEATURE(_fw_feature, _fw) \
((_fw)->feature_map |= BIT(_fw_feature))
@@ -4982,6 +5021,7 @@ enum rtw89_flags {
RTW89_FLAG_FORBIDDEN_TRACK_WORK,
RTW89_FLAG_CHANGING_INTERFACE,
RTW89_FLAG_HW_RFKILL_STATE,
+ RTW89_FLAG_UNPLUGGED,
NUM_OF_RTW89_FLAGS,
};
@@ -5146,7 +5186,7 @@ struct rtw89_dpk_bkup_para {
enum rtw89_band band;
enum rtw89_bandwidth bw;
u8 ch;
- bool path_ok;
+ u8 path_ok;
u8 mdpd_en;
u8 txagc_dpk;
u8 ther_dpk;
@@ -5224,8 +5264,10 @@ struct rtw89_dig_info {
s8 tia_gain_a[TIA_GAIN_NUM];
s8 tia_gain_g[TIA_GAIN_NUM];
s8 *tia_gain;
+ u32 bak_dig;
bool is_linked_pre;
bool bypass_dig;
+ bool pause_dig;
};
enum rtw89_multi_cfo_mode {
@@ -5367,6 +5409,7 @@ struct rtw89_regulatory_info {
DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
+ DECLARE_BITMAP(block_6ghz_vlp, RTW89_REGD_MAX_COUNTRY_NUM);
};
enum rtw89_ifs_clm_application {
@@ -5517,7 +5560,9 @@ struct rtw89_early_h2c {
struct rtw89_hw_scan_extra_op {
bool set;
u8 macid;
+ u8 port;
struct rtw89_chan chan;
+ struct rtw89_vif_link *rtwvif_link;
};
struct rtw89_hw_scan_info {
@@ -5528,6 +5573,8 @@ struct rtw89_hw_scan_info {
struct rtw89_hw_scan_extra_op extra_op;
bool connected;
bool abort;
+ u16 delay; /* in unit of ms */
+ u8 seq: 2;
};
enum rtw89_phy_bb_gain_band {
@@ -5753,6 +5800,7 @@ struct rtw89_mcc_role {
bool is_2ghz;
bool is_go;
bool is_gc;
+ bool ignore_bcn;
};
struct rtw89_mcc_bt_role {
@@ -5929,6 +5977,7 @@ struct rtw89_dev {
} bbs[RTW89_PHY_NUM];
struct wiphy_delayed_work track_work;
+ struct wiphy_delayed_work track_ps_work;
struct wiphy_delayed_work chanctx_work;
struct wiphy_delayed_work coex_act1_work;
struct wiphy_delayed_work coex_bt_devinfo_work;
@@ -5979,6 +6028,7 @@ struct rtw89_vif {
__be32 ip_addr;
struct rtw89_traffic_stats stats;
+ struct rtw89_traffic_stats stats_ps;
u32 tdls_peer;
struct ieee80211_scan_ies *scan_ies;
@@ -7291,6 +7341,17 @@ static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
return false;
}
+static inline u32 rtw89_bytes_to_mbps(u64 bytes, enum rtw89_tfc_interval interval)
+{
+ switch (interval) {
+ default:
+ case RTW89_TFC_INTERVAL_2SEC:
+ return bytes >> 18; /* bytes/2s --> Mbps */
+ case RTW89_TFC_INTERVAL_100MS:
+ return (bytes * 10) >> 17; /* bytes/100ms --> Mbps */
+ }
+}
+
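Editor's note (not part of the patch): the shift factors in rtw89_bytes_to_mbps() are fixed-point conversions of a per-window byte counter to a binary-scaled Mbps value (1 Mbps taken as 2^20 bit/s). A worked sketch with hypothetical numbers:
/*
 * 2s window:    Mbps ~= bytes * 8 / (2 * 2^20) = bytes >> 18
 * 100ms window: Mbps ~= bytes * 8 * 10 / 2^20  = (bytes * 10) >> 17
 * e.g. 6,553,600 bytes seen in 100 ms -> (6553600 * 10) >> 17 = 500
 */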
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 4acb567b3ad4..afdfb9647fc1 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -3596,7 +3596,7 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
switch (crash_type) {
case RTW89_DBG_SIM_CPU_EXCEPTION:
- if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
+ if (!RTW89_CHK_FW_FEATURE_GROUP(CRASH_TRIGGER, &rtwdev->fw))
return -EOPNOTSUPP;
sim = rtw89_fw_h2c_trigger_cpu_exception;
break;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index c613431e754f..16e59a4a486e 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -814,33 +814,39 @@ struct __fw_feat_cfg {
static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
- __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
- __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX),
__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
- __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
- __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
- __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
- __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
@@ -856,6 +862,8 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -2824,8 +2832,14 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param)
{
struct sk_buff *skb;
+ bool done_ack;
int ret;
+ if (RTW89_CHK_FW_FEATURE(LPS_DACK_BY_C2H_REG, &rtwdev->fw))
+ done_ack = false;
+ else
+ done_ack = !lps_param->psmode;
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
@@ -2847,7 +2861,7 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_MAC_PS,
- H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode,
+ H2C_FUNC_MAC_LPS_PARM, 0, done_ack,
H2C_LPS_PARM_LEN);
ret = rtw89_h2c_tx(rtwdev, skb, false);
@@ -3731,6 +3745,49 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
+int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 punctured)
+{
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for punctured cmac g7\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w4 = le32_encode_bits(~punctured, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_g7);
+
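Editor's note (not part of the patch): mac80211 reports puncturing as a bitmap with 1 marking each punctured 20 MHz subchannel, while ACT_SUBCH_CBW appears to carry the still-active subchannels, hence the bitwise inversion; m4 restricts the firmware update to that single field. A hedged example with a hypothetical 80 MHz bitmap:
/*
 * punctured  = 0b0010       (second 20 MHz subchannel punctured)
 * ~punctured = ...11111101  (active-subchannel view)
 * le32_encode_bits() then truncates the value to the ACT_SUBCH_CBW width.
 */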
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link)
{
@@ -5583,7 +5640,6 @@ int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
return 0;
}
-#define RTW89_SCAN_DELAY_TSF_UNIT 1000000
int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
struct rtw89_vif_link *rtwvif_link,
@@ -5615,7 +5671,7 @@ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
scan_mode = RTW89_SCAN_IMMEDIATE;
} else {
scan_mode = RTW89_SCAN_DELAY;
- tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT;
+ tsf += (u64)option->delay * 1000;
}
}
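Editor's note (not part of the patch): option->delay is now kept in milliseconds (see the "u16 delay; /* in unit of ms */" change in core.h), so the AX path scales by 1000 to land on the 1 MHz TSF clock; the BE path below instead divides by 1000, which suggests its DELAY_START field counts seconds. A quick hedged example:
/*
 * delay = 307 ms (roughly three 100 TU beacon intervals)
 * AX: tsf += 307 * 1000 -> scan start pushed back 307,000 us of TSF time
 */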
@@ -5701,26 +5757,47 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_h2c_scanofld_be_macc_role *macc_role;
+ struct rtw89_hw_scan_extra_op scan_op[2] = {};
struct rtw89_chan *op = &scan_info->op_chan;
struct rtw89_h2c_scanofld_be_opch *opch;
struct rtw89_pktofld_info *pkt_info;
struct rtw89_h2c_scanofld_be *h2c;
+ struct ieee80211_vif *vif;
struct sk_buff *skb;
u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
u8 opch_size = sizeof(*opch) * option->num_opch;
+ enum rtw89_scan_be_opmode opmode;
u8 probe_id[NUM_NL80211_BANDS];
u8 scan_offload_ver = U8_MAX;
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
+ u8 ap_idx = U8_MAX;
u8 ver = U8_MAX;
+ u8 policy_val;
void *ptr;
+ u8 txbcn;
int ret;
u32 len;
u8 i;
+ scan_op[0].macid = rtwvif_link->mac_id;
+ scan_op[0].port = rtwvif_link->port;
+ scan_op[0].chan = *op;
+ vif = rtwvif_to_vif(rtwvif_link->rtwvif);
+ if (vif->type == NL80211_IFTYPE_AP)
+ ap_idx = 0;
+
+ if (ext->set) {
+ scan_op[1] = *ext;
+ vif = rtwvif_to_vif(ext->rtwvif_link->rtwvif);
+ if (vif->type == NL80211_IFTYPE_AP)
+ ap_idx = 1;
+ }
+
rtw89_scan_get_6g_disabled_chan(rtwdev, option);
if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
@@ -5781,7 +5858,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
- le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
+ le32_encode_bits(option->delay / 1000, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
@@ -5823,29 +5900,35 @@ flex_member:
}
for (i = 0; i < option->num_opch; i++) {
+ bool is_ap_idx = i == ap_idx;
+
+ opmode = is_ap_idx ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV;
+ policy_val = is_ap_idx ? 2 : RTW89_OFF_CHAN_TIME / 10;
+ txbcn = is_ap_idx ? 1 : 0;
+
opch = ptr;
- opch->w0 = le32_encode_bits(rtwvif_link->mac_id,
+ opch->w0 = le32_encode_bits(scan_op[i].macid,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
le32_encode_bits(option->band,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
- le32_encode_bits(rtwvif_link->port,
+ le32_encode_bits(scan_op[i].port,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
- le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
+ le32_encode_bits(opmode,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
le32_encode_bits(true,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
- le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
+ le32_encode_bits(policy_val,
RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
- opch->w1 = le32_encode_bits(op->band_type,
+ opch->w1 = le32_encode_bits(scan_op[i].chan.band_type,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
- le32_encode_bits(op->band_width,
+ le32_encode_bits(scan_op[i].chan.band_width,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
le32_encode_bits(0x3,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
- le32_encode_bits(op->primary_channel,
+ le32_encode_bits(scan_op[i].chan.primary_channel,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
- le32_encode_bits(op->channel,
+ le32_encode_bits(scan_op[i].chan.channel,
RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
opch->w2 = le32_encode_bits(0,
@@ -5853,7 +5936,9 @@ flex_member:
le32_encode_bits(0,
RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2,
- RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS) |
+ le32_encode_bits(txbcn,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_TXBCN);
opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
@@ -5985,6 +6070,59 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx,
+ u8 mcc_role_idx, u8 pd_val, bool en)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+ struct rtw89_h2c_mcc_dig *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c mcc_dig\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mcc_dig *)skb->data;
+
+ h2c->w0 = le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_REG_CNT) |
+ le32_encode_bits(en, RTW89_H2C_MCC_DIG_W0_DM_EN) |
+ le32_encode_bits(mcc_role_idx, RTW89_H2C_MCC_DIG_W0_IDX) |
+ le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_SET) |
+ le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_PHY0_EN) |
+ le32_encode_bits(chan->channel, RTW89_H2C_MCC_DIG_W0_CENTER_CH) |
+ le32_encode_bits(chan->band_type, RTW89_H2C_MCC_DIG_W0_BAND_TYPE);
+ h2c->w1 = le32_encode_bits(dig_regs->seg0_pd_reg,
+ RTW89_H2C_MCC_DIG_W1_ADDR_LSB) |
+ le32_encode_bits(dig_regs->seg0_pd_reg >> 8,
+ RTW89_H2C_MCC_DIG_W1_ADDR_MSB) |
+ le32_encode_bits(dig_regs->pd_lower_bound_mask,
+ RTW89_H2C_MCC_DIG_W1_BMASK_LSB) |
+ le32_encode_bits(dig_regs->pd_lower_bound_mask >> 8,
+ RTW89_H2C_MCC_DIG_W1_BMASK_MSB);
+ h2c->w2 = le32_encode_bits(pd_val, RTW89_H2C_MCC_DIG_W2_VAL_LSB);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_MCC_DIG, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -6015,7 +6153,7 @@ int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx);
val = rtw89_chip_chan_to_rf18_val(rtwdev, chan);
- if (path >= chip->rf_path_num) {
+ if (path >= chip->rf_path_num || path >= NUM_OF_RTW89_FW_RFK_PATH) {
rtw89_err(rtwdev, "unsupported rf path (%d)\n", path);
ret = -ENOENT;
goto fail;
@@ -6621,6 +6759,34 @@ void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
}
}
+void rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct sk_buff *skb, *tmp;
+ int limit;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ limit = skb_queue_len(&rtwdev->c2h_queue);
+
+ skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
+ struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
+
+ if (--limit < 0)
+ return;
+
+ if (!attr->is_scan_event || attr->scan_seq == scan_info->seq)
+ continue;
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "purge obsoleted scan event with seq=%d (cur=%d)\n",
+ attr->scan_seq, scan_info->seq);
+
+ skb_unlink(skb, &rtwdev->c2h_queue);
+ dev_kfree_skb_any(skb);
+ }
+}
+
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *info)
{
@@ -6660,13 +6826,18 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const u32 *c2h_reg = chip->c2h_regs;
- u32 ret;
+ u32 ret, timeout;
u8 i, val;
info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
+ timeout = RTW89_C2H_TIMEOUT_USB;
+ else
+ timeout = RTW89_C2H_TIMEOUT;
+
ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
- RTW89_C2H_TIMEOUT, false, rtwdev,
+ timeout, false, rtwdev,
chip->c2h_ctrl_reg);
if (ret) {
rtw89_warn(rtwdev, "c2h reg timeout\n");
@@ -6773,6 +6944,7 @@ static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev,
scan_info->scanning_vif = NULL;
scan_info->abort = false;
scan_info->connected = false;
+ scan_info->delay = 0;
}
static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
@@ -7572,12 +7744,25 @@ out:
static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
- u16 tu)
+ u16 tu, bool scan)
{
struct ieee80211_p2p_noa_desc noa_desc = {};
+ struct ieee80211_bss_conf *bss_conf;
+ u16 beacon_int;
u64 tsf;
int ret;
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ beacon_int = bss_conf->beacon_int;
+
+ rcu_read_unlock();
+
+ tu += beacon_int * 3;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
+ rtwdev->scan_info.delay = ieee80211_tu_to_usec(beacon_int * 3) / 1000;
+
ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
if (ret) {
rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
@@ -7585,17 +7770,24 @@ static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev,
}
noa_desc.start_time = cpu_to_le32(tsf);
- noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu));
- noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu));
- noa_desc.count = 1;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) {
+ noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu));
+ noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu));
+ noa_desc.count = 1;
+ } else {
+ noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(20000));
+ noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(20000));
+ noa_desc.count = 255;
+ }
rtw89_p2p_noa_renew(rtwvif_link);
- rtw89_p2p_noa_append(rtwvif_link, &noa_desc);
+ if (scan)
+ rtw89_p2p_noa_append(rtwvif_link, &noa_desc);
+
rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
}
-static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
- const struct cfg80211_scan_request *req)
+static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, bool scan)
{
const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
@@ -7610,6 +7802,9 @@ static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
lockdep_assert_wiphy(rtwdev->hw->wiphy);
+ if (!scan)
+ goto update;
+
list_for_each_safe(pos, tmp, &scan_info->chan_list) {
switch (chip->chip_gen) {
case RTW89_CHIP_AX:
@@ -7633,6 +7828,7 @@ static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
return;
}
+update:
list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) {
unsigned int link_id;
@@ -7641,7 +7837,8 @@ static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
continue;
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, tu);
+ rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link,
+ tu, scan);
}
}
@@ -7676,7 +7873,9 @@ static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev,
*ext = (struct rtw89_hw_scan_extra_op){
.set = true,
.macid = tmp_link->mac_id,
+ .port = tmp_link->port,
.chan = *tmp_chan,
+ .rtwvif_link = tmp_link,
};
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
@@ -7717,6 +7916,7 @@ int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
rtwdev->scan_info.scanning_vif = rtwvif_link;
rtwdev->scan_info.abort = false;
+ rtwdev->scan_info.delay = 0;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
@@ -7745,9 +7945,10 @@ int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);
rtw89_chanctx_pause(rtwdev, &pause_parm);
+ rtw89_phy_dig_suspend(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
- rtw89_hw_scan_update_beacon_noa(rtwdev, req);
+ rtw89_hw_scan_update_beacon_noa(rtwdev, true);
return 0;
}
@@ -7760,6 +7961,7 @@ struct rtw89_hw_scan_complete_cb_data {
static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
struct rtw89_hw_scan_complete_cb_data *cb_data = data;
struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
struct cfg80211_scan_info info = {
@@ -7778,9 +7980,13 @@ static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
ieee80211_wake_queues(rtwdev->hw);
rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
+ rtw89_phy_dig_resume(rtwdev, true);
rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
+ if (mode == RTW89_ENTITY_MODE_MCC)
+ rtw89_hw_scan_update_beacon_noa(rtwdev, false);
+
return 0;
}
@@ -7847,6 +8053,8 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
bool enable)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
struct rtw89_scan_option opt = {0};
bool connected;
int ret = 0;
@@ -7857,6 +8065,7 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
connected = rtwdev->scan_info.connected;
opt.enable = enable;
opt.target_ch_mode = connected;
+ opt.delay = rtwdev->scan_info.delay;
if (enable) {
ret = mac->add_chan_list(rtwdev, rtwvif_link);
if (ret)
@@ -7870,49 +8079,62 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
opt.num_macc_role = 0;
opt.mlo_mode = rtwdev->mlo_dbcc_mode;
opt.num_opch = connected ? 1 : 0;
+ if (connected && ext->set)
+ opt.num_opch++;
+
opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
}
- ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false);
+ ret = rtw89_mac_scan_offload(rtwdev, &opt, rtwvif_link, false);
+
out:
return ret;
}
-#define H2C_FW_CPU_EXCEPTION_LEN 4
-#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
+#define H2C_FW_CPU_EXCEPTION_TYPE_0 0x5566
+#define H2C_FW_CPU_EXCEPTION_TYPE_1 0x0
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
{
+ struct rtw89_h2c_trig_cpu_except *h2c;
+ u32 cpu_exception_type_def;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
+ if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_1, &rtwdev->fw))
+ cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_1;
+ else if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_0, &rtwdev->fw))
+ cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_0;
+ else
+ return -EOPNOTSUPP;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev,
"failed to alloc skb for fw cpu exception\n");
return -ENOMEM;
}
- skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
- RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
- H2C_FW_CPU_EXCEPTION_TYPE_DEF);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_trig_cpu_except *)skb->data;
+
+ h2c->w0 = le32_encode_bits(cpu_exception_type_def,
+ RTW89_H2C_CPU_EXCEPTION_TYPE);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_TEST,
H2C_CL_FW_STATUS_TEST,
H2C_FUNC_CPU_EXCEPTION, 0, 0,
- H2C_FW_CPU_EXCEPTION_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
- goto fail;
+ dev_kfree_skb_any(skb);
+ return ret;
}
return 0;
-
-fail:
- dev_kfree_skb_any(skb);
- return ret;
}
#define H2C_PKT_DROP_LEN 24
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 24d2e8b0d079..3de29137b113 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -87,6 +87,9 @@ struct rtw89_c2hreg_phycap {
#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_6 GENMASK(7, 0)
#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_7 GENMASK(15, 8)
+#define RTW89_C2HREG_PS_LEAVE_ACK_RET GENMASK(7, 0)
+#define RTW89_C2HREG_PS_LEAVE_ACK_MACID GENMASK(31, 16)
+
struct rtw89_h2creg_hdr {
u32 w0;
};
@@ -112,6 +115,8 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_C2HREG_HDR_LEN 2
#define RTW89_H2CREG_HDR_LEN 2
#define RTW89_C2H_TIMEOUT 1000000
+#define RTW89_C2H_TIMEOUT_USB 4000
+
struct rtw89_mac_c2h_info {
u8 id;
u8 content_len;
@@ -154,6 +159,7 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT,
RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK = 0xA,
RTW89_FWCMD_C2HREG_FUNC_PHY_CAP_PART1 = 0xC,
+ RTW89_FWCMD_C2HREG_FUNC_PS_LEAVE_ACK = 0xD,
RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF,
};
@@ -1829,10 +1835,11 @@ struct rtw89_h2c_lps_ml_cmn_info {
u8 dup_bcn_ofst[RTW89_PHY_NUM];
} __packed;
-static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
-}
+struct rtw89_h2c_trig_cpu_except {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_CPU_EXCEPTION_TYPE GENMASK(31, 0)
static inline void RTW89_SET_FWCMD_PKT_DROP_SEL(void *cmd, u32 val)
{
@@ -2817,6 +2824,7 @@ struct rtw89_h2c_scanofld_be_opch {
#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL GENMASK(7, 0)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS GENMASK(18, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_TXBCN BIT(19)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0 GENMASK(7, 0)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
@@ -3564,6 +3572,8 @@ struct rtw89_fw_c2h_attr {
u8 class;
u8 func;
u16 len;
+ u8 is_scan_event: 1;
+ u8 scan_seq: 2;
};
static inline struct rtw89_fw_c2h_attr *RTW89_SKB_C2H_CB(struct sk_buff *skb)
@@ -4341,6 +4351,7 @@ enum rtw89_mrc_h2c_func {
#define H2C_FUNC_OUTSRC_RA_MACIDCFG 0x0
#define H2C_CL_OUTSRC_DM 0x2
+#define H2C_FUNC_FW_MCC_DIG 0x6
#define H2C_FUNC_FW_LPS_CH_INFO 0xb
#define H2C_FUNC_FW_LPS_ML_CMN_INFO 0xe
@@ -4378,6 +4389,27 @@ struct rtw89_fw_h2c_rf_get_mccch_v0 {
__le32 current_band_type;
} __packed;
+struct rtw89_h2c_mcc_dig {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
+
+#define RTW89_H2C_MCC_DIG_W0_REG_CNT GENMASK(7, 0)
+#define RTW89_H2C_MCC_DIG_W0_DM_EN BIT(8)
+#define RTW89_H2C_MCC_DIG_W0_IDX GENMASK(10, 9)
+#define RTW89_H2C_MCC_DIG_W0_SET BIT(11)
+#define RTW89_H2C_MCC_DIG_W0_PHY0_EN BIT(12)
+#define RTW89_H2C_MCC_DIG_W0_PHY1_EN BIT(13)
+#define RTW89_H2C_MCC_DIG_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_MCC_DIG_W0_BAND_TYPE GENMASK(31, 24)
+#define RTW89_H2C_MCC_DIG_W1_ADDR_LSB GENMASK(7, 0)
+#define RTW89_H2C_MCC_DIG_W1_ADDR_MSB GENMASK(15, 8)
+#define RTW89_H2C_MCC_DIG_W1_BMASK_LSB GENMASK(23, 16)
+#define RTW89_H2C_MCC_DIG_W1_BMASK_MSB GENMASK(31, 24)
+#define RTW89_H2C_MCC_DIG_W2_VAL_LSB GENMASK(7, 0)
+#define RTW89_H2C_MCC_DIG_W2_VAL_MSB GENMASK(15, 8)
+
#define NUM_OF_RTW89_FW_RFK_PATH 2
#define NUM_OF_RTW89_FW_RFK_TBL 3
@@ -4667,6 +4699,7 @@ struct rtw89_c2h_rf_tas_info {
#define RTW89_FW_BACKTRACE_KEY 0xBACEBACE
#define FWDL_WAIT_CNT 400000
+#define FWDL_WAIT_CNT_USB 3200
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type);
int rtw89_fw_recognize(struct rtw89_dev *rtwdev);
@@ -4708,6 +4741,9 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 punctured);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
@@ -4724,6 +4760,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -4774,6 +4811,9 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_idx chanctx_idx,
+ u8 mcc_role_idx, u8 pd_val, bool en);
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode);
int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
@@ -5008,6 +5048,19 @@ int rtw89_chip_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
}
static inline
+int rtw89_chip_h2c_punctured_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ u16 punctured)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->h2c_punctured_cmac_tbl)
+ return 0;
+
+ return chip->ops->h2c_punctured_cmac_tbl(rtwdev, rtwvif_link, punctured);
+}
+
+static inline
int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 53628838a7c5..5a5da9d9c0c5 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -88,7 +88,7 @@ int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val)
ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
- if (ret)
+ if (ret && !test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");
rtw89_write32(rtwdev, R_AX_LTE_WDATA, val);
@@ -104,7 +104,7 @@ int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val)
ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
- if (ret)
+ if (ret && !test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");
rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset);
@@ -875,31 +875,30 @@ EXPORT_SYMBOL(rtw89_mac_set_err_status);
static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_hfc_param_ini *param_ini, *param_inis;
struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
- struct rtw89_hfc_param_ini param_ini = {NULL};
u8 qta_mode = rtwdev->mac.dle_info.qta_mode;
- switch (rtwdev->hci.type) {
- case RTW89_HCI_TYPE_PCIE:
- param_ini = rtwdev->chip->hfc_param_ini[qta_mode];
- param->en = 0;
- break;
- default:
+ param_inis = rtwdev->chip->hfc_param_ini[rtwdev->hci.type];
+ if (!param_inis)
return -EINVAL;
- }
- if (param_ini.pub_cfg)
- param->pub_cfg = *param_ini.pub_cfg;
+ param_ini = &param_inis[qta_mode];
+
+ param->en = 0;
+
+ if (param_ini->pub_cfg)
+ param->pub_cfg = *param_ini->pub_cfg;
- if (param_ini.prec_cfg)
- param->prec_cfg = *param_ini.prec_cfg;
+ if (param_ini->prec_cfg)
+ param->prec_cfg = *param_ini->prec_cfg;
- if (param_ini.ch_cfg)
- param->ch_cfg = param_ini.ch_cfg;
+ if (param_ini->ch_cfg)
+ param->ch_cfg = param_ini->ch_cfg;
memset(&param->ch_info, 0, sizeof(param->ch_info));
memset(&param->pub_info, 0, sizeof(param->pub_info));
- param->mode = param_ini.mode;
+ param->mode = param_ini->mode;
return 0;
}
@@ -1441,6 +1440,23 @@ void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
rtw89_mac_send_rpwm(rtwdev, state, true);
}
+static void rtw89_mac_power_switch_boot_mode(struct rtw89_dev *rtwdev)
+{
+ u32 boot_mode;
+
+ if (rtwdev->hci.type != RTW89_HCI_TYPE_USB)
+ return;
+
+ boot_mode = rtw89_read32_mask(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE);
+ if (!boot_mode)
+ return;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFN_ONMAC);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_STATUS1, B_AX_AUTO_WLPON);
+ rtw89_write32_clr(rtwdev, R_AX_GPIO_MUXCFG, B_AX_BOOT_MODE);
+ rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
+}
+
static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
{
#define PWR_ACT 1
@@ -1451,6 +1467,8 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
int ret;
u8 val;
+ rtw89_mac_power_switch_boot_mode(rtwdev);
+
if (on) {
cfg_seq = chip->pwr_on_seq;
cfg_func = chip->ops->pwr_on_func;
@@ -1646,6 +1664,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
/* 8852C PCIE SCC */
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
.wde_size23 = {RTW89_WDE_PG_64, 1022, 2,},
+ /* 8852B USB2.0/USB3.0 SCC */
+ .wde_size25 = {RTW89_WDE_PG_64, 162, 94,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
.ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
@@ -1661,6 +1681,10 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
/* 8852C PCIE SCC */
.ple_size19 = {RTW89_PLE_PG_128, 1904, 16,},
+ /* 8852B USB2.0 SCC */
+ .ple_size32 = {RTW89_PLE_PG_128, 620, 20,},
+ /* 8852B USB3.0 SCC */
+ .ple_size33 = {RTW89_PLE_PG_128, 632, 8,},
/* PCIE 64 */
.wde_qt0 = {3792, 196, 0, 107,},
.wde_qt0_v1 = {3302, 6, 0, 20,},
@@ -1675,6 +1699,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
.wde_qt23 = {958, 48, 0, 16,},
+ /* 8852B USB2.0/USB3.0 SCC */
+ .wde_qt25 = {152, 2, 0, 8,},
.ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
.ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
/* PCIE SCC */
@@ -1698,6 +1724,13 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
/* PCIE 64 */
.ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
.ple_qt59 = {147, 0, 32, 20, 1860, 13, 2025, 0, 1879, 14, 24, 0,},
+ /* USB2.0 52B SCC */
+ .ple_qt72 = {130, 0, 16, 48, 4, 13, 322, 0, 32, 14, 8, 0, 0,},
+ /* USB2.0 52B 92K */
+ .ple_qt73 = {130, 0, 32, 48, 37, 13, 355, 0, 65, 14, 24, 0, 0,},
+ /* USB3.0 52B 92K */
+ .ple_qt74 = {286, 0, 16, 48, 4, 13, 178, 0, 32, 14, 8, 0, 0,},
+ .ple_qt75 = {286, 0, 32, 48, 37, 13, 211, 0, 65, 14, 24, 0, 0,},
/* 8852A PCIE WOW */
.ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,},
/* 8852B PCIE WOW */
@@ -1717,12 +1750,13 @@ static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
enum rtw89_qta_mode mode)
{
struct rtw89_mac_info *mac = &rtwdev->mac;
- const struct rtw89_dle_mem *cfg;
+ const struct rtw89_dle_mem *cfg, *cfgs;
- cfg = &rtwdev->chip->dle_mem[mode];
- if (!cfg)
+ cfgs = rtwdev->chip->dle_mem[rtwdev->hci.dle_type];
+ if (!cfgs)
return NULL;
+ cfg = &cfgs[mode];
if (cfg->mode != mode) {
rtw89_warn(rtwdev, "qta mode unmatch!\n");
return NULL;
@@ -5015,6 +5049,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (rtwvif_link && rtwvif->scan_req &&
!list_empty(&rtwdev->scan_info.chan_list)) {
+ rtwdev->scan_info.delay = 0;
ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true);
if (ret) {
rtw89_hw_scan_abort(rtwdev, rtwvif_link);
@@ -5053,6 +5088,7 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
const struct rtw89_c2h_mac_bcnfltr_rpt *c2h =
(const struct rtw89_c2h_mac_bcnfltr_rpt *)skb->data;
u8 type, event, mac_id;
+ bool start_detect;
s8 sig;
type = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_TYPE);
@@ -5070,10 +5106,15 @@ rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
switch (type) {
case RTW89_BCN_FLTR_BEACON_LOSS:
if (!rtwdev->scanning && !rtwvif->offchan &&
- !rtwvif_link->noa_once.in_duration)
+ !rtwvif_link->noa_once.in_duration) {
+ start_detect = rtw89_mcc_detect_go_bcn(rtwdev, rtwvif_link);
+ if (start_detect)
+ return;
+
ieee80211_connection_loss(vif);
- else
+ } else {
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+ }
return;
case RTW89_BCN_FLTR_NOTIFY:
nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
@@ -5124,6 +5165,7 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
{
/* N.B. This will run in interrupt context. */
struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait;
const struct rtw89_c2h_done_ack *c2h =
(const struct rtw89_c2h_done_ack *)skb_c2h->data;
@@ -5166,9 +5208,11 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN;
break;
case H2C_FUNC_SCANOFLD:
+ scan_info->seq++;
cond = RTW89_SCANOFLD_WAIT_COND_START;
break;
case H2C_FUNC_SCANOFLD_BE:
+ scan_info->seq++;
cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN;
break;
@@ -5665,10 +5709,15 @@ static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
const struct rtw89_c2h_scanofld *c2h =
(const struct rtw89_c2h_scanofld *)skb->data;
struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
struct rtw89_completion_data data = {};
unsigned int cond;
u8 status, reason;
+ attr->is_scan_event = 1;
+ attr->scan_seq = scan_info->seq;
+
status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
data.err = status != RTW89_SCAN_STATUS_SUCCESS;
@@ -5824,7 +5873,7 @@ int rtw89_mac_cfg_ppdu_status_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool enab
rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN |
B_AX_APP_MAC_INFO_RPT |
- B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT |
+ B_AX_APP_PLCP_HDR_RPT |
B_AX_PPDU_STAT_RPT_CRC32);
rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK,
RTW89_PRPT_DEST_HOST);
@@ -5907,13 +5956,15 @@ int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex
ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32);
if (ret) {
- rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n");
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
+ rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n");
return ret;
}
val32 = val32 & B_AX_WL_RX_CTRL;
ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32);
if (ret) {
- rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n");
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
+ rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n");
return ret;
}
@@ -6037,7 +6088,8 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val);
if (ret) {
- rtw89_err(rtwdev, "Write LTE fail!\n");
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
+ rtw89_err(rtwdev, "Write LTE fail!\n");
return ret;
}
@@ -6888,10 +6940,16 @@ int rtw89_fwdl_check_path_ready_ax(struct rtw89_dev *rtwdev,
bool h2c_or_fwdl)
{
u8 check = h2c_or_fwdl ? B_AX_H2C_PATH_RDY : B_AX_FWDL_PATH_RDY;
+ u32 timeout;
u8 val;
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
+ timeout = FWDL_WAIT_CNT_USB;
+ else
+ timeout = FWDL_WAIT_CNT;
+
return read_poll_timeout_atomic(rtw89_read8, val, val & check,
- 1, FWDL_WAIT_CNT, false,
+ 1, timeout, false,
rtwdev, R_AX_WCPU_FW_CTRL);
}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index b7fd4a0fdb84..241e89983c4a 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -6,6 +6,7 @@
#define __RTW89_MAC_H__
#include "core.h"
+#include "fw.h"
#include "reg.h"
#define MAC_MEM_DUMP_PAGE_SIZE_AX 0x40000
@@ -924,6 +925,7 @@ struct rtw89_mac_size_set {
const struct rtw89_dle_size wde_size18;
const struct rtw89_dle_size wde_size19;
const struct rtw89_dle_size wde_size23;
+ const struct rtw89_dle_size wde_size25;
const struct rtw89_dle_size ple_size0;
const struct rtw89_dle_size ple_size0_v1;
const struct rtw89_dle_size ple_size3_v1;
@@ -933,6 +935,8 @@ struct rtw89_mac_size_set {
const struct rtw89_dle_size ple_size9;
const struct rtw89_dle_size ple_size18;
const struct rtw89_dle_size ple_size19;
+ const struct rtw89_dle_size ple_size32;
+ const struct rtw89_dle_size ple_size33;
const struct rtw89_wde_quota wde_qt0;
const struct rtw89_wde_quota wde_qt0_v1;
const struct rtw89_wde_quota wde_qt4;
@@ -941,6 +945,7 @@ struct rtw89_mac_size_set {
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
const struct rtw89_wde_quota wde_qt23;
+ const struct rtw89_wde_quota wde_qt25;
const struct rtw89_ple_quota ple_qt0;
const struct rtw89_ple_quota ple_qt1;
const struct rtw89_ple_quota ple_qt4;
@@ -955,6 +960,10 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt57;
const struct rtw89_ple_quota ple_qt58;
const struct rtw89_ple_quota ple_qt59;
+ const struct rtw89_ple_quota ple_qt72;
+ const struct rtw89_ple_quota ple_qt73;
+ const struct rtw89_ple_quota ple_qt74;
+ const struct rtw89_ple_quota ple_qt75;
const struct rtw89_ple_quota ple_qt_52a_wow;
const struct rtw89_ple_quota ple_qt_52b_wow;
const struct rtw89_ple_quota ple_qt_52bt_wow;
@@ -1575,4 +1584,28 @@ void rtw89_fwdl_secure_idmem_share_mode(struct rtw89_dev *rtwdev, u8 mode)
return mac->fwdl_secure_idmem_share_mode(rtwdev, mode);
}
+
+static inline
+int rtw89_mac_scan_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif_link *rtwvif_link,
+ bool wowlan)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ int ret;
+
+ ret = mac->scan_offload(rtwdev, option, rtwvif_link, wowlan);
+
+ if (option->enable) {
+ /*
+ * At this point, the new scan request has been acknowledged by
+ * firmware, so scan events from the previous scan request are now
+ * obsolete. Purge the queued scan events so they do not interfere
+ * with the new request.
+ */
+ rtw89_fw_c2h_purge_obsoleted_scan_events(rtwdev);
+ }
+
+ return ret;
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index a3ae1e654a98..c1ca6d741b32 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -113,6 +113,8 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
wiphy_delayed_work_init(&rtwvif_link->csa_beacon_work, rtw89_core_csa_beacon_work);
+ wiphy_delayed_work_init(&rtwvif_link->mcc_gc_detect_beacon_work,
+ rtw89_mcc_gc_detect_beacon_work);
INIT_LIST_HEAD(&rtwvif_link->general_pkt_list);
@@ -124,6 +126,7 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
rtwvif_link->chanctx_idx = RTW89_CHANCTX_0;
rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
rtwvif_link->rand_tsf_done = false;
+ rtwvif_link->detect_bcn_count = 0;
rcu_read_lock();
@@ -147,6 +150,8 @@ static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev,
wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work);
wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->csa_beacon_work);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy,
+ &rtwvif_link->mcc_gc_detect_beacon_work);
rtw89_p2p_noa_once_deinit(rtwvif_link);
@@ -1772,6 +1777,7 @@ static int rtw89_ops_suspend(struct ieee80211_hw *hw,
set_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags);
wiphy_delayed_work_cancel(hw->wiphy, &rtwdev->track_work);
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwdev->track_ps_work);
ret = rtw89_wow_suspend(rtwdev, wowlan);
if (ret) {
@@ -1797,6 +1803,8 @@ static int rtw89_ops_resume(struct ieee80211_hw *hw)
clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags);
wiphy_delayed_work_queue(hw->wiphy, &rtwdev->track_work,
RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(hw->wiphy, &rtwdev->track_ps_work,
+ RTW89_TRACK_PS_WORK_PERIOD);
return ret ? 1 : 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 204a3748d913..a669f2f843aa 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -2638,6 +2638,10 @@ static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL,
+ B_AX_EN_STUCK_DBG | B_AX_ASFF_FULL_NO_STK,
+ B_AX_EN_STUCK_DBG);
+
if (rtwdev->chip->chip_id == RTL8852A)
rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
B_AX_EN_CHKDSC_NO_RX_STUCK);
@@ -4486,6 +4490,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rtwdev->pci_info = info->bus.pci;
rtwdev->hci.ops = &rtw89_pci_ops;
rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
+ rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_PCIE;
rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index f81bee4149bf..d607577b353c 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -897,7 +897,8 @@ static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
30, false, rtwdev, R_SWSI_V1,
B_SWSI_R_DATA_DONE_V1);
if (ret) {
- rtw89_err(rtwdev, "read swsi busy\n");
+ if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
+ rtw89_err(rtwdev, "read swsi busy\n");
return INV_RF_DATA;
}
@@ -6315,18 +6316,13 @@ static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
- struct rtw89_bb_ctx *bb,
- u8 rssi, bool enable)
+static u8 rtw89_phy_dig_cal_under_region(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ const struct rtw89_chan *chan)
{
- const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
- const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
enum rtw89_bandwidth cbw = chan->band_width;
struct rtw89_dig_info *dig = &bb->dig;
- u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
- u8 ofdm_cca_th;
- s8 cck_cca_th;
- u32 pd_val = 0;
+ u8 under_region = dig->pd_low_th_ofst;
if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
under_region += PD_TH_SB_FLTR_CMP_VAL;
@@ -6348,6 +6344,20 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
break;
}
+ return under_region;
+}
+
+static u32 __rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ u8 rssi, bool enable,
+ const struct rtw89_chan *chan)
+{
+ struct rtw89_dig_info *dig = &bb->dig;
+ u8 ofdm_cca_th, under_region;
+ u8 final_rssi;
+ u32 pd_val;
+
+ under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
dig->dyn_pd_th_max = dig->igi_rssi;
final_rssi = min_t(u8, rssi, dig->igi_rssi);
@@ -6360,10 +6370,28 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
"igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
final_rssi, ofdm_cca_th, under_region, pd_val);
} else {
+ pd_val = 0;
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
+ return pd_val;
+}
+
+static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ u8 rssi, bool enable)
+{
+ const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+ struct rtw89_dig_info *dig = &bb->dig;
+ u8 final_rssi, under_region = dig->pd_low_th_ofst;
+ s8 cck_cca_th;
+ u32 pd_val;
+
+ pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi, enable, chan);
+ dig->bak_dig = pd_val;
+
rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
@@ -6372,6 +6400,8 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
if (!rtwdev->hal.support_cckpd)
return;
+ final_rssi = min_t(u8, rssi, dig->igi_rssi);
+ under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);
@@ -6399,11 +6429,161 @@ void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
+static
+void rtw89_phy_cal_igi_fa_rssi(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_dig_info *dig = &bb->dig;
+ u8 igi_min;
+
+ rtw89_phy_dig_igi_offset_by_env(rtwdev, bb);
+
+ igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
+ dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
+ dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);
+
+ if (dig->dyn_igi_max >= dig->dyn_igi_min) {
+ dig->igi_fa_rssi += dig->fa_rssi_ofst;
+ dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
+ dig->dyn_igi_max);
+ } else {
+ dig->igi_fa_rssi = dig->dyn_igi_max;
+ }
+}
+
+struct rtw89_phy_iter_mcc_dig {
+ struct rtw89_vif_link *rtwvif_link;
+ bool has_sta;
+ u8 rssi_min;
+};
+
+static void rtw89_phy_set_mcc_dig(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_bb_ctx *bb,
+ u8 rssi_min, u8 mcc_role_idx,
+ bool is_linked)
+{
+ struct rtw89_dig_info *dig = &bb->dig;
+ const struct rtw89_chan *chan;
+ u8 pd_val;
+
+ if (is_linked) {
+ dig->igi_rssi = rssi_min >> 1;
+ dig->igi_fa_rssi = dig->igi_rssi;
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
+ dig->igi_rssi = rssi_nolink;
+ dig->igi_fa_rssi = dig->igi_rssi;
+ }
+
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);
+ pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi,
+ is_linked, chan);
+ rtw89_fw_h2c_mcc_dig(rtwdev, rtwvif_link->chanctx_idx,
+ mcc_role_idx, pd_val, true);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "MCC chanctx_idx %d chan %d rssi %d pd_val %d",
+ rtwvif_link->chanctx_idx, chan->primary_channel,
+ dig->igi_rssi, pd_val);
+}
+
+static void rtw89_phy_set_mcc_dig_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_phy_iter_mcc_dig *mcc_dig = (struct rtw89_phy_iter_mcc_dig *)data;
+ unsigned int link_id = mcc_dig->rtwvif_link->link_id;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
+ struct rtw89_sta_link *rtwsta_link;
+
+ if (rtwsta->rtwvif != mcc_dig->rtwvif_link->rtwvif)
+ return;
+
+ rtwsta_link = rtwsta->links[link_id];
+ if (!rtwsta_link)
+ return;
+
+ mcc_dig->has_sta = true;
+ if (ewma_rssi_read(&rtwsta_link->avg_rssi) < mcc_dig->rssi_min)
+ mcc_dig->rssi_min = ewma_rssi_read(&rtwsta_link->avg_rssi);
+}
+
+static void rtw89_phy_dig_mcc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_phy_iter_mcc_dig mcc_dig;
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_mcc_links_info info;
+ int i;
+
+ rtw89_mcc_get_links(rtwdev, &info);
+ for (i = 0; i < ARRAY_SIZE(info.links); i++) {
+ rtwvif_link = info.links[i];
+ if (!rtwvif_link)
+ continue;
+
+ memset(&mcc_dig, 0, sizeof(mcc_dig));
+ mcc_dig.rtwvif_link = rtwvif_link;
+ mcc_dig.has_sta = false;
+ mcc_dig.rssi_min = U8_MAX;
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_phy_set_mcc_dig_iter,
+ &mcc_dig);
+
+ rtw89_phy_set_mcc_dig(rtwdev, rtwvif_link, bb,
+ mcc_dig.rssi_min, i, mcc_dig.has_sta);
+ }
+}
+
+static void rtw89_phy_dig_ctrl(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb,
+ bool pause_dig, bool restore)
+{
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+ struct rtw89_dig_info *dig = &bb->dig;
+ bool en_dig;
+ u32 pd_val;
+
+ if (dig->pause_dig == pause_dig)
+ return;
+
+ if (pause_dig) {
+ en_dig = false;
+ pd_val = 0;
+ } else {
+ en_dig = rtwdev->total_sta_assoc > 0;
+ pd_val = restore ? dig->bak_dig : 0;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s <%s> PD_low=%d", __func__,
+ pause_dig ? "suspend" : "resume", pd_val);
+
+ rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_spatial_reuse_en, en_dig, bb->phy_idx);
+
+ dig->pause_dig = pause_dig;
+}
+
+void rtw89_phy_dig_suspend(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ rtw89_phy_dig_ctrl(rtwdev, bb, true, false);
+}
+
+void rtw89_phy_dig_resume(struct rtw89_dev *rtwdev, bool restore)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ rtw89_phy_dig_ctrl(rtwdev, bb, false, restore);
+}
+
static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
struct rtw89_dig_info *dig = &bb->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
- u8 igi_min;
+ enum rtw89_entity_mode mode;
if (unlikely(dig->bypass_dig)) {
dig->bypass_dig = false;
@@ -6414,6 +6594,15 @@ static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
rtw89_phy_dig_update_rssi_info(rtwdev, bb);
+ mode = rtw89_get_entity_mode(rtwdev);
+ if (mode == RTW89_ENTITY_MODE_MCC) {
+ rtw89_phy_dig_mcc(rtwdev, bb);
+ return;
+ }
+
+ if (unlikely(dig->pause_dig))
+ return;
+
if (!dig->is_linked_pre && is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
rtw89_phy_dig_update_para(rtwdev, bb);
@@ -6425,19 +6614,7 @@ static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
}
dig->is_linked_pre = is_linked;
- rtw89_phy_dig_igi_offset_by_env(rtwdev, bb);
-
- igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
- dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
- dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);
-
- if (dig->dyn_igi_max >= dig->dyn_igi_min) {
- dig->igi_fa_rssi += dig->fa_rssi_ofst;
- dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
- dig->dyn_igi_max);
- } else {
- dig->igi_fa_rssi = dig->dyn_igi_max;
- }
+ rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 63cc33c16c9a..dc156376d951 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -1010,6 +1010,8 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val);
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
+void rtw89_phy_dig_suspend(struct rtw89_dev *rtwdev);
+void rtw89_phy_dig_resume(struct rtw89_dev *rtwdev, bool restore);
void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev);
void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 3411d642c84a..652f8fc81b79 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -13,6 +13,31 @@
#include "reg.h"
#include "util.h"
+static int rtw89_fw_receive_lps_h2c_check(struct rtw89_dev *rtwdev, u8 macid)
+{
+ struct rtw89_mac_c2h_info c2h_info = {};
+ u16 c2hreg_macid;
+ u32 c2hreg_ret;
+ int ret;
+
+ if (!RTW89_CHK_FW_FEATURE(LPS_DACK_BY_C2H_REG, &rtwdev->fw))
+ return 0;
+
+ c2h_info.id = RTW89_FWCMD_C2HREG_FUNC_PS_LEAVE_ACK;
+ ret = rtw89_fw_msg_reg(rtwdev, NULL, &c2h_info);
+ if (ret)
+ return ret;
+
+ c2hreg_macid = u32_get_bits(c2h_info.u.c2hreg[0],
+ RTW89_C2HREG_PS_LEAVE_ACK_MACID);
+ c2hreg_ret = u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_PS_LEAVE_ACK_RET);
+
+ if (macid != c2hreg_macid || c2hreg_ret)
+ rtw89_warn(rtwdev, "rtw89: failed to check lps h2c received by firmware\n");
+
+ return 0;
+}
+
static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
@@ -106,7 +131,8 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
};
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
- rtw89_fw_leave_lps_check(rtwdev, 0);
+ rtw89_fw_receive_lps_h2c_check(rtwdev, rtwvif_link->mac_id);
+ rtw89_fw_leave_lps_check(rtwdev, rtwvif_link->mac_id);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_chip_digital_pwr_comp(rtwdev, rtwvif_link->phy_idx);
}
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 4a65b0c9c2d1..de81103a072f 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -21,6 +21,7 @@
#define R_AX_SYS_PW_CTRL 0x0004
#define B_AX_SOP_ASWRM BIT(31)
#define B_AX_SOP_PWMM_DSWR BIT(29)
+#define B_AX_SOP_EDSWR BIT(28)
#define B_AX_XTAL_OFF_A_DIE BIT(22)
#define B_AX_DIS_WLBT_PDNSUSEN_SOPC BIT(18)
#define B_AX_RDY_SYSPWR BIT(17)
@@ -182,6 +183,7 @@
#define R_AX_SYS_STATUS1 0x00F4
#define B_AX_SEL_0XC0_MASK GENMASK(17, 16)
+#define B_AX_AUTO_WLPON BIT(10)
#define B_AX_PAD_HCI_SEL_V2_MASK GENMASK(5, 3)
#define MAC_AX_HCI_SEL_SDIO_UART 0
#define MAC_AX_HCI_SEL_MULTI_USB 1
@@ -380,6 +382,18 @@
#define B_AX_ACH1_BUSY BIT(9)
#define B_AX_ACH0_BUSY BIT(8)
+#define R_AX_USB_ENDPOINT_0 0x1060
+#define B_AX_EP_IDX GENMASK(3, 0)
+#define R_AX_USB_ENDPOINT_2 0x1068
+#define NUMP 0x1
+#define R_AX_USB_HOST_REQUEST_2 0x1078
+#define B_AX_R_USBIO_MODE BIT(4)
+#define R_AX_USB3_MAC_NPI_CONFIG_INTF_0 0x1114
+#define B_AX_SSPHY_LFPS_FILTER BIT(31)
+#define R_AX_USB_WLAN0_1 0x1174
+#define B_AX_USBRX_RST BIT(9)
+#define B_AX_USBTX_RST BIT(8)
+
#define R_AX_PCIE_DBG_CTRL 0x11C0
#define B_AX_DBG_DUMMY_MASK GENMASK(23, 16)
#define B_AX_PCIE_DBG_SEL_MASK GENMASK(15, 13)
@@ -459,6 +473,17 @@
#define R_AX_WP_PAGE_CTRL2_V1 0x17A4
#define R_AX_WP_PAGE_INFO1_V1 0x17A8
+#define R_AX_USB_ENDPOINT_0_V1 0x5060
+#define B_AX_EP_IDX_V1 GENMASK(3, 0)
+#define R_AX_USB_ENDPOINT_2_V1 0x5068
+#define R_AX_USB_HOST_REQUEST_2_V1 0x5078
+#define B_AX_R_USBIO_MODE_V1 BIT(4)
+#define R_AX_USB3_MAC_NPI_CONFIG_INTF_0_V1 0x5114
+#define B_AX_SSPHY_LFPS_FILTER_V1 BIT(31)
+#define R_AX_USB_WLAN0_1_V1 0x5174
+#define B_AX_USBRX_RST_V1 BIT(9)
+#define B_AX_USBTX_RST_V1 BIT(8)
+
#define R_AX_H2CREG_DATA0_V1 0x7140
#define R_AX_H2CREG_DATA1_V1 0x7144
#define R_AX_H2CREG_DATA2_V1 0x7148
@@ -1025,6 +1050,12 @@
#define B_AX_DISPATCHER_INTN_SEL_MASK GENMASK(7, 4)
#define B_AX_DISPATCHER_CH_SEL_MASK GENMASK(3, 0)
+#define R_AX_RXDMA_SETTING 0x8908
+#define B_AX_BULK_SIZE GENMASK(1, 0)
+#define USB11_BULKSIZE 0x2
+#define USB2_BULKSIZE 0x1
+#define USB3_BULKSIZE 0x0
+
#define R_AX_RX_FUNCTION_STOP 0x8920
#define B_AX_HDR_RX_STOP BIT(0)
@@ -8775,6 +8806,8 @@
#define B_P0_TSSI_RFC GENMASK(28, 27)
#define B_P0_TSSI_OFT_EN BIT(28)
#define B_P0_TSSI_OFT GENMASK(7, 0)
+#define R_P0_TSSI_SLOPE_CAL 0x581c
+#define B_P0_TSSI_SLOPE_CAL_EN BIT(20)
#define R_P0_TSSI_AVG 0x5820
#define B_P0_TSSI_EN BIT(31)
#define B_P0_TSSI_AVG GENMASK(15, 12)
@@ -9268,6 +9301,7 @@
#define B_WDADC_SEL GENMASK(5, 4)
#define R_ADCMOD 0xC0E8
#define B_ADCMOD_LP GENMASK(31, 16)
+#define B_ADCMOD_AUTO_RST BIT(6)
#define R_DCIM 0xC0EC
#define B_DCIM_RC GENMASK(23, 16)
#define B_DCIM_FR GENMASK(14, 13)
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 3ad14cab1f58..58582f8d2b74 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -360,15 +360,13 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
struct wiphy *wiphy)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
- const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_supported_band *sband;
struct rtw89_acpi_dsm_result res = {};
- bool enable_by_fcc;
- bool enable_by_ic;
+ bool enable;
+ u8 index;
int ret;
u8 val;
- int i;
sband = wiphy->bands[NL80211_BAND_5GHZ];
if (!sband)
@@ -385,35 +383,25 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: cannot eval unii 4: %d\n", ret);
- enable_by_fcc = true;
- enable_by_ic = false;
+ val = u8_encode_bits(1, RTW89_ACPI_CONF_UNII4_US);
goto bottom;
}
val = res.u.value;
- enable_by_fcc = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_FCC);
- enable_by_ic = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_IC);
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: eval if allow unii-4: 0x%x\n", val);
bottom:
- for (i = 0; i < regd_ctrl->nr; i++) {
- const struct rtw89_regd *regd = &regd_ctrl->map[i];
-
- switch (regd->txpwr_regd[RTW89_BAND_5G]) {
- case RTW89_FCC:
- if (enable_by_fcc)
- clear_bit(i, regulatory->block_unii4);
- break;
- case RTW89_IC:
- if (enable_by_ic)
- clear_bit(i, regulatory->block_unii4);
- break;
- default:
- break;
- }
- }
+ index = rtw89_regd_get_index_by_name(rtwdev, "US");
+ enable = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_US);
+ if (enable && index != RTW89_REGD_MAX_COUNTRY_NUM)
+ clear_bit(index, regulatory->block_unii4);
+
+ index = rtw89_regd_get_index_by_name(rtwdev, "CA");
+ enable = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_CA);
+ if (enable && index != RTW89_REGD_MAX_COUNTRY_NUM)
+ clear_bit(index, regulatory->block_unii4);
}
static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block,
@@ -490,12 +478,11 @@ out:
static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
- const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
const struct rtw89_acpi_policy_6ghz_sp *ptr;
struct rtw89_acpi_dsm_result res = {};
- bool enable_by_us;
+ bool enable;
+ u8 index;
int ret;
- int i;
ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP, &res);
if (ret) {
@@ -520,16 +507,66 @@ static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
bitmap_fill(regulatory->block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
- enable_by_us = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US);
+ index = rtw89_regd_get_index_by_name(rtwdev, "US");
+ enable = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US);
+ if (enable && index != RTW89_REGD_MAX_COUNTRY_NUM)
+ clear_bit(index, regulatory->block_6ghz_sp);
- for (i = 0; i < regd_ctrl->nr; i++) {
- const struct rtw89_regd *tmp = &regd_ctrl->map[i];
+ index = rtw89_regd_get_index_by_name(rtwdev, "CA");
+ enable = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_CA);
+ if (enable && index != RTW89_REGD_MAX_COUNTRY_NUM)
+ clear_bit(index, regulatory->block_6ghz_sp);
+
+out:
+ kfree(ptr);
+}
- if (enable_by_us && memcmp(tmp->alpha2, "US", 2) == 0)
- clear_bit(i, regulatory->block_6ghz_sp);
+static void rtw89_regd_setup_policy_6ghz_vlp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_acpi_policy_6ghz_vlp *ptr = NULL;
+ struct rtw89_acpi_dsm_result res = {};
+ bool enable;
+ u8 index;
+ int ret;
+ u8 val;
+
+ /* By default, allow 6 GHz VLP in all countries except US and CA. */
+ val = ~(RTW89_ACPI_CONF_6GHZ_VLP_US | RTW89_ACPI_CONF_6GHZ_VLP_CA);
+
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6GHZ_VLP_SUP, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "acpi: cannot eval policy 6ghz-vlp: %d\n", ret);
+ goto bottom;
}
-out:
+ ptr = res.u.policy_6ghz_vlp;
+
+ switch (ptr->override) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "%s: unknown override case: %d\n", __func__,
+ ptr->override);
+ fallthrough;
+ case 0:
+ break;
+ case 1:
+ val = ptr->conf;
+ break;
+ }
+
+bottom:
+ index = rtw89_regd_get_index_by_name(rtwdev, "US");
+ enable = u8_get_bits(val, RTW89_ACPI_CONF_6GHZ_VLP_US);
+ if (!enable && index != RTW89_REGD_MAX_COUNTRY_NUM)
+ set_bit(index, regulatory->block_6ghz_vlp);
+
+ index = rtw89_regd_get_index_by_name(rtwdev, "CA");
+ enable = u8_get_bits(val, RTW89_ACPI_CONF_6GHZ_VLP_CA);
+ if (!enable && index != RTW89_REGD_MAX_COUNTRY_NUM)
+ set_bit(index, regulatory->block_6ghz_vlp);
+
kfree(ptr);
}
@@ -576,6 +613,7 @@ bottom:
if (regd_allow_6ghz) {
rtw89_regd_setup_policy_6ghz(rtwdev);
rtw89_regd_setup_policy_6ghz_sp(rtwdev);
+ rtw89_regd_setup_policy_6ghz_vlp(rtwdev);
return;
}
@@ -620,6 +658,30 @@ const char *rtw89_regd_get_string(enum rtw89_regulation_type regd)
return rtw89_regd_string[regd];
}
+static void rtw89_regd_setup_reg_rules(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_acpi_policy_reg_rules *ptr;
+ struct rtw89_acpi_dsm_result res = {};
+ int ret;
+
+ regulatory->txpwr_uk_follow_etsi = true;
+
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_REG_RULES_EN, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "acpi: cannot eval policy reg-rules: %d\n", ret);
+ return;
+ }
+
+ ptr = res.u.policy_reg_rules;
+
+ regulatory->txpwr_uk_follow_etsi =
+ !u8_get_bits(ptr->conf, RTW89_ACPI_CONF_REG_RULE_REGD_UK);
+
+ kfree(ptr);
+}
+
int rtw89_regd_setup(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
@@ -636,7 +698,8 @@ int rtw89_regd_setup(struct rtw89_dev *rtwdev)
}
regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
- regulatory->txpwr_uk_follow_etsi = true;
+
+ rtw89_regd_setup_reg_rules(rtwdev);
if (!wiphy)
return -EINVAL;
@@ -1046,7 +1109,16 @@ static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool active,
unsigned int *changed)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ bool blocked[NUM_OF_RTW89_REG_6GHZ_POWER] = {};
+ u8 index = rtw89_regd_get_index(rtwdev, regd);
struct ieee80211_bss_conf *bss_conf;
+ bool dflt = false;
+
+ if (index == RTW89_REGD_MAX_COUNTRY_NUM ||
+ test_bit(index, regulatory->block_6ghz_vlp))
+ blocked[RTW89_REG_6GHZ_POWER_VLP] = true;
rcu_read_lock();
@@ -1065,6 +1137,7 @@ static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
break;
default:
rtwvif_link->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+ dflt = true;
break;
}
} else {
@@ -1073,6 +1146,14 @@ static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
rcu_read_unlock();
+ if (!dflt && blocked[rtwvif_link->reg_6ghz_power]) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "%c%c 6 GHz power type-%u is blocked by policy\n",
+ regd->alpha2[0], regd->alpha2[1],
+ rtwvif_link->reg_6ghz_power);
+ return -EINVAL;
+ }
+
*changed += __rtw89_reg_6ghz_power_recalc(rtwdev);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index c55833f259de..393df2b0dcae 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -51,6 +51,48 @@ static const struct rtw89_hfc_param_ini rtw8851b_hfc_param_ini_pcie[] = {
[RTW89_QTA_INVALID] = {NULL},
};
+static const struct rtw89_hfc_ch_cfg rtw8851b_hfc_chcfg_usb[] = {
+ {18, 152, grp_0}, /* ACH 0 */
+ {18, 152, grp_0}, /* ACH 1 */
+ {18, 152, grp_0}, /* ACH 2 */
+ {18, 152, grp_0}, /* ACH 3 */
+ {0, 0, grp_0}, /* ACH 4 */
+ {0, 0, grp_0}, /* ACH 5 */
+ {0, 0, grp_0}, /* ACH 6 */
+ {0, 0, grp_0}, /* ACH 7 */
+ {18, 152, grp_0}, /* B0MGQ */
+ {18, 152, grp_0}, /* B0HIQ */
+ {0, 0, grp_0}, /* B1MGQ */
+ {0, 0, grp_0}, /* B1HIQ */
+ {0, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8851b_hfc_pubcfg_usb = {
+ 152, /* Group 0 */
+ 0, /* Group 1 */
+ 152, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_prec_cfg rtw8851b_hfc_preccfg_usb = {
+ 9, /* CH 0-11 pre-cost */
+ 32, /* H2C pre-cost */
+ 64, /* WP CH 0-7 pre-cost */
+ 24, /* WP CH 8-11 pre-cost */
+ 1, /* CH 0-11 full condition */
+ 1, /* H2C full condition */
+ 1, /* WP CH 0-7 full condition */
+ 1, /* WP CH 8-11 full condition */
+};
+
+static const struct rtw89_hfc_param_ini rtw8851b_hfc_param_ini_usb[] = {
+ [RTW89_QTA_SCC] = {rtw8851b_hfc_chcfg_usb, &rtw8851b_hfc_pubcfg_usb,
+ &rtw8851b_hfc_preccfg_usb, RTW89_HCIFC_STF},
+ [RTW89_QTA_DLFW] = {NULL, NULL,
+ &rtw8851b_hfc_preccfg_usb, RTW89_HCIFC_STF},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
static const struct rtw89_dle_mem rtw8851b_dle_mem_pcie[] = {
[RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
&rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
@@ -68,6 +110,32 @@ static const struct rtw89_dle_mem rtw8851b_dle_mem_pcie[] = {
NULL},
};
+static const struct rtw89_dle_mem rtw8851b_dle_mem_usb2[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size25,
+ &rtw89_mac_size.ple_size32, &rtw89_mac_size.wde_qt25,
+ &rtw89_mac_size.wde_qt25, &rtw89_mac_size.ple_qt72,
+ &rtw89_mac_size.ple_qt73},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
+ &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const struct rtw89_dle_mem rtw8851b_dle_mem_usb3[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size25,
+ &rtw89_mac_size.ple_size33, &rtw89_mac_size.wde_qt25,
+ &rtw89_mac_size.wde_qt25, &rtw89_mac_size.ple_qt74,
+ &rtw89_mac_size.ple_qt75},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
+ &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
static const struct rtw89_reg3_def rtw8851b_btc_preagc_en_defs[] = {
{0x46D0, GENMASK(1, 0), 0x3},
{0x4AD4, GENMASK(31, 0), 0xf},
@@ -317,7 +385,8 @@ static int rtw8851b_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE)
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_OFF_WEI,
XTAL_SI_OFF_WEI);
@@ -362,8 +431,9 @@ static int rtw8851b_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
- rtw89_write32_set(rtwdev, R_AX_GPIO0_16_EECS_EESK_LED1_PULL_LOW_EN,
- B_AX_GPIO10_PULL_LOW_EN | B_AX_GPIO16_PULL_LOW_EN_V1);
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE)
+ rtw89_write32_set(rtwdev, R_AX_GPIO0_16_EECS_EESK_LED1_PULL_LOW_EN,
+ B_AX_GPIO10_PULL_LOW_EN | B_AX_GPIO16_PULL_LOW_EN_V1);
if (rtwdev->hal.cv == CHIP_CAV) {
ret = rtw89_read_efuse_ver(rtwdev, &val8);
@@ -447,7 +517,10 @@ static int rtw8851b_pwr_off_func(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE)
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
+ else if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_SOP_EDSWR);
if (rtwdev->hal.cv == CHIP_CAV) {
rtw8851b_patch_swr_pfm2pwm(rtwdev);
@@ -456,19 +529,18 @@ static int rtw8851b_pwr_off_func(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_AX_SPSANA_ON_CTRL1, B_AX_FPWMDELAY);
}
- rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+ } else if (rtwdev->hci.type == RTW89_HCI_TYPE_USB) {
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_PW_CTRL);
+ val32 &= ~B_AX_AFSM_PCIE_SUS_EN;
+ val32 |= B_AX_AFSM_WLSUS_EN;
+ rtw89_write32(rtwdev, R_AX_SYS_PW_CTRL, val32);
+ }
return 0;
}
-static void rtw8851b_efuse_parsing(struct rtw89_efuse *efuse,
- struct rtw8851b_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->e.mac_addr);
- efuse->rfe_type = map->rfe_type;
- efuse->xtal_cap = map->xtal_k;
-}
-
static void rtw8851b_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
struct rtw8851b_efuse *map)
{
@@ -549,12 +621,18 @@ static int rtw8851b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
switch (rtwdev->hci.type) {
case RTW89_HCI_TYPE_PCIE:
- rtw8851b_efuse_parsing(efuse, map);
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ break;
+ case RTW89_HCI_TYPE_USB:
+ ether_addr_copy(efuse->addr, map->u.mac_addr);
break;
default:
return -EOPNOTSUPP;
}
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+
rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
return 0;
@@ -712,12 +790,22 @@ static void rtw8851b_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phyc
gain->comp_valid = valid;
}
+static void rtw8851b_phycap_parsing_adc_td(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ u32 phycap_addr = rtwdev->chip->phycap_addr;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ const u32 addr_adc_td = 0x5AF;
+
+ efuse->adc_td = phycap_map[addr_adc_td - phycap_addr] & GENMASK(4, 0);
+}
+
static int rtw8851b_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
{
rtw8851b_phycap_parsing_tssi(rtwdev, phycap_map);
rtw8851b_phycap_parsing_thermal_trim(rtwdev, phycap_map);
rtw8851b_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
rtw8851b_phycap_parsing_gain_comp(rtwdev, phycap_map);
+ rtw8851b_phycap_parsing_adc_td(rtwdev, phycap_map);
return 0;
}
@@ -1083,39 +1171,72 @@ static void rtw8851b_ctrl_ch(struct rtw89_dev *rtwdev,
static void rtw8851b_bw_setting(struct rtw89_dev *rtwdev, u8 bw)
{
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_CTL, 0x8);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_EN, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_BW0, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x4);
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ u8 adc_bw_sel;
+
+ switch (efuse->adc_td) {
+ default:
+ case 0x19:
+ adc_bw_sel = 0x4;
+ break;
+ case 0x11:
+ adc_bw_sel = 0x5;
+ break;
+ case 0x9:
+ adc_bw_sel = 0x3;
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, adc_bw_sel);
rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0xf);
rtw89_phy_write32_mask(rtwdev, R_ADCMOD, B_ADCMOD_LP, 0xa);
- rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ADJ, 0x92);
+ rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_RC, 0x3);
switch (bw) {
case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_CTL, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_BW0, 0x2);
rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_FR, 0x1);
rtw89_phy_write32_mask(rtwdev, R_WDADC, B_WDADC_SEL, 0x0);
rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK_DS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ADJ, 0x92);
break;
case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_CTL, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_BW0, 0x2);
rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_FR, 0x1);
rtw89_phy_write32_mask(rtwdev, R_WDADC, B_WDADC_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK_DS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ADJ, 0x92);
break;
case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_CTL, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_BW0, 0x2);
rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_FR, 0x2);
rtw89_phy_write32_mask(rtwdev, R_WDADC, B_WDADC_SEL, 0x2);
rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK_DS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ADJ, 0x92);
break;
case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_CTL, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_BW0, 0x2);
rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_FR, 0x2);
rtw89_phy_write32_mask(rtwdev, R_WDADC, B_WDADC_SEL, 0x2);
rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK_DS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ADJ, 0x92);
break;
case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_CTL, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0, B_P0_CFCH_BW0, 0x2);
rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_FR, 0x0);
rtw89_phy_write32_mask(rtwdev, R_WDADC, B_WDADC_SEL, 0x2);
rtw89_phy_write32_mask(rtwdev, R_ADDCK0D, B_ADDCK_DS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_RXCK_ADJ, 0x92);
break;
default:
rtw89_warn(rtwdev, "Fail to set ADC\n");
@@ -2425,6 +2546,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
.h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
+ .h2c_punctured_cmac_tbl = NULL,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2467,8 +2589,13 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x2f800,
- .hfc_param_ini = rtw8851b_hfc_param_ini_pcie,
- .dle_mem = rtw8851b_dle_mem_pcie,
+ .hfc_param_ini = {rtw8851b_hfc_param_ini_pcie,
+ rtw8851b_hfc_param_ini_usb,
+ NULL},
+ .dle_mem = {rtw8851b_dle_mem_pcie,
+ rtw8851b_dle_mem_usb2,
+ rtw8851b_dle_mem_usb3,
+ NULL},
.wde_qempty_acq_grpnum = 4,
.wde_qempty_mgq_grpsel = 4,
.rf_base_addr = {0xe000},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index f72b3ac6f149..7a319a6c838a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -12,14 +12,14 @@
#include "rtw8851b_rfk_table.h"
#include "rtw8851b_table.h"
-#define DPK_VER_8851B 0x5
-#define DPK_KIP_REG_NUM_8851B 7
+#define DPK_VER_8851B 0x11
+#define DPK_KIP_REG_NUM_8851B 8
#define DPK_RF_REG_NUM_8851B 4
#define DPK_KSET_NUM 4
#define RTW8851B_RXK_GROUP_NR 4
#define RTW8851B_RXK_GROUP_IDX_NR 2
#define RTW8851B_TXK_GROUP_NR 1
-#define RTW8851B_IQK_VER 0x2a
+#define RTW8851B_IQK_VER 0x14
#define RTW8851B_IQK_SS 1
#define RTW8851B_LOK_GRAM 10
#define RTW8851B_TSSI_PATH_NR 1
@@ -85,6 +85,24 @@ enum rf_mode {
RF_RXK2 = 0x7,
};
+enum adc_ck {
+ ADC_NA = 0,
+ ADC_480M = 1,
+ ADC_960M = 2,
+ ADC_1920M = 3,
+};
+
+enum dac_ck {
+ DAC_40M = 0,
+ DAC_80M = 1,
+ DAC_120M = 2,
+ DAC_160M = 3,
+ DAC_240M = 4,
+ DAC_320M = 5,
+ DAC_480M = 6,
+ DAC_960M = 7,
+};
+
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8851B] = {0x5858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8851B] = {0x5860};
static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8851B] = {0x5838};
@@ -116,7 +134,7 @@ static const u32 rtw8851b_backup_rf_regs[] = {
#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8851b_backup_rf_regs)
static const u32 dpk_kip_reg[DPK_KIP_REG_NUM_8851B] = {
- 0x813c, 0x8124, 0xc0ec, 0xc0e8, 0xc0c4, 0xc0d4, 0xc0d8};
+ 0x813c, 0x8124, 0xc0ec, 0xc0e8, 0xc0c4, 0xc0d4, 0xc0d8, 0x12a0};
static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005};
static void _set_ch(struct rtw89_dev *rtwdev, u32 val);
@@ -163,6 +181,51 @@ static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
}
+static void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ bool force, enum dac_ck ck)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
+}
+
+static void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ bool force, enum adc_ck ck)
+{
+ static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x93};
+ static const u32 ck1920_8851b[] = {0x9, 0x0, 0x0, 0x3, 0xf, 0xa, 0x49};
+ const u32 *data;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+
+ switch (ck) {
+ case ADC_960M:
+ data = ck960_8851b;
+ break;
+ case ADC_1920M:
+ default:
+ data = ck1920_8851b;
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_CTL, data[0]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_EN, data[1]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, data[2]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, data[3]);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK | (path << 8), B_DRCK_MUL, data[4]);
+ rtw89_phy_write32_mask(rtwdev, R_ADCMOD | (path << 8), B_ADCMOD_LP, data[5]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 8), B_P0_RXCK_ADJ, data[6]);
+}
+
static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
{
u32 rf_mode;
@@ -1044,10 +1107,43 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
- if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80)
+ if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_DPD_GDIS, 0x1);
+
+ _rxck_force(rtwdev, path, true, ADC_960M);
+
rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_80_defs_tbl);
- else
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_DPD_GDIS, 0x1);
+
+ _rxck_force(rtwdev, path, true, ADC_960M);
+
rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_others_defs_tbl);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, (2)before RXK IQK\n", path);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[07:10] = 0x%x\n", path,
+ 0xc0d4, rtw89_phy_read32_mask(rtwdev, 0xc0d4, GENMASK(10, 7)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[11:14] = 0x%x\n", path,
+ 0xc0d4, rtw89_phy_read32_mask(rtwdev, 0xc0d4, GENMASK(14, 11)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[26:27] = 0x%x\n", path,
+ 0xc0d4, rtw89_phy_read32_mask(rtwdev, 0xc0d4, GENMASK(27, 26)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[05:08] = 0x%x\n", path,
+ 0xc0d8, rtw89_phy_read32_mask(rtwdev, 0xc0d8, GENMASK(8, 5)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[17:21] = 0x%x\n", path,
+ 0xc0c4, rtw89_phy_read32_mask(rtwdev, 0xc0c4, GENMASK(21, 17)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[16:31] = 0x%x\n", path,
+ 0xc0e8, rtw89_phy_read32_mask(rtwdev, 0xc0e8, GENMASK(31, 16)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[04:05] = 0x%x\n", path,
+ 0xc0e4, rtw89_phy_read32_mask(rtwdev, 0xc0e4, GENMASK(5, 4)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[23:31] = 0x%x\n", path,
+ 0x12a0, rtw89_phy_read32_mask(rtwdev, 0x12a0, GENMASK(31, 23)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[13:14] = 0x%x\n", path,
+ 0xc0ec, rtw89_phy_read32_mask(rtwdev, 0xc0ec, GENMASK(14, 13)));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x%x[16:23] = 0x%x\n", path,
+ 0xc0ec, rtw89_phy_read32_mask(rtwdev, 0xc0ec, GENMASK(23, 16)));
}
static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
@@ -1553,6 +1649,14 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_defs_tbl);
+
+ _txck_force(rtwdev, path, true, DAC_960M);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_DPD_GDIS, 0x1);
+
+ _rxck_force(rtwdev, path, true, ADC_1920M);
+
+ rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_bh_defs_tbl);
}
static void _iqk_init(struct rtw89_dev *rtwdev)
@@ -1794,7 +1898,21 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path pat
rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x0);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), MASKDWORD, 0xd801dffd);
- rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_bb_afe_defs_tbl);
+ _txck_force(rtwdev, path, true, DAC_960M);
+ _rxck_force(rtwdev, path, true, ADC_1920M);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCIM, B_DCIM_FR, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADCMOD, B_ADCMOD_AUTO_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+ udelay(2);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+ udelay(2);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+ udelay(10);
rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(20 + path), 0x1);
rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, BIT(28 + path), 0x1);
@@ -1827,6 +1945,17 @@ static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
is_pause ? "pause" : "resume");
}
+static
+void _dpk_tssi_slope_k_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ bool is_on)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_SLOPE_CAL + (path << 13),
+ B_P0_TSSI_SLOPE_CAL_EN, is_on);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI slope_k %s\n", path,
+ str_on_off(is_on));
+}
+
static void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
@@ -1874,9 +2003,6 @@ static void _dpk_kip_control_rfc(struct rtw89_dev *rtwdev,
{
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13),
B_IQK_RFC_ON, ctrl_by_kip);
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] RFC is controlled by %s\n",
- ctrl_by_kip ? "KIP" : "BB");
}
static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
@@ -2279,7 +2405,7 @@ static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
case 0: /* (5,3,1) */
rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, 0x0);
rtw89_phy_write32_mask(rtwdev, R_DPK_IDL, B_DPK_IDL_SEL, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_DMAN, 0x1);
break;
case 1: /* (5,3,0) */
@@ -2315,8 +2441,6 @@ static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx)
{
- rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_MA, 0x1);
-
if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD500) == 0x1)
_dpk_set_mdpd_para(rtwdev, 0x2);
else if (rtw89_phy_read32_mask(rtwdev, R_IDL_MPA, B_IDL_MD530) == 0x1)
@@ -2419,9 +2543,6 @@ static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 init_xdbm = 17;
bool is_fail;
- if (dpk->bp[path][kidx].band != RTW89_BAND_2G)
- init_xdbm = 15;
-
_dpk_kip_control_rfc(rtwdev, path, false);
_rfk_rf_direct_cntrl(rtwdev, path, false);
rtw89_write_rf(rtwdev, path, RR_BBDC, RFREG_MASK, 0x03ffd);
@@ -2485,6 +2606,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
"[DPK] ========= S%d[%d] DPK Start =========\n",
path, dpk->cur_idx[path]);
+ _dpk_tssi_slope_k_onoff(rtwdev, path, false);
_dpk_rxagc_onoff(rtwdev, path, false);
_rfk_drf_direct_cntrl(rtwdev, path, false);
_dpk_bb_afe_setting(rtwdev, path);
@@ -2502,7 +2624,7 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
_dpk_reload_rf(rtwdev, dpk_rf_reg, rf_bkup, path);
_dpk_bb_afe_restore(rtwdev, path);
_dpk_rxagc_onoff(rtwdev, path, true);
-
+ _dpk_tssi_slope_k_onoff(rtwdev, path, true);
if (rtwdev->is_tssi_mode[path])
_dpk_tssi_pause(rtwdev, path, false);
}
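In the _iqk_rxclk_setting() hunk above, both bandwidth branches now program the
same ADC FIFO and clock setup before parsing their bandwidth-specific table, so
only the table selection actually depends on iqk_bw. A minimal sketch of the
shared part, assuming the helpers keep the signatures shown in the hunk (not
part of the patch itself):

static void _iqk_rxclk_common_sketch(struct rtw89_dev *rtwdev, u8 path)
{
	struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;

	/* identical in both branches of the hunk above */
	rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
	rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_DPD_GDIS, 0x1);
	_rxck_force(rtwdev, path, true, ADC_960M);

	/* only the RFK table differs between 80 MHz and other bandwidths */
	rtw89_rfk_parser(rtwdev,
			 iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80 ?
			 &rtw8851b_iqk_rxclk_80_defs_tbl :
			 &rtw8851b_iqk_rxclk_others_defs_tbl);
}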
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c
index 0abf7978ccab..c5f70c045692 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c
@@ -63,16 +63,7 @@ static const struct rtw89_reg5_def rtw8851b_dack_manual_off_defs[] = {
RTW89_DECLARE_RFK_TBL(rtw8851b_dack_manual_off_defs);
static const struct rtw89_reg5_def rtw8851b_iqk_rxclk_80_defs[] = {
- RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x0101),
- RTW89_DECL_RFK_WM(0x5670, 0x00002000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x2),
RTW89_DECL_RFK_WM(0x5670, 0x60000000, 0x1),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00000780, 0x8),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00007800, 0x2),
- RTW89_DECL_RFK_WM(0xc0d4, 0x0c000000, 0x2),
- RTW89_DECL_RFK_WM(0xc0d8, 0x000001e0, 0x5),
- RTW89_DECL_RFK_WM(0xc0c4, 0x003e0000, 0xf),
RTW89_DECL_RFK_WM(0xc0ec, 0x00006000, 0x0),
RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x1),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0f),
@@ -85,16 +76,7 @@ static const struct rtw89_reg5_def rtw8851b_iqk_rxclk_80_defs[] = {
RTW89_DECLARE_RFK_TBL(rtw8851b_iqk_rxclk_80_defs);
static const struct rtw89_reg5_def rtw8851b_iqk_rxclk_others_defs[] = {
- RTW89_DECL_RFK_WM(0x20fc, 0xffff0000, 0x0101),
- RTW89_DECL_RFK_WM(0x5670, 0x00002000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x2),
RTW89_DECL_RFK_WM(0x5670, 0x60000000, 0x0),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00000780, 0x8),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00007800, 0x2),
- RTW89_DECL_RFK_WM(0xc0d4, 0x0c000000, 0x2),
- RTW89_DECL_RFK_WM(0xc0d8, 0x000001e0, 0x5),
- RTW89_DECL_RFK_WM(0xc0c4, 0x003e0000, 0xf),
RTW89_DECL_RFK_WM(0xc0ec, 0x00006000, 0x2),
RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x1),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x0f),
@@ -157,55 +139,38 @@ static const struct rtw89_reg5_def rtw8851b_iqk_macbb_defs[] = {
RTW89_DECL_RFK_WM(0x20fc, 0x10000000, 0x0),
RTW89_DECL_RFK_WM(0x5670, MASKDWORD, 0xf801fffd),
RTW89_DECL_RFK_WM(0x5670, 0x00004000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x1),
RTW89_DECL_RFK_WM(0x5670, 0x80000000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x7),
- RTW89_DECL_RFK_WM(0x5670, 0x00002000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x3),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8851b_iqk_macbb_defs);
+
+static const struct rtw89_reg5_def rtw8851b_iqk_macbb_bh_defs[] = {
RTW89_DECL_RFK_WM(0x5670, 0x60000000, 0x2),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00000780, 0x9),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00007800, 0x1),
- RTW89_DECL_RFK_WM(0xc0d4, 0x0c000000, 0x0),
- RTW89_DECL_RFK_WM(0xc0d8, 0x000001e0, 0x3),
- RTW89_DECL_RFK_WM(0xc0c4, 0x003e0000, 0xa),
- RTW89_DECL_RFK_WM(0xc0ec, 0x00006000, 0x0),
- RTW89_DECL_RFK_WM(0xc0e8, 0x00000040, 0x1),
RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_DELAY(2),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x1f),
+ RTW89_DECL_RFK_DELAY(10),
+ RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x13),
+ RTW89_DECL_RFK_DELAY(2),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0001),
+ RTW89_DECL_RFK_DELAY(2),
+ RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0041),
+ RTW89_DECL_RFK_DELAY(10),
+ RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_DELAY(2),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x1f),
+ RTW89_DECL_RFK_DELAY(10),
RTW89_DECL_RFK_WM(0x030c, 0xff000000, 0x13),
+ RTW89_DECL_RFK_DELAY(2),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0001),
+ RTW89_DECL_RFK_DELAY(2),
RTW89_DECL_RFK_WM(0x032c, 0xffff0000, 0x0041),
+ RTW89_DECL_RFK_DELAY(10),
RTW89_DECL_RFK_WM(0x20fc, 0x00100000, 0x1),
RTW89_DECL_RFK_WM(0x20fc, 0x10000000, 0x1),
};
-RTW89_DECLARE_RFK_TBL(rtw8851b_iqk_macbb_defs);
-
-static const struct rtw89_reg5_def rtw8851b_iqk_bb_afe_defs[] = {
- RTW89_DECL_RFK_WM(0x5670, 0x00004000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00008000, 0x1),
- RTW89_DECL_RFK_WM(0x5670, 0x80000000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00007000, 0x7),
- RTW89_DECL_RFK_WM(0x5670, 0x00002000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00080000, 0x1),
- RTW89_DECL_RFK_WM(0x12a0, 0x00070000, 0x3),
- RTW89_DECL_RFK_WM(0x5670, 0x60000000, 0x2),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00000780, 0x9),
- RTW89_DECL_RFK_WM(0xc0d4, 0x00007800, 0x1),
- RTW89_DECL_RFK_WM(0xc0d4, 0x0c000000, 0x0),
- RTW89_DECL_RFK_WM(0xc0d8, 0x000001e0, 0x3),
- RTW89_DECL_RFK_WM(0xc0c4, 0x003e0000, 0xa),
- RTW89_DECL_RFK_WM(0xc0ec, 0x00006000, 0x0),
- RTW89_DECL_RFK_WM(0xc0e8, 0x00000040, 0x1),
- RTW89_DECL_RFK_WM(0x12b8, 0x40000000, 0x1),
- RTW89_DECL_RFK_WM(0x030c, MASKBYTE3, 0x1f),
- RTW89_DECL_RFK_WM(0x030c, MASKBYTE3, 0x13),
- RTW89_DECL_RFK_WM(0x032c, MASKHWORD, 0x0001),
- RTW89_DECL_RFK_WM(0x032c, MASKHWORD, 0x0041),
-};
-
-RTW89_DECLARE_RFK_TBL(rtw8851b_iqk_bb_afe_defs);
+RTW89_DECLARE_RFK_TBL(rtw8851b_iqk_macbb_bh_defs);
static const struct rtw89_reg5_def rtw8851b_tssi_sys_defs[] = {
RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0xb5b5),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h
index febfbecb691c..3f1547f57505 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h
@@ -17,8 +17,8 @@ extern const struct rtw89_rfk_tbl rtw8851b_iqk_rxclk_others_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8851b_iqk_txk_2ghz_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8851b_iqk_txk_5ghz_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8851b_iqk_afebb_restore_defs_tbl;
-extern const struct rtw89_rfk_tbl rtw8851b_iqk_bb_afe_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8851b_iqk_macbb_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8851b_iqk_macbb_bh_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8851b_tssi_sys_defs_tbl;
extern const struct rtw89_rfk_tbl rtw8851b_tssi_sys_a_defs_2g_tbl;
extern const struct rtw89_rfk_tbl rtw8851b_tssi_sys_a_defs_5g_tbl;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
index 522883c8dfb9..a9c309c245c3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
@@ -2320,9 +2320,9 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x813c, 0x40000000},
{0x8140, 0x00000000},
{0x8144, 0x0b040b03},
- {0x8148, 0x07020b04},
- {0x814c, 0x07020b04},
- {0x8150, 0xa0a00000},
+ {0x8148, 0x07020a04},
+ {0x814c, 0x07020a04},
+ {0x8150, 0xe4e40000},
{0x8158, 0xffffffff},
{0x815c, 0xffffffff},
{0x8160, 0xffffffff},
@@ -2577,14 +2577,14 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8534, 0x400042fe},
{0x8538, 0x50554200},
{0x853c, 0xb4183000},
- {0x8540, 0xe537a50f},
+ {0x8540, 0xe535a50f},
{0x8544, 0xf12bf02b},
{0x8548, 0xf32bf22b},
{0x854c, 0xf62bf42b},
{0x8550, 0xf82bf72b},
{0x8554, 0xfa2bf92b},
{0x8558, 0xfd2bfc2b},
- {0x855c, 0xe537fe2b},
+ {0x855c, 0xe535fe2b},
{0x8560, 0xf12af02a},
{0x8564, 0xf32af22a},
{0x8568, 0xf52af42a},
@@ -2653,7 +2653,7 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8664, 0x9700140f},
{0x8668, 0x00017430},
{0x866c, 0xe39ce35e},
- {0x8670, 0xe52a0bbd},
+ {0x8670, 0xe5280bbd},
{0x8674, 0xe36a0001},
{0x8678, 0x0001e3c4},
{0x867c, 0x55005b30},
@@ -2664,93 +2664,93 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8690, 0x46100005},
{0x8694, 0x00010004},
{0x8698, 0x30f8e35e},
- {0x869c, 0xe52a0023},
+ {0x869c, 0xe5280023},
{0x86a0, 0x54ed0002},
{0x86a4, 0x00230baa},
- {0x86a8, 0x0002e52a},
+ {0x86a8, 0x0002e528},
{0x86ac, 0xe356e3e4},
{0x86b0, 0xe35e0001},
{0x86b4, 0x002230f3},
- {0x86b8, 0x0002e52a},
+ {0x86b8, 0x0002e528},
{0x86bc, 0x0baa54ec},
- {0x86c0, 0xe52a0022},
+ {0x86c0, 0xe5280022},
{0x86c4, 0xe3e40002},
{0x86c8, 0x0001e356},
{0x86cc, 0x0baae35e},
{0x86d0, 0xe3e430ec},
{0x86d4, 0x0001e356},
{0x86d8, 0x6d0f6c67},
- {0x86dc, 0xe52ae39c},
+ {0x86dc, 0xe528e39c},
{0x86e0, 0xe39c6c8b},
- {0x86e4, 0x0bace52a},
+ {0x86e4, 0x0bace528},
{0x86e8, 0x6d0f6cb3},
- {0x86ec, 0xe52ae39c},
+ {0x86ec, 0xe528e39c},
{0x86f0, 0x6cdb0bad},
{0x86f4, 0xe39c6d0f},
- {0x86f8, 0x6cf5e52a},
+ {0x86f8, 0x6cf5e528},
{0x86fc, 0xe39c6d0f},
- {0x8700, 0x6c0be52a},
+ {0x8700, 0x6c0be528},
{0x8704, 0xe39c6d00},
- {0x8708, 0x6c25e52a},
- {0x870c, 0xe52ae39c},
+ {0x8708, 0x6c25e528},
+ {0x870c, 0xe528e39c},
{0x8710, 0x6c4df8c6},
- {0x8714, 0xe52ae39c},
+ {0x8714, 0xe528e39c},
{0x8718, 0x6c75f9cf},
- {0x871c, 0xe52ae39c},
+ {0x871c, 0xe528e39c},
{0x8720, 0xe39c6c99},
- {0x8724, 0xfad6e52a},
+ {0x8724, 0xfad6e528},
{0x8728, 0x21e87410},
{0x872c, 0x6e670009},
{0x8730, 0xe3c46f0f},
- {0x8734, 0x7410e52f},
+ {0x8734, 0x7410e52d},
{0x8738, 0x000b21e8},
{0x873c, 0xe3c46e8b},
- {0x8740, 0x7410e52f},
+ {0x8740, 0x7410e52d},
{0x8744, 0x000d21e8},
{0x8748, 0x6f0f6eb3},
- {0x874c, 0xe52fe3c4},
+ {0x874c, 0xe52de3c4},
{0x8750, 0xfe07ff08},
{0x8754, 0x21e87410},
{0x8758, 0x6ec7000e},
- {0x875c, 0xe52fe3c4},
+ {0x875c, 0xe52de3c4},
{0x8760, 0x21e87410},
{0x8764, 0x6edb000f},
{0x8768, 0xe3c46f0f},
- {0x876c, 0x7410e52f},
+ {0x876c, 0x7410e52d},
{0x8770, 0x001021e8},
{0x8774, 0xe3c46eef},
- {0x8778, 0xff03e52f},
- {0x877c, 0xe52ffe02},
+ {0x8778, 0xff03e52d},
+ {0x877c, 0xe52dfe02},
{0x8780, 0x21e87410},
{0x8784, 0x6e110013},
{0x8788, 0xe3c46f00},
- {0x878c, 0xff03e52f},
- {0x8790, 0xe52ffe02},
+ {0x878c, 0xff03e52d},
+ {0x8790, 0xe52dfe02},
{0x8794, 0x21e87410},
{0x8798, 0x6e250014},
- {0x879c, 0xe52fe3c4},
+ {0x879c, 0xe52de3c4},
{0x87a0, 0xff08fc24},
{0x87a4, 0x7410fe07},
{0x87a8, 0x001521e8},
{0x87ac, 0xe3c46e39},
- {0x87b0, 0x7410e52f},
+ {0x87b0, 0x7410e52d},
{0x87b4, 0x001621e8},
{0x87b8, 0xe3c46e4d},
- {0x87bc, 0xfd27e52f},
+ {0x87bc, 0xfd27e52d},
{0x87c0, 0x21e87410},
{0x87c4, 0x6e750018},
- {0x87c8, 0xe52fe3c4},
+ {0x87c8, 0xe52de3c4},
{0x87cc, 0x21e87410},
{0x87d0, 0x6e99001a},
- {0x87d4, 0xe52fe3c4},
+ {0x87d4, 0xe52de3c4},
{0x87d8, 0xe36afe24},
{0x87dc, 0x63404380},
{0x87e0, 0x43006880},
{0x87e4, 0x31300bac},
- {0x87e8, 0xe52f0022},
+ {0x87e8, 0xe52d0022},
{0x87ec, 0x54ec0002},
{0x87f0, 0x00220baa},
- {0x87f4, 0x0002e52f},
+ {0x87f4, 0x0002e52d},
{0x87f8, 0xe362e3e4},
{0x87fc, 0xe36a0001},
{0x8800, 0x63404380},
@@ -2770,7 +2770,7 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8838, 0x55010004},
{0x883c, 0x66055b40},
{0x8840, 0x62000007},
- {0x8844, 0xe40e6300},
+ {0x8844, 0xe40c6300},
{0x8848, 0x09000004},
{0x884c, 0x0b400a01},
{0x8850, 0x0e010d00},
@@ -2782,13 +2782,13 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8868, 0x00044d01},
{0x886c, 0x00074300},
{0x8870, 0x05a30562},
- {0x8874, 0xe40e961f},
+ {0x8874, 0xe40c961f},
{0x8878, 0xe37e0004},
{0x887c, 0x06a20007},
- {0x8880, 0xe40e07a3},
+ {0x8880, 0xe40c07a3},
{0x8884, 0xe37e0004},
- {0x8888, 0x0002e3fe},
- {0x888c, 0x4380e406},
+ {0x8888, 0x0002e3fc},
+ {0x888c, 0x4380e404},
{0x8890, 0x4d000007},
{0x8894, 0x43000004},
{0x8898, 0x000742fe},
@@ -2815,13 +2815,13 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x88ec, 0x42000004},
{0x88f0, 0x00070001},
{0x88f4, 0x62006220},
- {0x88f8, 0x0001e406},
+ {0x88f8, 0x0001e404},
{0x88fc, 0x63000007},
{0x8900, 0x09000004},
{0x8904, 0x0e010a00},
{0x8908, 0x00070032},
- {0x890c, 0xe40e06a2},
- {0x8910, 0x0002e41a},
+ {0x890c, 0xe40c06a2},
+ {0x8910, 0x0002e418},
{0x8914, 0x000742fe},
{0x8918, 0x00044d00},
{0x891c, 0x00014200},
@@ -2839,50 +2839,50 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x894c, 0x00014300},
{0x8950, 0x6c060005},
{0x8954, 0xe2aae298},
- {0x8958, 0xe42ae285},
- {0x895c, 0xe432e2f3},
+ {0x8958, 0xe428e285},
+ {0x895c, 0xe430e2f3},
{0x8960, 0x0001e30c},
{0x8964, 0x0005e285},
{0x8968, 0xe2986c06},
- {0x896c, 0xe42ae4a9},
- {0x8970, 0xe432e2f3},
+ {0x896c, 0xe428e4a7},
+ {0x8970, 0xe430e2f3},
{0x8974, 0x0001e30c},
{0x8978, 0x6c000005},
{0x897c, 0xe2aae298},
- {0x8980, 0xe445e285},
- {0x8984, 0xe44de2f3},
+ {0x8980, 0xe443e285},
+ {0x8984, 0xe44be2f3},
{0x8988, 0x0001e30c},
{0x898c, 0x0005e285},
{0x8990, 0xe2986c00},
- {0x8994, 0xe445e4a9},
- {0x8998, 0xe44de2f3},
+ {0x8994, 0xe443e4a7},
+ {0x8998, 0xe44be2f3},
{0x899c, 0x0001e30c},
{0x89a0, 0x6c040005},
{0x89a4, 0xe2aae298},
- {0x89a8, 0xe460e285},
- {0x89ac, 0xe468e2f3},
+ {0x89a8, 0xe45ee285},
+ {0x89ac, 0xe466e2f3},
{0x89b0, 0x0001e30c},
{0x89b4, 0x0005e285},
{0x89b8, 0xe2986c04},
- {0x89bc, 0xe460e4a9},
- {0x89c0, 0xe468e2f3},
+ {0x89bc, 0xe45ee4a7},
+ {0x89c0, 0xe466e2f3},
{0x89c4, 0x0001e30c},
{0x89c8, 0x6c020005},
{0x89cc, 0xe2aae298},
- {0x89d0, 0xe47be285},
- {0x89d4, 0xe483e2f3},
+ {0x89d0, 0xe479e285},
+ {0x89d4, 0xe481e2f3},
{0x89d8, 0x0001e30c},
{0x89dc, 0x0005e285},
{0x89e0, 0xe2986c02},
- {0x89e4, 0xe47be4a9},
- {0x89e8, 0xe483e2f3},
+ {0x89e4, 0xe479e4a7},
+ {0x89e8, 0xe481e2f3},
{0x89ec, 0x0001e30c},
{0x89f0, 0x43800004},
{0x89f4, 0x610a6008},
{0x89f8, 0x63ce6200},
{0x89fc, 0x60800006},
{0x8a00, 0x00047f00},
- {0x8a04, 0xe4e04300},
+ {0x8a04, 0xe4de4300},
{0x8a08, 0x00070001},
{0x8a0c, 0x4d015500},
{0x8a10, 0x74200004},
@@ -2895,22 +2895,22 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8a2c, 0x00014300},
{0x8a30, 0x74200004},
{0x8a34, 0x77000005},
- {0x8a38, 0x73887e07},
+ {0x8a38, 0x73887e06},
{0x8a3c, 0x8f007380},
{0x8a40, 0x0004140f},
{0x8a44, 0x00057430},
{0x8a48, 0x00017300},
- {0x8a4c, 0x0005e496},
+ {0x8a4c, 0x0005e494},
{0x8a50, 0x00017300},
{0x8a54, 0x43800004},
{0x8a58, 0x0006b103},
{0x8a5c, 0x91037cdb},
{0x8a60, 0x40db0007},
{0x8a64, 0x43000004},
- {0x8a68, 0x0005e496},
+ {0x8a68, 0x0005e494},
{0x8a6c, 0x00067380},
{0x8a70, 0x60025d01},
- {0x8a74, 0xe4ba6200},
+ {0x8a74, 0xe4b86200},
{0x8a78, 0x73000005},
{0x8a7c, 0x76080007},
{0x8a80, 0x00047578},
@@ -2943,8 +2943,8 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8aec, 0x7cdb0006},
{0x8af0, 0x00079103},
{0x8af4, 0x000440db},
- {0x8af8, 0xe4964300},
- {0x8afc, 0xe4ba7e03},
+ {0x8af8, 0xe4944300},
+ {0x8afc, 0xe4b87e03},
{0x8b00, 0x43800004},
{0x8b04, 0x0006b103},
{0x8b08, 0x91037c5b},
@@ -2976,14 +2976,14 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8b70, 0x43000004},
{0x8b74, 0x73000005},
{0x8b78, 0x76000007},
- {0x8b7c, 0xe4c30001},
+ {0x8b7c, 0xe4c10001},
{0x8b80, 0x00040001},
{0x8b84, 0x60004380},
{0x8b88, 0x62016100},
{0x8b8c, 0x00066310},
{0x8b90, 0x00046000},
{0x8b94, 0x00014300},
- {0x8b98, 0x0001e4e0},
+ {0x8b98, 0x0001e4de},
{0x8b9c, 0x4e004f02},
{0x8ba0, 0x52015302},
{0x8ba4, 0x140f0001},
@@ -3030,7 +3030,7 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8c48, 0xbf1ae3ac},
{0x8c4c, 0xe36e300b},
{0x8c50, 0xe390e377},
- {0x8c54, 0x0001e523},
+ {0x8c54, 0x0001e521},
{0x8c58, 0x54c054bf},
{0x8c5c, 0x54c154a3},
{0x8c60, 0x4c1854a4},
@@ -3039,7 +3039,7 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8c6c, 0xbf051402},
{0x8c70, 0x54a354c1},
{0x8c74, 0xbf011402},
- {0x8c78, 0x54dfe534},
+ {0x8c78, 0x54dfe532},
{0x8c7c, 0x54bf0001},
{0x8c80, 0x050a54e5},
{0x8c84, 0x000154df},
@@ -3060,186 +3060,185 @@ static const struct rtw89_reg2_def rtw89_8851b_phy_nctl_regs[] = {
{0x8cc0, 0x56005610},
{0x8cc4, 0x00018c00},
{0x8cc8, 0x57005704},
- {0x8ccc, 0xa7038e00},
- {0x8cd0, 0x33f0aff7},
- {0x8cd4, 0xaf034019},
- {0x8cd8, 0x33f0402b},
- {0x8cdc, 0x33df402b},
- {0x8ce0, 0x57005708},
- {0x8ce4, 0x57818e00},
- {0x8ce8, 0x8e005780},
- {0x8cec, 0x00074380},
- {0x8cf0, 0x5c005c01},
- {0x8cf4, 0x00041403},
- {0x8cf8, 0x00014300},
- {0x8cfc, 0x0007427f},
- {0x8d00, 0x62006280},
- {0x8d04, 0x00049200},
- {0x8d08, 0x00014200},
- {0x8d0c, 0x0007427f},
- {0x8d10, 0x63146394},
- {0x8d14, 0x00049200},
- {0x8d18, 0x00014200},
- {0x8d1c, 0x42fe0004},
- {0x8d20, 0x4d010007},
- {0x8d24, 0x42000004},
- {0x8d28, 0x140f7420},
- {0x8d2c, 0x57005710},
- {0x8d30, 0x0001141f},
- {0x8d34, 0x42fe0004},
- {0x8d38, 0x4d010007},
- {0x8d3c, 0x42000004},
- {0x8d40, 0x140f7420},
- {0x8d44, 0x000742bf},
- {0x8d48, 0x62006240},
- {0x8d4c, 0x0004141f},
- {0x8d50, 0x00014200},
- {0x8d54, 0x5d060006},
- {0x8d58, 0x61046003},
- {0x8d5c, 0x00056201},
- {0x8d60, 0x00017310},
- {0x8d64, 0x43800004},
- {0x8d68, 0x5e010007},
- {0x8d6c, 0x140a5e00},
- {0x8d70, 0x0006b103},
- {0x8d74, 0x91037f07},
- {0x8d78, 0x43070007},
- {0x8d7c, 0x5c000006},
- {0x8d80, 0x5e035d02},
- {0x8d84, 0x43000004},
- {0x8d88, 0x00060001},
- {0x8d8c, 0x60005d04},
- {0x8d90, 0x62016104},
- {0x8d94, 0x73100005},
- {0x8d98, 0x00040001},
- {0x8d9c, 0x00074380},
- {0x8da0, 0x5e005e01},
- {0x8da4, 0xb103140a},
- {0x8da8, 0x7fc60006},
- {0x8dac, 0x00079103},
- {0x8db0, 0x000643c6},
- {0x8db4, 0x5d025c00},
- {0x8db8, 0x00045e03},
- {0x8dbc, 0x00014300},
- {0x8dc0, 0x5d040006},
- {0x8dc4, 0x61046000},
- {0x8dc8, 0x00056201},
- {0x8dcc, 0x00017310},
- {0x8dd0, 0x43800004},
- {0x8dd4, 0x5e010007},
- {0x8dd8, 0x140a5e00},
- {0x8ddc, 0x0006b103},
- {0x8de0, 0x91037fc6},
- {0x8de4, 0x43c60007},
- {0x8de8, 0x5c000006},
- {0x8dec, 0x5e035d02},
- {0x8df0, 0x43000004},
- {0x8df4, 0x00060001},
- {0x8df8, 0x60025d00},
- {0x8dfc, 0x62016100},
- {0x8e00, 0x73000005},
- {0x8e04, 0x00040001},
- {0x8e08, 0x00074380},
- {0x8e0c, 0x5e005e01},
- {0x8e10, 0xb103140a},
- {0x8e14, 0x7fc00006},
- {0x8e18, 0x00079103},
- {0x8e1c, 0x000643c0},
- {0x8e20, 0x5d025c00},
- {0x8e24, 0x00045e03},
- {0x8e28, 0x00014300},
- {0x8e2c, 0x7e020005},
- {0x8e30, 0x42f70004},
- {0x8e34, 0x6c080005},
- {0x8e38, 0x42700004},
- {0x8e3c, 0x73810005},
- {0x8e40, 0x93007380},
- {0x8e44, 0x42f70004},
- {0x8e48, 0x6c000005},
- {0x8e4c, 0x42000004},
- {0x8e50, 0x00040001},
- {0x8e54, 0x00074380},
- {0x8e58, 0x73007304},
- {0x8e5c, 0x72401405},
- {0x8e60, 0x43000004},
- {0x8e64, 0x74040006},
- {0x8e68, 0x40010007},
- {0x8e6c, 0xab004000},
- {0x8e70, 0x0001140f},
- {0x8e74, 0x140ae517},
- {0x8e78, 0x140ae4c3},
- {0x8e7c, 0x0001e51e},
- {0x8e80, 0xe4c3e517},
- {0x8e84, 0x00040001},
- {0x8e88, 0x00047410},
- {0x8e8c, 0x42f04380},
- {0x8e90, 0x62080007},
- {0x8e94, 0x24206301},
- {0x8e98, 0x14c80000},
- {0x8e9c, 0x00002428},
- {0x8ea0, 0x1a4215f4},
- {0x8ea4, 0x6300000b},
- {0x8ea8, 0x42000004},
- {0x8eac, 0x74304300},
- {0x8eb0, 0x4380140f},
- {0x8eb4, 0x73080007},
- {0x8eb8, 0x00047300},
- {0x8ebc, 0x00014300},
- {0x8ec0, 0x4bf00007},
- {0x8ec4, 0x490b4a8f},
- {0x8ec8, 0x4a8e48f1},
- {0x8ecc, 0x48a5490a},
- {0x8ed0, 0x49094a8d},
- {0x8ed4, 0x4a8c487d},
- {0x8ed8, 0x48754908},
- {0x8edc, 0x49074a8b},
- {0x8ee0, 0x4a8a4889},
- {0x8ee4, 0x48b74906},
- {0x8ee8, 0x49054a89},
- {0x8eec, 0x4a8848fc},
- {0x8ef0, 0x48564905},
- {0x8ef4, 0x49044a87},
- {0x8ef8, 0x4a8648c1},
- {0x8efc, 0x483d4904},
- {0x8f00, 0x49034a85},
- {0x8f04, 0x4a8448c7},
- {0x8f08, 0x485e4903},
- {0x8f0c, 0x49024a83},
- {0x8f10, 0x4a8248ac},
- {0x8f14, 0x48624902},
- {0x8f18, 0x49024a81},
- {0x8f1c, 0x4a804820},
- {0x8f20, 0x48004900},
- {0x8f24, 0x49014a90},
- {0x8f28, 0x4a10481f},
- {0x8f2c, 0x00060001},
- {0x8f30, 0x5f005f80},
- {0x8f34, 0x00059900},
- {0x8f38, 0x00017300},
- {0x8f3c, 0x63800006},
- {0x8f40, 0x98006300},
- {0x8f44, 0x549f0001},
- {0x8f48, 0x5c015400},
- {0x8f4c, 0x540054df},
- {0x8f50, 0x00015c02},
- {0x8f54, 0x07145c01},
- {0x8f58, 0x5c025400},
- {0x8f5c, 0x5c020001},
- {0x8f60, 0x54000714},
- {0x8f64, 0x00015c01},
- {0x8f68, 0x4c184c98},
- {0x8f6c, 0x00080001},
- {0x8f70, 0x5c020004},
- {0x8f74, 0x09017430},
- {0x8f78, 0x0ba60c01},
- {0x8f7c, 0x77800005},
- {0x8f80, 0x52200007},
- {0x8f84, 0x43800004},
- {0x8f88, 0x610a6008},
- {0x8f8c, 0x63c26200},
- {0x8f90, 0x5c000007},
- {0x8f94, 0x43000004},
- {0x8f98, 0x00000001},
+ {0x8ccc, 0x33ee8e00},
+ {0x8cd0, 0xaf034019},
+ {0x8cd4, 0x33ee402b},
+ {0x8cd8, 0x33df402b},
+ {0x8cdc, 0x57005708},
+ {0x8ce0, 0x57818e00},
+ {0x8ce4, 0x8e005780},
+ {0x8ce8, 0x00074380},
+ {0x8cec, 0x5c005c01},
+ {0x8cf0, 0x00041403},
+ {0x8cf4, 0x00014300},
+ {0x8cf8, 0x0007427f},
+ {0x8cfc, 0x62006280},
+ {0x8d00, 0x00049200},
+ {0x8d04, 0x00014200},
+ {0x8d08, 0x0007427f},
+ {0x8d0c, 0x63146394},
+ {0x8d10, 0x00049200},
+ {0x8d14, 0x00014200},
+ {0x8d18, 0x42fe0004},
+ {0x8d1c, 0x4d010007},
+ {0x8d20, 0x42000004},
+ {0x8d24, 0x140f7420},
+ {0x8d28, 0x57005710},
+ {0x8d2c, 0x0001141f},
+ {0x8d30, 0x42fe0004},
+ {0x8d34, 0x4d010007},
+ {0x8d38, 0x42000004},
+ {0x8d3c, 0x140f7420},
+ {0x8d40, 0x000742bf},
+ {0x8d44, 0x62006240},
+ {0x8d48, 0x0004141f},
+ {0x8d4c, 0x00014200},
+ {0x8d50, 0x5d060006},
+ {0x8d54, 0x61046003},
+ {0x8d58, 0x00056200},
+ {0x8d5c, 0x00017310},
+ {0x8d60, 0x43800004},
+ {0x8d64, 0x5e010007},
+ {0x8d68, 0x140a5e00},
+ {0x8d6c, 0x0006b103},
+ {0x8d70, 0x91037f07},
+ {0x8d74, 0x43070007},
+ {0x8d78, 0x5c000006},
+ {0x8d7c, 0x5e035d02},
+ {0x8d80, 0x43000004},
+ {0x8d84, 0x00060001},
+ {0x8d88, 0x60005d04},
+ {0x8d8c, 0x62006104},
+ {0x8d90, 0x73100005},
+ {0x8d94, 0x00040001},
+ {0x8d98, 0x00074380},
+ {0x8d9c, 0x5e005e01},
+ {0x8da0, 0xb103140a},
+ {0x8da4, 0x7fc60006},
+ {0x8da8, 0x00079103},
+ {0x8dac, 0x000643c6},
+ {0x8db0, 0x5d025c00},
+ {0x8db4, 0x00045e03},
+ {0x8db8, 0x00014300},
+ {0x8dbc, 0x5d040006},
+ {0x8dc0, 0x61046000},
+ {0x8dc4, 0x00056200},
+ {0x8dc8, 0x00017310},
+ {0x8dcc, 0x43800004},
+ {0x8dd0, 0x5e010007},
+ {0x8dd4, 0x140a5e00},
+ {0x8dd8, 0x0006b103},
+ {0x8ddc, 0x91037fc6},
+ {0x8de0, 0x43c60007},
+ {0x8de4, 0x5c000006},
+ {0x8de8, 0x5e035d02},
+ {0x8dec, 0x43000004},
+ {0x8df0, 0x00060001},
+ {0x8df4, 0x60025d00},
+ {0x8df8, 0x62006100},
+ {0x8dfc, 0x73000005},
+ {0x8e00, 0x00040001},
+ {0x8e04, 0x00074380},
+ {0x8e08, 0x5e005e01},
+ {0x8e0c, 0xb103140a},
+ {0x8e10, 0x7fc00006},
+ {0x8e14, 0x00079103},
+ {0x8e18, 0x000643c0},
+ {0x8e1c, 0x5d025c00},
+ {0x8e20, 0x00045e03},
+ {0x8e24, 0x00014300},
+ {0x8e28, 0x7e020005},
+ {0x8e2c, 0x42f70004},
+ {0x8e30, 0x6c080005},
+ {0x8e34, 0x42700004},
+ {0x8e38, 0x73810005},
+ {0x8e3c, 0x93007380},
+ {0x8e40, 0x42f70004},
+ {0x8e44, 0x6c000005},
+ {0x8e48, 0x42000004},
+ {0x8e4c, 0x00040001},
+ {0x8e50, 0x00074380},
+ {0x8e54, 0x73007304},
+ {0x8e58, 0x72401405},
+ {0x8e5c, 0x43000004},
+ {0x8e60, 0x74040006},
+ {0x8e64, 0x40010007},
+ {0x8e68, 0xab004000},
+ {0x8e6c, 0x0001140f},
+ {0x8e70, 0x140ae515},
+ {0x8e74, 0x140ae4c1},
+ {0x8e78, 0x0001e51c},
+ {0x8e7c, 0xe4c1e515},
+ {0x8e80, 0x00040001},
+ {0x8e84, 0x00047410},
+ {0x8e88, 0x42f04380},
+ {0x8e8c, 0x62080007},
+ {0x8e90, 0x24206301},
+ {0x8e94, 0x14c80000},
+ {0x8e98, 0x00002428},
+ {0x8e9c, 0x1a4215f4},
+ {0x8ea0, 0x6300000b},
+ {0x8ea4, 0x42000004},
+ {0x8ea8, 0x74304300},
+ {0x8eac, 0x4380140f},
+ {0x8eb0, 0x73080007},
+ {0x8eb4, 0x00047300},
+ {0x8eb8, 0x00014300},
+ {0x8ebc, 0x4bf00007},
+ {0x8ec0, 0x490b4a8f},
+ {0x8ec4, 0x4a8e48f1},
+ {0x8ec8, 0x48a5490a},
+ {0x8ecc, 0x49094a8d},
+ {0x8ed0, 0x4a8c487d},
+ {0x8ed4, 0x48754908},
+ {0x8ed8, 0x49074a8b},
+ {0x8edc, 0x4a8a4889},
+ {0x8ee0, 0x48b74906},
+ {0x8ee4, 0x49054a89},
+ {0x8ee8, 0x4a8848fc},
+ {0x8eec, 0x48564905},
+ {0x8ef0, 0x49044a87},
+ {0x8ef4, 0x4a8648c1},
+ {0x8ef8, 0x483d4904},
+ {0x8efc, 0x49034a85},
+ {0x8f00, 0x4a8448c7},
+ {0x8f04, 0x485e4903},
+ {0x8f08, 0x49024a83},
+ {0x8f0c, 0x4a8248ac},
+ {0x8f10, 0x48624902},
+ {0x8f14, 0x49024a81},
+ {0x8f18, 0x4a804820},
+ {0x8f1c, 0x48004900},
+ {0x8f20, 0x49014a90},
+ {0x8f24, 0x4a10481f},
+ {0x8f28, 0x00060001},
+ {0x8f2c, 0x5f005f80},
+ {0x8f30, 0x00059900},
+ {0x8f34, 0x00017300},
+ {0x8f38, 0x63800006},
+ {0x8f3c, 0x98006300},
+ {0x8f40, 0x549f0001},
+ {0x8f44, 0x5c015400},
+ {0x8f48, 0x540054df},
+ {0x8f4c, 0x00015c02},
+ {0x8f50, 0x07145c01},
+ {0x8f54, 0x5c025400},
+ {0x8f58, 0x5c020001},
+ {0x8f5c, 0x54000714},
+ {0x8f60, 0x00015c01},
+ {0x8f64, 0x4c184c98},
+ {0x8f68, 0x00080001},
+ {0x8f6c, 0x5c020004},
+ {0x8f70, 0x09017430},
+ {0x8f74, 0x0ba60c01},
+ {0x8f78, 0x77800005},
+ {0x8f7c, 0x52200007},
+ {0x8f80, 0x43800004},
+ {0x8f84, 0x610a6008},
+ {0x8f88, 0x63c26200},
+ {0x8f8c, 0x5c000007},
+ {0x8f90, 0x43000004},
+ {0x8f94, 0x00000001},
{0x8080, 0x00000004},
{0x8080, 0x00000000},
{0x8088, 0x00000000},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851bu.c b/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
new file mode 100644
index 000000000000..c3722547c6b0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "rtw8851b.h"
+#include "usb.h"
+
+static const struct rtw89_driver_info rtw89_8851bu_info = {
+ .chip = &rtw8851b_chip_info,
+ .variant = NULL,
+ .quirks = NULL,
+};
+
+static const struct usb_device_id rtw_8851bu_id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb851, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
+ /* TP-Link Archer TX10UB Nano */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3625, 0x010b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
+ /* Edimax EW-7611UXB */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xe611, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, rtw_8851bu_id_table);
+
+static struct usb_driver rtw_8851bu_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtw_8851bu_id_table,
+ .probe = rtw89_usb_probe,
+ .disconnect = rtw89_usb_disconnect,
+};
+module_usb_driver(rtw_8851bu_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8851BU driver");
+MODULE_LICENSE("Dual BSD/GPL");
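The new rtw8851bu module only registers USB match entries and reuses the common
rtw89 USB transport. Supporting another 8851BU-based dongle would just mean one
more entry in rtw_8851bu_id_table; the VID/PID below is a hypothetical
placeholder, not a real device:

	/* hypothetical example entry - VID/PID are placeholders */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, 0xff, 0xff, 0xff),
	  .driver_info = (kernel_ulong_t)&rtw89_8851bu_info },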
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 080636e8d0c3..3bbe2a808844 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2151,6 +2151,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
.h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
+ .h2c_punctured_cmac_tbl = NULL,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2184,8 +2185,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x6f800,
- .hfc_param_ini = rtw8852a_hfc_param_ini_pcie,
- .dle_mem = rtw8852a_dle_mem_pcie,
+ .hfc_param_ini = {rtw8852a_hfc_param_ini_pcie, NULL, NULL},
+ .dle_mem = {rtw8852a_dle_mem_pcie, NULL, NULL, NULL},
.wde_qempty_acq_grpnum = 16,
.wde_qempty_mgq_grpsel = 16,
.rf_base_addr = {0xc000, 0xd000},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index c0bf80450acf..7ede07f7b1eb 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -49,6 +49,48 @@ static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = {
[RTW89_QTA_INVALID] = {NULL},
};
+static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_usb[] = {
+ {18, 152, grp_0}, /* ACH 0 */
+ {18, 152, grp_0}, /* ACH 1 */
+ {18, 152, grp_0}, /* ACH 2 */
+ {18, 152, grp_0}, /* ACH 3 */
+ {0, 0, grp_0}, /* ACH 4 */
+ {0, 0, grp_0}, /* ACH 5 */
+ {0, 0, grp_0}, /* ACH 6 */
+ {0, 0, grp_0}, /* ACH 7 */
+ {18, 152, grp_0}, /* B0MGQ */
+ {18, 152, grp_0}, /* B0HIQ */
+ {0, 0, grp_0}, /* B1MGQ */
+ {0, 0, grp_0}, /* B1HIQ */
+ {0, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_usb = {
+ 152, /* Group 0 */
+ 0, /* Group 1 */
+ 152, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_prec_cfg rtw8852b_hfc_preccfg_usb = {
+ 9, /* CH 0-11 pre-cost */
+ 32, /* H2C pre-cost */
+ 64, /* WP CH 0-7 pre-cost */
+ 24, /* WP CH 8-11 pre-cost */
+ 1, /* CH 0-11 full condition */
+ 1, /* H2C full condition */
+ 1, /* WP CH 0-7 full condition */
+ 1, /* WP CH 8-11 full condition */
+};
+
+static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_usb[] = {
+ [RTW89_QTA_SCC] = {rtw8852b_hfc_chcfg_usb, &rtw8852b_hfc_pubcfg_usb,
+ &rtw8852b_hfc_preccfg_usb, RTW89_HCIFC_STF},
+ [RTW89_QTA_DLFW] = {NULL, NULL,
+ &rtw8852b_hfc_preccfg_usb, RTW89_HCIFC_STF},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
[RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size7,
&rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
@@ -66,6 +108,19 @@ static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
NULL},
};
+static const struct rtw89_dle_mem rtw8852b_dle_mem_usb3[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size25,
+ &rtw89_mac_size.ple_size33, &rtw89_mac_size.wde_qt25,
+ &rtw89_mac_size.wde_qt25, &rtw89_mac_size.ple_qt74,
+ &rtw89_mac_size.ple_qt75},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
+ &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
static const u32 rtw8852b_h2c_regs[RTW89_H2CREG_MAX] = {
R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
R_AX_H2CREG_DATA3
@@ -299,7 +354,8 @@ static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write8_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
rtw89_write8_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE)
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_CALIB_EN_V1);
rtw89_write32_set(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_PTA_1P3);
@@ -361,7 +417,7 @@ static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VOL_L1_MASK, 0x9);
rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_VREFPFM_L_MASK, 0xA);
- if (rtwdev->hal.cv == CHIP_CBV) {
+ if (rtwdev->hal.cv == CHIP_CBV && rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
rtw89_write16_mask(rtwdev, R_AX_HCI_LDO_CTRL, B_AX_R_AX_VADJ_MASK, 0xA);
rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
@@ -443,10 +499,22 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE)
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
+ else if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_SOP_EDSWR);
+
rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ);
rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x3);
- rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
+ rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);
+ } else if (rtwdev->hci.type == RTW89_HCI_TYPE_USB) {
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_PW_CTRL);
+ val32 &= ~B_AX_AFSM_PCIE_SUS_EN;
+ val32 |= B_AX_AFSM_WLSUS_EN;
+ rtw89_write32(rtwdev, R_AX_SYS_PW_CTRL, val32);
+ }
return 0;
}
@@ -560,8 +628,11 @@ static void rtw8852b_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
static void rtw8852b_rfk_init(struct rtw89_dev *rtwdev)
{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+
rtwdev->is_tssi_mode[RF_PATH_A] = false;
rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(rfk_mcc, 0, sizeof(*rfk_mcc));
rtw8852b_dpk_init(rtwdev);
rtw8852b_rck(rtwdev);
@@ -575,6 +646,7 @@ static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw8852b_mcc_get_ch_info(rtwdev, phy_idx);
rtw89_btc_ntfy_conn_rfk(rtwdev, true);
rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
@@ -585,6 +657,7 @@ static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev,
rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
rtw89_btc_ntfy_conn_rfk(rtwdev, false);
+ rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -778,6 +851,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
.h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
+ .h2c_punctured_cmac_tbl = NULL,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -793,6 +867,10 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.btc_set_policy = rtw89_btc_set_policy_v1,
};
+static const struct rtw89_chanctx_listener rtw8852b_chanctx_listener = {
+ .callbacks[RTW89_CHANCTX_CALLBACK_RFK] = rtw8852b_rfk_chanctx_cb,
+};
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8852b = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
@@ -820,8 +898,13 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.max_amsdu_limit = 5000,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x2f800,
- .hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
- .dle_mem = rtw8852b_dle_mem_pcie,
+ .hfc_param_ini = {rtw8852b_hfc_param_ini_pcie,
+ rtw8852b_hfc_param_ini_usb,
+ NULL},
+ .dle_mem = {rtw8852b_dle_mem_pcie,
+ rtw8852b_dle_mem_usb3,
+ rtw8852b_dle_mem_usb3,
+ NULL},
.wde_qempty_acq_grpnum = 4,
.wde_qempty_mgq_grpsel = 4,
.rf_base_addr = {0xe000, 0xf000},
@@ -836,6 +919,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = &rtw89_8852b_dflt_parms,
.rfe_parms_conf = NULL,
+ .chanctx_listener = &rtw8852b_chanctx_listener,
.txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
@@ -844,7 +928,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_link_num = 0,
- .support_chanctx_num = 0,
+ .support_chanctx_num = 2,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 0cf03f18cbb1..3fb2972ae6f6 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -172,14 +172,6 @@ static const struct rtw89_reg3_def rtw8852bx_btc_preagc_dis_defs[] = {
static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_dis_defs);
-static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse,
- struct rtw8852bx_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->e.mac_addr);
- efuse->rfe_type = map->rfe_type;
- efuse->xtal_cap = map->xtal_k;
-}
-
static void rtw8852bx_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
struct rtw8852bx_efuse *map)
{
@@ -261,12 +253,18 @@ static int __rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
switch (rtwdev->hci.type) {
case RTW89_HCI_TYPE_PCIE:
- rtw8852be_efuse_parsing(efuse, map);
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ break;
+ case RTW89_HCI_TYPE_USB:
+ ether_addr_copy(efuse->addr, map->u.mac_addr);
break;
default:
return -EOPNOTSUPP;
}
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+
rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
return 0;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index fbf82d42687b..4796588c0256 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2022 Realtek Corporation
*/
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "mac.h"
@@ -1145,19 +1146,19 @@ static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
{
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 ch = rfk_mcc->table_idx;
bool is_fail1, is_fail2;
u32 vbuff_i;
u32 vbuff_q;
u32 core_i;
u32 core_q;
u32 tmp;
- u8 ch;
tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
core_i = FIELD_GET(RR_TXMO_COI, tmp);
core_q = FIELD_GET(RR_TXMO_COQ, tmp);
- ch = (iqk_info->iqk_times / 2) % RTW89_IQK_CHS_NR;
if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
is_fail1 = true;
@@ -1386,26 +1387,11 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u
enum rtw89_chanctx_idx chanctx_idx)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx = rfk_mcc->table_idx;
u32 reg_rf18;
u32 reg_35c;
- u8 idx;
- u8 get_empty_table = false;
-
- for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
- if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
- get_empty_table = true;
- break;
- }
- }
- rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
-
- if (!get_empty_table) {
- idx = iqk_info->iqk_table_idx[path] + 1;
- if (idx > 1)
- idx = 0;
- }
- rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
@@ -1506,11 +1492,10 @@ static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
- struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- u8 idx;
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
+ u8 idx = rfk_mcc->table_idx;
- idx = iqk_info->iqk_table_idx[path];
- rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (3)idx = %x\n", idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] idx = %x\n", idx);
rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_IQC, idx);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, idx);
@@ -4179,3 +4164,49 @@ void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
rtw8852b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
chan->band_width);
}
+
+void rtw8852b_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, 0);
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
+ struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
+ u8 idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
+ struct rtw89_rfk_chan_desc *p = &desc[idx];
+
+ p->ch = rfk_mcc->ch[idx];
+
+ p->has_band = true;
+ p->band = rfk_mcc->band[idx];
+ }
+
+ idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
+
+ rfk_mcc->ch[idx] = chan->channel;
+ rfk_mcc->band[idx] = chan->band_type;
+ rfk_mcc->table_idx = idx;
+}
+
+void rtw8852b_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 path;
+
+ switch (state) {
+ case RTW89_CHANCTX_STATE_MCC_START:
+ dpk->is_dpk_enable = false;
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
+ _dpk_onoff(rtwdev, path, false);
+ break;
+ case RTW89_CHANCTX_STATE_MCC_STOP:
+ dpk->is_dpk_enable = true;
+ for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
+ _dpk_onoff(rtwdev, path, false);
+ rtw8852b_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
+ break;
+ default:
+ break;
+ }
+}
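rtw8852b_rfk_chanctx_cb() above is registered through the chanctx_listener
added to rtw8852b_chip_info earlier in this patch. Below is a sketch of how the
core side is assumed to fan a channel-context state change out to such a
listener; the function name and the _NUM bound are assumptions, only the
listener and callback shapes come from this patch:

static void rtw89_chanctx_notify_sketch(struct rtw89_dev *rtwdev,
					enum rtw89_chanctx_state state)
{
	const struct rtw89_chanctx_listener *listener =
		rtwdev->chip->chanctx_listener;
	int i;

	if (!listener)
		return;

	/* RTW89_CHANCTX_CALLBACK_NUM is an assumed enum terminator */
	for (i = 0; i < RTW89_CHANCTX_CALLBACK_NUM; i++)
		if (listener->callbacks[i])
			listener->callbacks[i](rtwdev, state);
}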
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
index c31ba446e6e0..5fae980d5e2c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h
@@ -27,5 +27,8 @@ void rtw8852b_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
void rtw8852b_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
+void rtw8852b_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852b_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index 95e088734423..9427823aca2f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -533,8 +533,11 @@ static void rtw8852bt_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
static void rtw8852bt_rfk_init(struct rtw89_dev *rtwdev)
{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+
rtwdev->is_tssi_mode[RF_PATH_A] = false;
rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(rfk_mcc, 0, sizeof(*rfk_mcc));
rtw8852bt_dpk_init(rtwdev);
rtw8852bt_rck(rtwdev);
@@ -548,6 +551,7 @@ static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw8852bt_mcc_get_ch_info(rtwdev, phy_idx);
rtw89_btc_ntfy_conn_rfk(rtwdev, true);
rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
@@ -558,6 +562,7 @@ static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev,
rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
rtw89_btc_ntfy_conn_rfk(rtwdev, false);
+ rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -712,6 +717,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
.h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
+ .h2c_punctured_cmac_tbl = NULL,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -727,6 +733,10 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.btc_set_policy = rtw89_btc_set_policy_v1,
};
+static const struct rtw89_chanctx_listener rtw8852bt_chanctx_listener = {
+ .callbacks[RTW89_CHANCTX_CALLBACK_RFK] = rtw8852bt_rfk_chanctx_cb,
+};
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8852bt = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
@@ -754,8 +764,8 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.max_amsdu_limit = 5000,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x6f800,
- .hfc_param_ini = rtw8852bt_hfc_param_ini_pcie,
- .dle_mem = rtw8852bt_dle_mem_pcie,
+ .hfc_param_ini = {rtw8852bt_hfc_param_ini_pcie, NULL, NULL},
+ .dle_mem = {rtw8852bt_dle_mem_pcie, NULL, NULL, NULL},
.wde_qempty_acq_grpnum = 4,
.wde_qempty_mgq_grpsel = 4,
.rf_base_addr = {0xe000, 0xf000},
@@ -769,6 +779,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL,
.rfe_parms_conf = NULL,
+ .chanctx_listener = &rtw8852bt_chanctx_listener,
.txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
@@ -777,7 +788,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.tssi_dbw_table = NULL,
.support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_link_num = 0,
- .support_chanctx_num = 1,
+ .support_chanctx_num = 2,
.support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index 6e6889eea9a0..d0e299803225 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2024 Realtek Corporation
*/
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -1529,26 +1530,11 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u
enum rtw89_chanctx_idx chanctx_idx)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- u8 get_empty_table = false;
+ u8 idx = rfk_mcc->table_idx;
u32 reg_rf18;
u32 reg_35c;
- u8 idx;
-
- for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
- if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
- get_empty_table = true;
- break;
- }
- }
- rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
-
- if (!get_empty_table) {
- idx = iqk_info->iqk_table_idx[path] + 1;
- if (idx > 1)
- idx = 0;
- }
- rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
@@ -1640,7 +1626,8 @@ static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
{
- u8 idx = 0;
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
+ u8 idx = rfk_mcc->table_idx;
rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx);
@@ -4252,3 +4239,49 @@ void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
rtw8852bt_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
chan->band_width);
}
+
+void rtw8852bt_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, 0);
+ struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
+ struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
+ u8 idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
+ struct rtw89_rfk_chan_desc *p = &desc[idx];
+
+ p->ch = rfk_mcc->ch[idx];
+
+ p->has_band = true;
+ p->band = rfk_mcc->band[idx];
+ }
+
+ idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
+
+ rfk_mcc->ch[idx] = chan->channel;
+ rfk_mcc->band[idx] = chan->band_type;
+ rfk_mcc->table_idx = idx;
+}
+
+void rtw8852bt_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 path;
+
+ switch (state) {
+ case RTW89_CHANCTX_STATE_MCC_START:
+ dpk->is_dpk_enable = false;
+ for (path = 0; path < RTW8852BT_SS; path++)
+ _dpk_onoff(rtwdev, path, false);
+ break;
+ case RTW89_CHANCTX_STATE_MCC_STOP:
+ dpk->is_dpk_enable = true;
+ for (path = 0; path < RTW8852BT_SS; path++)
+ _dpk_onoff(rtwdev, path, false);
+ rtw8852bt_dpk(rtwdev, RTW89_PHY_0, RTW89_CHANCTX_0);
+ break;
+ default:
+ break;
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
index e34560b4905f..a663bbda4075 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
@@ -27,5 +27,8 @@ void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
void rtw8852bt_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
+void rtw8852bt_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_rfk_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bu.c b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
new file mode 100644
index 000000000000..b315cb997758
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "rtw8852b.h"
+#include "usb.h"
+
+static const struct rtw89_driver_info rtw89_8852bu_info = {
+ .chip = &rtw8852b_chip_info,
+ .variant = NULL,
+ .quirks = NULL,
+};
+
+static const struct usb_device_id rtw_8852bu_id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb832, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb83a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb852, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb85a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xa85b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x3428, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1a62, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0x6931, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6121, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0100, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0108, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x6822, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, rtw_8852bu_id_table);
+
+static struct usb_driver rtw_8852bu_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtw_8852bu_id_table,
+ .probe = rtw89_usb_probe,
+ .disconnect = rtw89_usb_disconnect,
+};
+module_usb_driver(rtw_8852bu_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BU driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 8f3d0c91a3f8..88cf8ea13e7c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -2971,6 +2971,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
.h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
+ .h2c_punctured_cmac_tbl = NULL,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -3004,8 +3005,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.max_amsdu_limit = 8000,
.dis_2g_40m_ul_ofdma = false,
.rsvd_ple_ofst = 0x6f800,
- .hfc_param_ini = rtw8852c_hfc_param_ini_pcie,
- .dle_mem = rtw8852c_dle_mem_pcie,
+ .hfc_param_ini = {rtw8852c_hfc_param_ini_pcie, NULL, NULL},
+ .dle_mem = {rtw8852c_dle_mem_pcie, NULL, NULL, NULL},
.wde_qempty_acq_grpnum = 16,
.wde_qempty_mgq_grpsel = 16,
.rf_base_addr = {0xe000, 0xf000},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 603212ed4558..36c641e3bc13 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2826,6 +2826,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7,
.h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7,
.h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl_g7,
+ .h2c_punctured_cmac_tbl = rtw89_fw_h2c_punctured_cmac_tbl_g7,
.h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
@@ -2859,8 +2860,8 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.max_amsdu_limit = 8000,
.dis_2g_40m_ul_ofdma = false,
.rsvd_ple_ofst = 0x8f800,
- .hfc_param_ini = rtw8922a_hfc_param_ini_pcie,
- .dle_mem = rtw8922a_dle_mem_pcie,
+ .hfc_param_ini = {rtw8922a_hfc_param_ini_pcie, NULL, NULL},
+ .dle_mem = {rtw8922a_dle_mem_pcie, NULL, NULL, NULL},
.wde_qempty_acq_grpnum = 4,
.wde_qempty_mgq_grpsel = 4,
.rf_base_addr = {0xe000, 0xf000},
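With this change, hfc_param_ini and dle_mem in rtw89_chip_info become small
arrays (one slot per host interface) instead of single PCIe pointers. A
plausible core-side lookup is sketched below; indexing the array by HCI type
and the enum/type names are assumptions about how the core consumes these
fields, not something stated in the patch:

static const struct rtw89_dle_mem *
rtw89_get_dle_mem_sketch(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_dle_mem *tbl;
	int idx;

	/* hypothetical selector: slot 0 for PCIe, slot 1 otherwise */
	idx = rtwdev->hci.type == RTW89_HCI_TYPE_PCIE ? 0 : 1;
	tbl = chip->dle_mem[idx];

	return tbl ? &tbl[mode] : NULL;
}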
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index d504518b8a57..bb39fdbcba0d 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -492,6 +492,7 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
case SER_EV_STATE_IN:
wiphy_lock(wiphy);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
wiphy_unlock(wiphy);
drv_stop_tx(ser);
@@ -525,6 +526,8 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
drv_resume_tx(ser);
wiphy_delayed_work_queue(wiphy, &rtwdev->track_work,
RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(wiphy, &rtwdev->track_ps_work,
+ RTW89_TRACK_PS_WORK_PERIOD);
break;
default:
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 94f27a9ee9f7..ec01bfc363da 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -73,6 +73,7 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define RTW89_TXWD_BODY0_FW_DL BIT(20)
#define RTW89_TXWD_BODY0_CHANNEL_DMA GENMASK(19, 16)
#define RTW89_TXWD_BODY0_HDR_LLC_LEN GENMASK(15, 11)
+#define RTW89_TXWD_BODY0_STF_MODE BIT(10)
#define RTW89_TXWD_BODY0_WD_PAGE BIT(7)
#define RTW89_TXWD_BODY0_HW_AMSDU BIT(5)
#define RTW89_TXWD_BODY0_HW_SSN_SEL GENMASK(3, 2)
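RTW89_TXWD_BODY0_STF_MODE is a new bit in the first TX WD body dword. A minimal
sketch of how a TX path could set it for the USB transport follows; tying the
bit to the USB HCI (stream-transfer framing) is an assumption here, only the
bit definition itself comes from the hunk above:

static u32 rtw89_txwd_body0_stf_sketch(struct rtw89_dev *rtwdev, u32 body0)
{
	/* hypothetical helper: set stream-transfer mode only on USB */
	if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
		body0 |= RTW89_TXWD_BODY0_STF_MODE;

	return body0;
}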
diff --git a/drivers/net/wireless/realtek/rtw89/usb.c b/drivers/net/wireless/realtek/rtw89/usb.c
new file mode 100644
index 000000000000..6cf89aee252e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/usb.c
@@ -0,0 +1,1042 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/usb.h>
+#include "debug.h"
+#include "mac.h"
+#include "reg.h"
+#include "txrx.h"
+#include "usb.h"
+
+static void rtw89_usb_read_port_complete(struct urb *urb);
+
+static void rtw89_usb_vendorreq(struct rtw89_dev *rtwdev, u32 addr,
+ void *data, u16 len, u8 reqtype)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct usb_device *udev = rtwusb->udev;
+ unsigned int pipe;
+ u16 value, index;
+ int attempt, ret;
+
+ if (test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
+ return;
+
+ value = u32_get_bits(addr, GENMASK(15, 0));
+ index = u32_get_bits(addr, GENMASK(23, 16));
+
+ for (attempt = 0; attempt < 10; attempt++) {
+ *rtwusb->vendor_req_buf = 0;
+
+ if (reqtype == RTW89_USB_VENQT_READ) {
+ pipe = usb_rcvctrlpipe(udev, 0);
+ } else { /* RTW89_USB_VENQT_WRITE */
+ pipe = usb_sndctrlpipe(udev, 0);
+
+ memcpy(rtwusb->vendor_req_buf, data, len);
+ }
+
+ ret = usb_control_msg(udev, pipe, RTW89_USB_VENQT, reqtype,
+ value, index, rtwusb->vendor_req_buf,
+ len, 500);
+
+ if (ret == len) { /* Success */
+ atomic_set(&rtwusb->continual_io_error, 0);
+
+ if (reqtype == RTW89_USB_VENQT_READ)
+ memcpy(data, rtwusb->vendor_req_buf, len);
+
+ break;
+ }
+
+ if (ret == -ESHUTDOWN || ret == -ENODEV)
+ set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
+ else if (ret < 0)
+ rtw89_warn(rtwdev,
+ "usb %s%u 0x%x fail ret=%d value=0x%x attempt=%d\n",
+ reqtype == RTW89_USB_VENQT_READ ? "read" : "write",
+ len * 8, addr, ret,
+ le32_to_cpup(rtwusb->vendor_req_buf),
+ attempt);
+ else if (ret > 0 && reqtype == RTW89_USB_VENQT_READ)
+ memcpy(data, rtwusb->vendor_req_buf, len);
+
+ if (atomic_inc_return(&rtwusb->continual_io_error) > 4) {
+ set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
+ break;
+ }
+ }
+}
+
+static u32 rtw89_usb_read_cmac(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u32 addr32, val32, shift;
+ __le32 data = 0;
+ int count;
+
+ addr32 = addr & ~0x3;
+ shift = (addr & 0x3) * 8;
+
+ for (count = 0; ; count++) {
+ rtw89_usb_vendorreq(rtwdev, addr32, &data, 4,
+ RTW89_USB_VENQT_READ);
+
+ val32 = le32_to_cpu(data);
+ if (val32 != RTW89_R32_DEAD)
+ break;
+
+ if (count >= MAC_REG_POOL_COUNT) {
+ rtw89_warn(rtwdev, "%s: addr %#x = %#x\n",
+ __func__, addr32, val32);
+ val32 = RTW89_R32_DEAD;
+ break;
+ }
+
+ rtw89_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
+ }
+
+ return val32 >> shift;
+}
+
+static u8 rtw89_usb_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u8 data = 0;
+
+ if (ACCESS_CMAC(addr))
+ return rtw89_usb_read_cmac(rtwdev, addr);
+
+ rtw89_usb_vendorreq(rtwdev, addr, &data, 1, RTW89_USB_VENQT_READ);
+
+ return data;
+}
+
+static u16 rtw89_usb_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
+{
+ __le16 data = 0;
+
+ if (ACCESS_CMAC(addr))
+ return rtw89_usb_read_cmac(rtwdev, addr);
+
+ rtw89_usb_vendorreq(rtwdev, addr, &data, 2, RTW89_USB_VENQT_READ);
+
+ return le16_to_cpu(data);
+}
+
+static u32 rtw89_usb_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
+{
+ __le32 data = 0;
+
+ if (ACCESS_CMAC(addr))
+ return rtw89_usb_read_cmac(rtwdev, addr);
+
+ rtw89_usb_vendorreq(rtwdev, addr, &data, 4,
+ RTW89_USB_VENQT_READ);
+
+ return le32_to_cpu(data);
+}
+
+static void rtw89_usb_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 val)
+{
+ u8 data = val;
+
+ rtw89_usb_vendorreq(rtwdev, addr, &data, 1, RTW89_USB_VENQT_WRITE);
+}
+
+static void rtw89_usb_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 val)
+{
+ __le16 data = cpu_to_le16(val);
+
+ rtw89_usb_vendorreq(rtwdev, addr, &data, 2, RTW89_USB_VENQT_WRITE);
+}
+
+static void rtw89_usb_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 val)
+{
+ __le32 data = cpu_to_le32(val);
+
+ rtw89_usb_vendorreq(rtwdev, addr, &data, 4, RTW89_USB_VENQT_WRITE);
+}
+
+static u32
+rtw89_usb_ops_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
+ u8 txch)
+{
+ if (txch == RTW89_TXCH_CH12)
+ return 1;
+
+ return 42; /* TODO some kind of calculation? */
+}
+
+static u8 rtw89_usb_get_bulkout_id(u8 ch_dma)
+{
+ switch (ch_dma) {
+ case RTW89_DMA_ACH0:
+ return 3;
+ case RTW89_DMA_ACH1:
+ return 4;
+ case RTW89_DMA_ACH2:
+ return 5;
+ case RTW89_DMA_ACH3:
+ return 6;
+ default:
+ case RTW89_DMA_B0MG:
+ return 0;
+ case RTW89_DMA_B0HI:
+ return 1;
+ case RTW89_DMA_H2C:
+ return 2;
+ }
+}
+
+static void rtw89_usb_write_port_complete(struct urb *urb)
+{
+ struct rtw89_usb_tx_ctrl_block *txcb = urb->context;
+ struct rtw89_dev *rtwdev = txcb->rtwdev;
+ struct ieee80211_tx_info *info;
+ struct rtw89_txwd_body *txdesc;
+ struct sk_buff *skb;
+ u32 txdesc_size;
+
+ while (true) {
+ skb = skb_dequeue(&txcb->tx_ack_queue);
+ if (!skb)
+ break;
+
+ if (txcb->txch == RTW89_TXCH_CH12) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ txdesc = (struct rtw89_txwd_body *)skb->data;
+
+ txdesc_size = rtwdev->chip->txwd_body_size;
+ if (le32_get_bits(txdesc->dword0, RTW89_TXWD_BODY0_WD_INFO_EN))
+ txdesc_size += rtwdev->chip->txwd_info_size;
+
+ skb_pull(skb, txdesc_size);
+
+ info = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(info);
+
+ if (urb->status == 0) {
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
+ }
+
+ switch (urb->status) {
+ case 0:
+ case -EPIPE:
+ case -EPROTO:
+ case -EINPROGRESS:
+ case -ENOENT:
+ case -ECONNRESET:
+ break;
+ default:
+ set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
+ break;
+ }
+
+ kfree(txcb);
+ usb_free_urb(urb);
+}
+
+static int rtw89_usb_write_port(struct rtw89_dev *rtwdev, u8 ch_dma,
+ void *data, int len, void *context)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct usb_device *usbd = rtwusb->udev;
+ struct urb *urb;
+ u8 bulkout_id = rtw89_usb_get_bulkout_id(ch_dma);
+ unsigned int pipe;
+ int ret;
+
+ if (test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
+ return 0;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ pipe = usb_sndbulkpipe(usbd, rtwusb->out_pipe[bulkout_id]);
+
+ usb_fill_bulk_urb(urb, usbd, pipe, data, len,
+ rtw89_usb_write_port_complete, context);
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret)
+ usb_free_urb(urb);
+
+ if (ret == -ENODEV)
+ set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
+
+ return ret;
+}
+
+static void rtw89_usb_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct rtw89_usb_tx_ctrl_block *txcb;
+ struct sk_buff *skb;
+ int ret;
+
+ while (true) {
+ skb = skb_dequeue(&rtwusb->tx_queue[txch]);
+ if (!skb)
+ break;
+
+ txcb = kmalloc(sizeof(*txcb), GFP_ATOMIC);
+ if (!txcb) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ txcb->rtwdev = rtwdev;
+ txcb->txch = txch;
+ skb_queue_head_init(&txcb->tx_ack_queue);
+
+ skb_queue_tail(&txcb->tx_ack_queue, skb);
+
+ ret = rtw89_usb_write_port(rtwdev, txch, skb->data, skb->len,
+ txcb);
+ if (ret) {
+ rtw89_err(rtwdev, "write port txch %d failed: %d\n",
+ txch, ret);
+
+ skb_dequeue(&txcb->tx_ack_queue);
+ kfree(txcb);
+ dev_kfree_skb_any(skb);
+ }
+ }
+}
+
+static int rtw89_usb_tx_write_fwcmd(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct sk_buff *skb = tx_req->skb;
+ struct sk_buff *skb512;
+ u32 txdesc_size = rtwdev->chip->h2c_desc_size;
+ void *txdesc;
+
+ if (((desc_info->pkt_size + txdesc_size) % 512) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_HCI, "avoiding multiple of 512\n");
+
+ skb512 = dev_alloc_skb(txdesc_size + desc_info->pkt_size +
+ RTW89_USB_MOD512_PADDING);
+ if (!skb512) {
+ rtw89_err(rtwdev, "%s: failed to allocate skb\n",
+ __func__);
+
+ return -ENOMEM;
+ }
+
+ skb_pull(skb512, txdesc_size);
+ skb_put_data(skb512, skb->data, skb->len);
+ skb_put_zero(skb512, RTW89_USB_MOD512_PADDING);
+
+ dev_kfree_skb_any(skb);
+ skb = skb512;
+ tx_req->skb = skb512;
+
+ desc_info->pkt_size += RTW89_USB_MOD512_PADDING;
+ }
+
+ txdesc = skb_push(skb, txdesc_size);
+ memset(txdesc, 0, txdesc_size);
+ rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
+
+ skb_queue_tail(&rtwusb->tx_queue[desc_info->ch_dma], skb);
+
+ return 0;
+}
+
+static int rtw89_usb_ops_tx_write(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct sk_buff *skb = tx_req->skb;
+ struct rtw89_txwd_body *txdesc;
+ u32 txdesc_size;
+
+ if ((desc_info->ch_dma == RTW89_TXCH_CH12 ||
+ tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
+ (desc_info->ch_dma != RTW89_TXCH_CH12 ||
+ tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
+ rtw89_err(rtwdev, "dma channel %d/TX type %d mismatch\n",
+ desc_info->ch_dma, tx_req->tx_type);
+ return -EINVAL;
+ }
+
+ if (desc_info->ch_dma == RTW89_TXCH_CH12)
+ return rtw89_usb_tx_write_fwcmd(rtwdev, tx_req);
+
+ txdesc_size = rtwdev->chip->txwd_body_size;
+ if (desc_info->en_wd_info)
+ txdesc_size += rtwdev->chip->txwd_info_size;
+
+ txdesc = skb_push(skb, txdesc_size);
+ memset(txdesc, 0, txdesc_size);
+ rtw89_chip_fill_txdesc(rtwdev, desc_info, txdesc);
+
+ le32p_replace_bits(&txdesc->dword0, 1, RTW89_TXWD_BODY0_STF_MODE);
+
+ skb_queue_tail(&rtwusb->tx_queue[desc_info->ch_dma], skb);
+
+ return 0;
+}
+
+static void rtw89_usb_rx_handler(struct work_struct *work)
+{
+ struct rtw89_usb *rtwusb = container_of(work, struct rtw89_usb, rx_work);
+ struct rtw89_dev *rtwdev = rtwusb->rtwdev;
+ struct rtw89_rx_desc_info desc_info;
+ struct sk_buff *rx_skb;
+ struct sk_buff *skb;
+ u32 pkt_offset;
+ int limit;
+
+ for (limit = 0; limit < 200; limit++) {
+ rx_skb = skb_dequeue(&rtwusb->rx_queue);
+ if (!rx_skb)
+ break;
+
+ if (skb_queue_len(&rtwusb->rx_queue) >= RTW89_USB_MAX_RXQ_LEN) {
+ rtw89_warn(rtwdev, "rx_queue overflow\n");
+ dev_kfree_skb_any(rx_skb);
+ continue;
+ }
+
+ memset(&desc_info, 0, sizeof(desc_info));
+ rtw89_chip_query_rxdesc(rtwdev, &desc_info, rx_skb->data, 0);
+
+ skb = rtw89_alloc_skb_for_rx(rtwdev, desc_info.pkt_size);
+ if (!skb) {
+ rtw89_debug(rtwdev, RTW89_DBG_HCI,
+ "failed to allocate RX skb of size %u\n",
+ desc_info.pkt_size);
+ continue;
+ }
+
+ pkt_offset = desc_info.offset + desc_info.rxd_len;
+
+ skb_put_data(skb, rx_skb->data + pkt_offset,
+ desc_info.pkt_size);
+
+ rtw89_core_rx(rtwdev, &desc_info, skb);
+
+ if (skb_queue_len(&rtwusb->rx_free_queue) >= RTW89_USB_RX_SKB_NUM)
+ dev_kfree_skb_any(rx_skb);
+ else
+ skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
+ }
+
+ if (limit == 200) {
+ rtw89_debug(rtwdev, RTW89_DBG_HCI,
+ "left %d rx skbs in the queue for later\n",
+ skb_queue_len(&rtwusb->rx_queue));
+ queue_work(rtwusb->rxwq, &rtwusb->rx_work);
+ }
+}
+
+static void rtw89_usb_rx_resubmit(struct rtw89_usb *rtwusb,
+ struct rtw89_usb_rx_ctrl_block *rxcb,
+ gfp_t gfp)
+{
+ struct rtw89_dev *rtwdev = rtwusb->rtwdev;
+ struct sk_buff *rx_skb;
+ int ret;
+
+ rx_skb = skb_dequeue(&rtwusb->rx_free_queue);
+ if (!rx_skb)
+ rx_skb = alloc_skb(RTW89_USB_RECVBUF_SZ, gfp);
+
+ if (!rx_skb)
+ goto try_later;
+
+ skb_reset_tail_pointer(rx_skb);
+ rx_skb->len = 0;
+
+ rxcb->rx_skb = rx_skb;
+
+ usb_fill_bulk_urb(rxcb->rx_urb, rtwusb->udev,
+ usb_rcvbulkpipe(rtwusb->udev, rtwusb->in_pipe),
+ rxcb->rx_skb->data, RTW89_USB_RECVBUF_SZ,
+ rtw89_usb_read_port_complete, rxcb);
+
+ ret = usb_submit_urb(rxcb->rx_urb, gfp);
+ if (ret) {
+ skb_queue_tail(&rtwusb->rx_free_queue, rxcb->rx_skb);
+
+ if (ret == -ENODEV)
+ set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
+ else
+ rtw89_err(rtwdev, "Err sending rx data urb %d\n", ret);
+
+ if (ret == -ENOMEM)
+ goto try_later;
+ }
+
+ return;
+
+try_later:
+ rxcb->rx_skb = NULL;
+ queue_work(rtwusb->rxwq, &rtwusb->rx_urb_work);
+}
+
+static void rtw89_usb_rx_resubmit_work(struct work_struct *work)
+{
+ struct rtw89_usb *rtwusb = container_of(work, struct rtw89_usb, rx_urb_work);
+ struct rtw89_usb_rx_ctrl_block *rxcb;
+ int i;
+
+ for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+
+ if (!rxcb->rx_skb)
+ rtw89_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
+ }
+}
+
+static void rtw89_usb_read_port_complete(struct urb *urb)
+{
+ struct rtw89_usb_rx_ctrl_block *rxcb = urb->context;
+ struct rtw89_dev *rtwdev = rxcb->rtwdev;
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct sk_buff *skb = rxcb->rx_skb;
+
+ if (urb->status == 0) {
+ if (urb->actual_length > urb->transfer_buffer_length ||
+ urb->actual_length < sizeof(struct rtw89_rxdesc_short)) {
+ rtw89_err(rtwdev, "failed to get urb length: %d\n",
+ urb->actual_length);
+ skb_queue_tail(&rtwusb->rx_free_queue, skb);
+ } else {
+ skb_put(skb, urb->actual_length);
+ skb_queue_tail(&rtwusb->rx_queue, skb);
+ queue_work(rtwusb->rxwq, &rtwusb->rx_work);
+ }
+
+ rtw89_usb_rx_resubmit(rtwusb, rxcb, GFP_ATOMIC);
+ } else {
+ skb_queue_tail(&rtwusb->rx_free_queue, skb);
+
+ if (atomic_inc_return(&rtwusb->continual_io_error) > 4)
+ set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
+
+ switch (urb->status) {
+ case -EINVAL:
+ case -EPIPE:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ set_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags);
+ break;
+ case -EPROTO:
+ case -EILSEQ:
+ case -ETIME:
+ case -ECOMM:
+ case -EOVERFLOW:
+ case -ENOENT:
+ break;
+ case -EINPROGRESS:
+ rtw89_info(rtwdev, "URB is in progress\n");
+ break;
+ default:
+ rtw89_err(rtwdev, "%s status %d\n",
+ __func__, urb->status);
+ break;
+ }
+ }
+}
+
+static void rtw89_usb_cancel_rx_bufs(struct rtw89_usb *rtwusb)
+{
+ struct rtw89_usb_rx_ctrl_block *rxcb;
+ int i;
+
+ for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+ usb_kill_urb(rxcb->rx_urb);
+ }
+}
+
+static void rtw89_usb_free_rx_bufs(struct rtw89_usb *rtwusb)
+{
+ struct rtw89_usb_rx_ctrl_block *rxcb;
+ int i;
+
+ for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+ usb_free_urb(rxcb->rx_urb);
+ }
+}
+
+static int rtw89_usb_alloc_rx_bufs(struct rtw89_usb *rtwusb)
+{
+ struct rtw89_usb_rx_ctrl_block *rxcb;
+ int i;
+
+ for (i = 0; i < RTW89_USB_RXCB_NUM; i++) {
+ rxcb = &rtwusb->rx_cb[i];
+
+ rxcb->rtwdev = rtwusb->rtwdev;
+ rxcb->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rxcb->rx_urb) {
+ rtw89_usb_free_rx_bufs(rtwusb);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw89_usb_init_rx(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct sk_buff *rx_skb;
+ int i;
+
+ rtwusb->rxwq = alloc_workqueue("rtw89_usb: rx wq", WQ_BH, 0);
+ if (!rtwusb->rxwq) {
+ rtw89_err(rtwdev, "failed to create RX work queue\n");
+ return -ENOMEM;
+ }
+
+ skb_queue_head_init(&rtwusb->rx_queue);
+ skb_queue_head_init(&rtwusb->rx_free_queue);
+
+ INIT_WORK(&rtwusb->rx_work, rtw89_usb_rx_handler);
+ INIT_WORK(&rtwusb->rx_urb_work, rtw89_usb_rx_resubmit_work);
+
+ for (i = 0; i < RTW89_USB_RX_SKB_NUM; i++) {
+ rx_skb = alloc_skb(RTW89_USB_RECVBUF_SZ, GFP_KERNEL);
+ if (rx_skb)
+ skb_queue_tail(&rtwusb->rx_free_queue, rx_skb);
+ }
+
+ return 0;
+}
+
+static void rtw89_usb_deinit_rx(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+
+ skb_queue_purge(&rtwusb->rx_queue);
+
+ destroy_workqueue(rtwusb->rxwq);
+
+ skb_queue_purge(&rtwusb->rx_free_queue);
+}
+
+static void rtw89_usb_start_rx(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ int i;
+
+ for (i = 0; i < RTW89_USB_RXCB_NUM; i++)
+ rtw89_usb_rx_resubmit(rtwusb, &rtwusb->rx_cb[i], GFP_KERNEL);
+}
+
+static void rtw89_usb_init_tx(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
+ skb_queue_head_init(&rtwusb->tx_queue[i]);
+}
+
+static void rtw89_usb_deinit_tx(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++) {
+ if (i == RTW89_TXCH_CH12)
+ skb_queue_purge(&rtwusb->tx_queue[i]);
+ else
+ ieee80211_purge_tx_queue(rtwdev->hw, &rtwusb->tx_queue[i]);
+ }
+}
+
+static void rtw89_usb_ops_reset(struct rtw89_dev *rtwdev)
+{
+ /* TODO: anything to do here? */
+}
+
+static int rtw89_usb_ops_start(struct rtw89_dev *rtwdev)
+{
+ return 0; /* Nothing to do. */
+}
+
+static void rtw89_usb_ops_stop(struct rtw89_dev *rtwdev)
+{
+ /* Nothing to do. */
+}
+
+static void rtw89_usb_ops_pause(struct rtw89_dev *rtwdev, bool pause)
+{
+ /* Nothing to do? */
+}
+
+static void rtw89_usb_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
+{
+ /* Nothing to do. */
+}
+
+static int rtw89_usb_ops_deinit(struct rtw89_dev *rtwdev)
+{
+ return 0; /* Nothing to do. */
+}
+
+static int rtw89_usb_ops_mac_pre_init(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+
+ rtw89_write32_set(rtwdev, R_AX_USB_HOST_REQUEST_2, B_AX_R_USBIO_MODE);
+
+	/* fix USB IO hang suggested by chihhanli@realtek.com */
+ rtw89_write32_clr(rtwdev, R_AX_USB_WLAN0_1,
+ B_AX_USBRX_RST | B_AX_USBTX_RST);
+
+ val32 = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
+ val32 &= ~(B_AX_HCI_RXDMA_EN | B_AX_HCI_TXDMA_EN);
+ rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val32);
+
+ val32 |= B_AX_HCI_RXDMA_EN | B_AX_HCI_TXDMA_EN;
+ rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val32);
+	/* fix USB TRX hang suggested by chihhanli@realtek.com */
+
+ return 0;
+}
+
+static int rtw89_usb_ops_mac_pre_deinit(struct rtw89_dev *rtwdev)
+{
+ return 0; /* Nothing to do. */
+}
+
+static int rtw89_usb_ops_mac_post_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ enum usb_device_speed speed;
+ u32 ep;
+
+ rtw89_write32_clr(rtwdev, R_AX_USB3_MAC_NPI_CONFIG_INTF_0,
+ B_AX_SSPHY_LFPS_FILTER);
+
+ speed = rtwusb->udev->speed;
+
+ if (speed == USB_SPEED_SUPER)
+ rtw89_write8(rtwdev, R_AX_RXDMA_SETTING, USB3_BULKSIZE);
+ else if (speed == USB_SPEED_HIGH)
+ rtw89_write8(rtwdev, R_AX_RXDMA_SETTING, USB2_BULKSIZE);
+ else
+ rtw89_write8(rtwdev, R_AX_RXDMA_SETTING, USB11_BULKSIZE);
+
+ for (ep = 5; ep <= 12; ep++) {
+ if (ep == 8)
+ continue;
+
+ rtw89_write8_mask(rtwdev, R_AX_USB_ENDPOINT_0,
+ B_AX_EP_IDX, ep);
+ rtw89_write8(rtwdev, R_AX_USB_ENDPOINT_2 + 1, NUMP);
+ }
+
+ return 0;
+}
+
+static void rtw89_usb_ops_recalc_int_mit(struct rtw89_dev *rtwdev)
+{
+ /* Nothing to do. */
+}
+
+static int rtw89_usb_ops_mac_lv1_rcvy(struct rtw89_dev *rtwdev,
+ enum rtw89_lv1_rcvy_step step)
+{
+ u32 reg, mask;
+
+ switch (rtwdev->chip->chip_id) {
+ case RTL8851B:
+ case RTL8852A:
+ case RTL8852B:
+ reg = R_AX_USB_WLAN0_1;
+ mask = B_AX_USBRX_RST | B_AX_USBTX_RST;
+ break;
+ case RTL8852C:
+ reg = R_AX_USB_WLAN0_1_V1;
+ mask = B_AX_USBRX_RST_V1 | B_AX_USBTX_RST_V1;
+ break;
+ default:
+ rtw89_err(rtwdev, "%s: fix me\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ switch (step) {
+ case RTW89_LV1_RCVY_STEP_1:
+ rtw89_write32_set(rtwdev, reg, mask);
+
+ msleep(30);
+ break;
+ case RTW89_LV1_RCVY_STEP_2:
+ rtw89_write32_clr(rtwdev, reg, mask);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rtw89_usb_ops_dump_err_status(struct rtw89_dev *rtwdev)
+{
+ rtw89_warn(rtwdev, "%s TODO\n", __func__);
+}
+
+static const struct rtw89_hci_ops rtw89_usb_ops = {
+ .tx_write = rtw89_usb_ops_tx_write,
+ .tx_kick_off = rtw89_usb_ops_tx_kick_off,
+ .flush_queues = NULL, /* Not needed? */
+ .reset = rtw89_usb_ops_reset,
+ .start = rtw89_usb_ops_start,
+ .stop = rtw89_usb_ops_stop,
+ .pause = rtw89_usb_ops_pause,
+ .switch_mode = rtw89_usb_ops_switch_mode,
+ .recalc_int_mit = rtw89_usb_ops_recalc_int_mit,
+
+ .read8 = rtw89_usb_ops_read8,
+ .read16 = rtw89_usb_ops_read16,
+ .read32 = rtw89_usb_ops_read32,
+ .write8 = rtw89_usb_ops_write8,
+ .write16 = rtw89_usb_ops_write16,
+ .write32 = rtw89_usb_ops_write32,
+
+ .mac_pre_init = rtw89_usb_ops_mac_pre_init,
+ .mac_pre_deinit = rtw89_usb_ops_mac_pre_deinit,
+ .mac_post_init = rtw89_usb_ops_mac_post_init,
+ .deinit = rtw89_usb_ops_deinit,
+
+ .check_and_reclaim_tx_resource = rtw89_usb_ops_check_and_reclaim_tx_resource,
+ .mac_lv1_rcvy = rtw89_usb_ops_mac_lv1_rcvy,
+ .dump_err_status = rtw89_usb_ops_dump_err_status,
+ .napi_poll = NULL,
+
+ .recovery_start = NULL,
+ .recovery_complete = NULL,
+
+ .ctrl_txdma_ch = NULL,
+ .ctrl_txdma_fw_ch = NULL,
+ .ctrl_trxhci = NULL,
+ .poll_txdma_ch_idle = NULL,
+
+ .clr_idx_all = NULL,
+ .clear = NULL,
+ .disable_intr = NULL,
+ .enable_intr = NULL,
+ .rst_bdram = NULL,
+};
+
+static int rtw89_usb_parse(struct rtw89_dev *rtwdev,
+ struct usb_interface *intf)
+{
+ struct usb_host_interface *host_interface = &intf->altsetting[0];
+ struct usb_interface_descriptor *intf_desc = &host_interface->desc;
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ struct usb_endpoint_descriptor *endpoint;
+ int num_out_pipes = 0;
+ u8 num;
+ int i;
+
+ if (intf_desc->bNumEndpoints > RTW89_MAX_ENDPOINT_NUM) {
+ rtw89_err(rtwdev, "found %d endpoints, expected %d max\n",
+ intf_desc->bNumEndpoints, RTW89_MAX_ENDPOINT_NUM);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < intf_desc->bNumEndpoints; i++) {
+ endpoint = &host_interface->endpoint[i].desc;
+ num = usb_endpoint_num(endpoint);
+
+ if (usb_endpoint_dir_in(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ if (rtwusb->in_pipe) {
+ rtw89_err(rtwdev,
+ "found more than 1 bulk in endpoint\n");
+ return -EINVAL;
+ }
+
+ rtwusb->in_pipe = num;
+ }
+
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ if (num_out_pipes >= RTW89_MAX_BULKOUT_NUM) {
+ rtw89_err(rtwdev,
+ "found more than %d bulk out endpoints\n",
+ RTW89_MAX_BULKOUT_NUM);
+ return -EINVAL;
+ }
+
+ rtwusb->out_pipe[num_out_pipes++] = num;
+ }
+ }
+
+ if (num_out_pipes < 1) {
+ rtw89_err(rtwdev, "no bulk out endpoints found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw89_usb_intf_init(struct rtw89_dev *rtwdev,
+ struct usb_interface *intf)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+ int ret;
+
+ ret = rtw89_usb_parse(rtwdev, intf);
+ if (ret)
+ return ret;
+
+ rtwusb->vendor_req_buf = kmalloc(sizeof(*rtwusb->vendor_req_buf),
+ GFP_KERNEL);
+ if (!rtwusb->vendor_req_buf)
+ return -ENOMEM;
+
+ rtwusb->udev = usb_get_dev(interface_to_usbdev(intf));
+
+ usb_set_intfdata(intf, rtwdev->hw);
+
+ SET_IEEE80211_DEV(rtwdev->hw, &intf->dev);
+
+ return 0;
+}
+
+static void rtw89_usb_intf_deinit(struct rtw89_dev *rtwdev,
+ struct usb_interface *intf)
+{
+ struct rtw89_usb *rtwusb = rtw89_usb_priv(rtwdev);
+
+ usb_put_dev(rtwusb->udev);
+ kfree(rtwusb->vendor_req_buf);
+ usb_set_intfdata(intf, NULL);
+}
+
+int rtw89_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ const struct rtw89_driver_info *info;
+ struct rtw89_dev *rtwdev;
+ struct rtw89_usb *rtwusb;
+ int ret;
+
+ info = (const struct rtw89_driver_info *)id->driver_info;
+
+ rtwdev = rtw89_alloc_ieee80211_hw(&intf->dev,
+ sizeof(struct rtw89_usb),
+ info->chip, info->variant);
+ if (!rtwdev) {
+ dev_err(&intf->dev, "failed to allocate hw\n");
+ return -ENOMEM;
+ }
+
+ rtwusb = rtw89_usb_priv(rtwdev);
+ rtwusb->rtwdev = rtwdev;
+
+ rtwdev->hci.ops = &rtw89_usb_ops;
+ rtwdev->hci.type = RTW89_HCI_TYPE_USB;
+
+ ret = rtw89_usb_intf_init(rtwdev, intf);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to initialise intf: %d\n", ret);
+ goto err_free_hw;
+ }
+
+ if (rtwusb->udev->speed == USB_SPEED_SUPER)
+ rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_USB3;
+ else
+ rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_USB2;
+
+ rtw89_usb_init_tx(rtwdev);
+
+ ret = rtw89_usb_alloc_rx_bufs(rtwusb);
+ if (ret)
+ goto err_intf_deinit;
+
+ ret = rtw89_usb_init_rx(rtwdev);
+ if (ret)
+ goto err_free_rx_bufs;
+
+ ret = rtw89_core_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to initialise core: %d\n", ret);
+ goto err_deinit_rx;
+ }
+
+ ret = rtw89_chip_info_setup(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to setup chip information\n");
+ goto err_core_deinit;
+ }
+
+ ret = rtw89_core_register(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to register core\n");
+ goto err_core_deinit;
+ }
+
+ rtw89_usb_start_rx(rtwdev);
+
+ set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
+
+ return 0;
+
+err_core_deinit:
+ rtw89_core_deinit(rtwdev);
+err_deinit_rx:
+ rtw89_usb_deinit_rx(rtwdev);
+err_free_rx_bufs:
+ rtw89_usb_free_rx_bufs(rtwusb);
+err_intf_deinit:
+ rtw89_usb_intf_deinit(rtwdev, intf);
+err_free_hw:
+ rtw89_free_ieee80211_hw(rtwdev);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_usb_probe);
+
+void rtw89_usb_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct rtw89_dev *rtwdev;
+ struct rtw89_usb *rtwusb;
+
+ if (!hw)
+ return;
+
+ rtwdev = hw->priv;
+ rtwusb = rtw89_usb_priv(rtwdev);
+
+ rtw89_usb_cancel_rx_bufs(rtwusb);
+
+ rtw89_core_unregister(rtwdev);
+ rtw89_core_deinit(rtwdev);
+ rtw89_usb_deinit_rx(rtwdev);
+ rtw89_usb_free_rx_bufs(rtwusb);
+ rtw89_usb_deinit_tx(rtwdev);
+ rtw89_usb_intf_deinit(rtwdev, intf);
+ rtw89_free_ieee80211_hw(rtwdev);
+}
+EXPORT_SYMBOL(rtw89_usb_disconnect);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek USB 802.11ax wireless driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/usb.h b/drivers/net/wireless/realtek/rtw89/usb.h
new file mode 100644
index 000000000000..c1b4bfa20979
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/usb.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#ifndef __RTW89_USB_H__
+#define __RTW89_USB_H__
+
+#include "txrx.h"
+
+#define RTW89_USB_VENQT 0x05
+#define RTW89_USB_VENQT_READ 0xc0
+#define RTW89_USB_VENQT_WRITE 0x40
+
+#define RTW89_USB_RECVBUF_SZ 20480
+#define RTW89_USB_RXCB_NUM 8
+#define RTW89_USB_RX_SKB_NUM 16
+#define RTW89_USB_MAX_RXQ_LEN 512
+#define RTW89_USB_MOD512_PADDING 4
+
+#define RTW89_MAX_ENDPOINT_NUM 9
+#define RTW89_MAX_BULKOUT_NUM 7
+
+struct rtw89_usb_rx_ctrl_block {
+ struct rtw89_dev *rtwdev;
+ struct urb *rx_urb;
+ struct sk_buff *rx_skb;
+};
+
+struct rtw89_usb_tx_ctrl_block {
+ struct rtw89_dev *rtwdev;
+ u8 txch;
+ struct sk_buff_head tx_ack_queue;
+};
+
+struct rtw89_usb {
+ struct rtw89_dev *rtwdev;
+ struct usb_device *udev;
+
+ __le32 *vendor_req_buf;
+
+ atomic_t continual_io_error;
+
+ u8 in_pipe;
+ u8 out_pipe[RTW89_MAX_BULKOUT_NUM];
+
+ struct workqueue_struct *rxwq;
+ struct rtw89_usb_rx_ctrl_block rx_cb[RTW89_USB_RXCB_NUM];
+ struct sk_buff_head rx_queue;
+ struct sk_buff_head rx_free_queue;
+ struct work_struct rx_work;
+ struct work_struct rx_urb_work;
+
+ struct sk_buff_head tx_queue[RTW89_TXCH_NUM];
+};
+
+static inline struct rtw89_usb *rtw89_usb_priv(struct rtw89_dev *rtwdev)
+{
+ return (struct rtw89_usb *)rtwdev->priv;
+}
+
+int rtw89_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id);
+void rtw89_usb_disconnect(struct usb_interface *intf);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 34a0ab49bd7a..5bb7c1a42f1d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -12,7 +12,7 @@
#include "util.h"
#include "wow.h"
-void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
+void __rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
@@ -619,9 +619,12 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
flex_array_size(rekey_conf, key, cipher_info->len));
if (ieee80211_vif_is_mld(wow_vif))
- key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id);
+ key = ieee80211_gtk_rekey_add(wow_vif, keyidx, gtk,
+ cipher_info->len,
+ rtwvif_link->link_id);
else
- key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
+ key = ieee80211_gtk_rekey_add(wow_vif, keyidx, gtk,
+ cipher_info->len, -1);
kfree(rekey_conf);
if (IS_ERR(key)) {
@@ -1412,6 +1415,8 @@ static void rtw89_fw_release_pno_pkt_list(struct rtw89_dev *rtwdev,
static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
+ static const u8 basic_rate_ie[] = {WLAN_EID_SUPP_RATES, 0x08,
+ 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c};
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
u8 num = nd_config->n_match_sets, i;
@@ -1423,10 +1428,11 @@ static int rtw89_pno_scan_update_probe_req(struct rtw89_dev *rtwdev,
skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
nd_config->match_sets[i].ssid.ssid,
nd_config->match_sets[i].ssid.ssid_len,
- nd_config->ie_len);
+ nd_config->ie_len + sizeof(basic_rate_ie));
if (!skb)
return -ENOMEM;
+ skb_put_data(skb, basic_rate_ie, sizeof(basic_rate_ie));
skb_put_data(skb, nd_config->ie, nd_config->ie_len);
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -1477,7 +1483,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
opt.enable = enable;
opt.repeat = RTW89_SCAN_NORMAL;
opt.norm_pd = max(interval, 1) * 10; /* in unit of 100ms */
- opt.delay = max(rtw_wow->nd_config->delay, 1);
+ opt.delay = max(rtw_wow->nd_config->delay, 1) * 1000;
if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
@@ -1489,7 +1495,7 @@ static int rtw89_pno_scan_offload(struct rtw89_dev *rtwdev, bool enable)
opt.opch_end = RTW89_CHAN_INVALID;
}
- mac->scan_offload(rtwdev, &opt, rtwvif_link, true);
+ rtw89_mac_scan_offload(rtwdev, &opt, rtwvif_link, true);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index f91991e8f2e3..6606528d31c7 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -116,9 +116,21 @@ static inline bool rtw_wow_has_mgd_features(struct rtw89_dev *rtwdev)
return !bitmap_empty(rtw_wow->flags, RTW89_WOW_FLAG_NUM);
}
+void __rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+
+static inline
+void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (likely(!ieee80211_is_assoc_req(hdr->frame_control)))
+ return;
+
+ __rtw89_wow_parse_akm(rtwdev, skb);
+}
+
int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan);
int rtw89_wow_resume(struct rtw89_dev *rtwdev);
-void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb);
#else
static inline
void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/ti/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h
index 890176c915ab..bfe35754f33a 100644
--- a/drivers/net/wireless/ti/wl1251/reg.h
+++ b/drivers/net/wireless/ti/wl1251/reg.h
@@ -205,7 +205,7 @@ enum wl12xx_acx_int_reg {
the burst read starts at EEPROM address 0.
Otherwise, it starts at the address
following the address of the previous access.
- TheWlan hardware hardware clears this bit automatically.
+ TheWlan hardware clears this bit automatically.
Default: 0x00000000
*================================================*/
@@ -353,13 +353,13 @@ enum wl12xx_acx_int_reg {
loads a single byte of data into the EE_DATA
register from the EEPROM location specified in
the EE_ADDR register.
- The Wlan hardware hardware clears this bit automatically.
+ The Wlan hardware clears this bit automatically.
EE_DATA is valid when this bit is cleared.
0 EE_WRITE - EEPROM Write Request - Setting this bit
writes a single byte of data from the EE_DATA register into the
EEPROM location specified in the EE_ADDR register.
- The Wlan hardware hardware clears this bit automatically.
+ The Wlan hardware clears this bit automatically.
*===============================================*/
#define EE_CTL (REGISTERS_BASE + 0x2000)
#define ACX_EE_CTL_REG EE_CTL
diff --git a/drivers/net/wireless/ti/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h
index 8ff018808020..601305a26141 100644
--- a/drivers/net/wireless/ti/wl12xx/reg.h
+++ b/drivers/net/wireless/ti/wl12xx/reg.h
@@ -168,7 +168,7 @@
the burst read starts at EEPROM address 0.
Otherwise, it starts at the address
following the address of the previous access.
- TheWlan hardware hardware clears this bit automatically.
+ TheWlan hardware clears this bit automatically.
Default: 0x00000000
*================================================*/
@@ -276,13 +276,13 @@
loads a single byte of data into the EE_DATA
register from the EEPROM location specified in
the EE_ADDR register.
- The Wlan hardware hardware clears this bit automatically.
+ The Wlan hardware clears this bit automatically.
EE_DATA is valid when this bit is cleared.
0 EE_WRITE - EEPROM Write Request - Setting this bit
writes a single byte of data from the EE_DATA register into the
EEPROM location specified in the EE_ADDR register.
- The Wlan hardware hardware clears this bit automatically.
+ The Wlan hardware clears this bit automatically.
*===============================================*/
#define ACX_EE_CTL_REG EE_CTL
#define EE_WRITE 0x00000001ul
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 9ae10f65f2af..2faa0de2a36e 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -445,7 +445,7 @@ static void int_urb_complete(struct urb *urb)
}
if (urb->actual_length < sizeof(hdr)) {
- dev_dbg_f(urb_dev(urb), "error: urb %p to small\n", urb);
+ dev_dbg_f(urb_dev(urb), "error: urb %p too small\n", urb);
goto resubmit;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7493e5aa984c..895fb163d48e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -381,12 +381,12 @@ static void nvme_log_err_passthru(struct request *req)
nr->status & NVME_SC_MASK, /* Status Code */
nr->status & NVME_STATUS_MORE ? "MORE " : "",
nr->status & NVME_STATUS_DNR ? "DNR " : "",
- nr->cmd->common.cdw10,
- nr->cmd->common.cdw11,
- nr->cmd->common.cdw12,
- nr->cmd->common.cdw13,
- nr->cmd->common.cdw14,
- nr->cmd->common.cdw15);
+ le32_to_cpu(nr->cmd->common.cdw10),
+ le32_to_cpu(nr->cmd->common.cdw11),
+ le32_to_cpu(nr->cmd->common.cdw12),
+ le32_to_cpu(nr->cmd->common.cdw13),
+ le32_to_cpu(nr->cmd->common.cdw14),
+ le32_to_cpu(nr->cmd->common.cdw15));
}
enum nvme_disposition {
@@ -764,6 +764,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
+
+ if (!(rq->rq_flags & RQF_DONTPREP))
+ nvme_clear_nvme_request(rq);
+
return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
@@ -3537,15 +3541,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret)
goto out_free;
}
-
- if (le16_to_cpu(id->awupf) != ctrl->subsys->awupf) {
- dev_err_ratelimited(ctrl->device,
- "inconsistent AWUPF, controller not added (%u/%u).\n",
- le16_to_cpu(id->awupf), ctrl->subsys->awupf);
- ret = -EINVAL;
- goto out_free;
- }
-
memcpy(ctrl->subsys->firmware_rev, id->fr,
sizeof(ctrl->subsys->firmware_rev));
@@ -4077,7 +4072,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
return;
}
}
- list_add(&ns->list, &ns->ctrl->namespaces);
+ list_add_rcu(&ns->list, &ns->ctrl->namespaces);
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 688033b88d38..470bf37e5a63 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1928,10 +1928,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
struct sock *sk = queue->sock->sk;
/* Restore the default callbacks before starting upcall */
- read_lock_bh(&sk->sk_callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = NULL;
sk->sk_data_ready = port->data_ready;
- read_unlock_bh(&sk->sk_callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
if (!nvmet_tcp_try_peek_pdu(queue)) {
if (!nvmet_tcp_tls_handshake(queue))
return;
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
index ca6dd71d8a2e..7807ec0e2d18 100644
--- a/drivers/nvmem/imx-ocotp-ele.c
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/if_ether.h> /* ETH_ALEN */
enum fuse_type {
FUSE_FSB = BIT(0),
@@ -118,9 +119,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
int i;
/* Deal with some post processing of nvmem cell data */
- if (id && !strcmp(id, "mac-address"))
+ if (id && !strcmp(id, "mac-address")) {
+ bytes = min(bytes, ETH_ALEN);
for (i = 0; i < bytes / 2; i++)
swap(buf[i], buf[bytes - i - 1]);
+ }
return 0;
}
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 79dd4fda0329..7bf7656d4f96 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/if_ether.h> /* ETH_ALEN */
#define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the
* OTP Bank0 Word0
@@ -227,9 +228,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
int i;
/* Deal with some post processing of nvmem cell data */
- if (id && !strcmp(id, "mac-address"))
+ if (id && !strcmp(id, "mac-address")) {
+ bytes = min(bytes, ETH_ALEN);
for (i = 0; i < bytes / 2; i++)
swap(buf[i], buf[bytes - i - 1]);
+ }
return 0;
}
diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c
index 436426d4e8f9..8571aac56295 100644
--- a/drivers/nvmem/layouts/u-boot-env.c
+++ b/drivers/nvmem/layouts/u-boot-env.c
@@ -92,7 +92,7 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
size_t crc32_data_offset;
size_t crc32_data_len;
size_t crc32_offset;
- __le32 *crc32_addr;
+ uint32_t *crc32_addr;
size_t data_offset;
size_t data_len;
size_t dev_size;
@@ -143,8 +143,8 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
goto err_kfree;
}
- crc32_addr = (__le32 *)(buf + crc32_offset);
- crc32 = le32_to_cpu(*crc32_addr);
+ crc32_addr = (uint32_t *)(buf + crc32_offset);
+ crc32 = *crc32_addr;
crc32_data_len = dev_size - crc32_data_offset;
data_len = dev_size - data_offset;
diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c
index cc96be450360..28b3e93d31c0 100644
--- a/drivers/pci/controller/pci-hyperv-intf.c
+++ b/drivers/pci/controller/pci-hyperv-intf.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
struct hyperv_pci_block_ops hvpci_block_ops;
EXPORT_SYMBOL_GPL(hvpci_block_ops);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ebe39218479a..d2b7e8ea710b 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -599,7 +599,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
#define hv_msi_prepare pci_msi_prepare
/**
- * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
* affinity.
* @data: Describes the IRQ
*
@@ -608,7 +608,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-static void hv_arch_irq_unmask(struct irq_data *data)
+static void hv_irq_retarget_interrupt(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct hv_retarget_device_interrupt *params;
@@ -713,6 +713,20 @@ out:
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
}
+
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ if (hv_root_partition())
+ /*
+ * In case of the nested root partition, the nested hypervisor
+ * is taking care of interrupt remapping and thus the
+ * MAP_DEVICE_INTERRUPT hypercall is required instead of
+ * RETARGET_INTERRUPT.
+ */
+ (void)hv_map_msi_interrupt(data, NULL);
+ else
+ hv_irq_retarget_interrupt(data);
+}
#elif defined(CONFIG_ARM64)
/*
* SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
@@ -4200,6 +4214,9 @@ static int __init init_hv_pci_drv(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ if (hv_root_partition() && !hv_nested)
+ return -ENODEV;
+
ret = hv_pci_irqchip_init();
if (ret)
return ret;
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8e2daea81666..04a5a34e7a95 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -994,7 +994,8 @@ struct phy *phy_create(struct device *dev, struct device_node *node,
}
device_initialize(&phy->dev);
- mutex_init(&phy->mutex);
+ lockdep_register_key(&phy->lockdep_key);
+ mutex_init_with_key(&phy->mutex, &phy->lockdep_key);
phy->dev.class = &phy_class;
phy->dev.parent = dev;
@@ -1259,6 +1260,8 @@ static void phy_release(struct device *dev)
dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
debugfs_remove_recursive(phy->debugfs);
regulator_put(phy->pwr);
+ mutex_destroy(&phy->mutex);
+ lockdep_unregister_key(&phy->lockdep_key);
ida_free(&phy_ida, phy->id);
kfree(phy);
}
diff --git a/drivers/phy/phy-snps-eusb2.c b/drivers/phy/phy-snps-eusb2.c
index b73a1d7e57b3..751b6d8ba2be 100644
--- a/drivers/phy/phy-snps-eusb2.c
+++ b/drivers/phy/phy-snps-eusb2.c
@@ -567,9 +567,11 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev)
}
}
- if (IS_ERR_OR_NULL(phy->ref_clk))
- return dev_err_probe(dev, PTR_ERR(phy->ref_clk),
+ if (IS_ERR_OR_NULL(phy->ref_clk)) {
+ ret = phy->ref_clk ? PTR_ERR(phy->ref_clk) : -ENOENT;
+ return dev_err_probe(dev, ret,
"failed to get ref clk\n");
+ }
num = ARRAY_SIZE(phy->vregs);
for (i = 0; i < num; i++)
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 23a23f2d64e5..e818f6c3980e 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -648,14 +648,15 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
udelay(100);
}
- if (padctl->soc->trk_hw_mode) {
- value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
- value |= USB2_TRK_HW_MODE;
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ if (padctl->soc->trk_update_on_idle)
value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE;
- padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
- } else {
+ if (padctl->soc->trk_hw_mode)
+ value |= USB2_TRK_HW_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+
+ if (!padctl->soc->trk_hw_mode)
clk_disable_unprepare(priv->usb2_trk_clk);
- }
}
static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
@@ -782,13 +783,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
}
static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
- bool status)
+ struct tegra_xusb_usb2_port *port, bool status)
{
- u32 value;
+ u32 value, id_override;
+ int err = 0;
dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
value = padctl_readl(padctl, USB2_VBUS_ID);
+ id_override = value & ID_OVERRIDE(~0);
if (status) {
if (value & VBUS_OVERRIDE) {
@@ -799,15 +802,35 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
value = padctl_readl(padctl, USB2_VBUS_ID);
}
- value &= ~ID_OVERRIDE(~0);
- value |= ID_OVERRIDE_GROUNDED;
+ if (id_override != ID_OVERRIDE_GROUNDED) {
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_GROUNDED;
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ err = regulator_enable(port->supply);
+ if (err) {
+ dev_err(padctl->dev, "Failed to enable regulator: %d\n", err);
+ return err;
+ }
+ }
} else {
- value &= ~ID_OVERRIDE(~0);
- value |= ID_OVERRIDE_FLOATING;
+ if (id_override == ID_OVERRIDE_GROUNDED) {
+ /*
+ * The regulator is disabled only when the role transitions
+ * from USB_ROLE_HOST to USB_ROLE_NONE.
+ */
+ err = regulator_disable(port->supply);
+ if (err) {
+ dev_err(padctl->dev, "Failed to disable regulator: %d\n", err);
+ return err;
+ }
+
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+ }
}
- padctl_writel(padctl, value, USB2_VBUS_ID);
-
return 0;
}
@@ -826,27 +849,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode,
if (mode == PHY_MODE_USB_OTG) {
if (submode == USB_ROLE_HOST) {
- tegra186_xusb_padctl_id_override(padctl, true);
-
- err = regulator_enable(port->supply);
+ err = tegra186_xusb_padctl_id_override(padctl, port, true);
+ if (err)
+ goto out;
} else if (submode == USB_ROLE_DEVICE) {
tegra186_xusb_padctl_vbus_override(padctl, true);
} else if (submode == USB_ROLE_NONE) {
- /*
- * When port is peripheral only or role transitions to
- * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not
- * enabled.
- */
- if (regulator_is_enabled(port->supply))
- regulator_disable(port->supply);
-
- tegra186_xusb_padctl_id_override(padctl, false);
+ err = tegra186_xusb_padctl_id_override(padctl, port, false);
+ if (err)
+ goto out;
tegra186_xusb_padctl_vbus_override(padctl, false);
}
}
-
+out:
mutex_unlock(&padctl->lock);
-
return err;
}
@@ -1710,7 +1726,8 @@ const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = {
.num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
.supports_gen2 = true,
.poll_trk_completed = true,
- .trk_hw_mode = true,
+ .trk_hw_mode = false,
+ .trk_update_on_idle = true,
.supports_lp_cfg_en = true,
};
EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc);
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 6e45d194c689..d2b5f9565132 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -434,6 +434,7 @@ struct tegra_xusb_padctl_soc {
bool need_fake_usb3_port;
bool poll_trk_completed;
bool trk_hw_mode;
+ bool trk_update_on_idle;
bool supports_lp_cfg_en;
};
diff --git a/drivers/platform/arm64/huawei-gaokun-ec.c b/drivers/platform/arm64/huawei-gaokun-ec.c
index 7e5aa7ca2403..7170f8eb76f7 100644
--- a/drivers/platform/arm64/huawei-gaokun-ec.c
+++ b/drivers/platform/arm64/huawei-gaokun-ec.c
@@ -662,6 +662,7 @@ static void gaokun_aux_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
kfree(adev);
}
@@ -693,6 +694,7 @@ static int gaokun_aux_init(struct device *parent, const char *name,
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.of_node);
kfree(adev);
return ret;
}
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index a1c529f1ff1a..4776013e0764 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -15,6 +15,7 @@
#include <linux/hwmon.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <uapi/linux/psci.h>
#define MLXBF_PMC_WRITE_REG_32 0x82000009
@@ -1222,7 +1223,7 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
return -ENODEV;
}
-/* Get the event number given the name */
+/* Get the event name given the number */
static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt)
{
const struct mlxbf_pmc_events *events;
@@ -1784,6 +1785,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
attr, struct mlxbf_pmc_attribute, dev_attr);
unsigned int blk_num, cnt_num;
bool is_l3 = false;
+ char *evt_name;
int evt_num;
int err;
@@ -1791,14 +1793,23 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
cnt_num = attr_event->index;
if (isalpha(buf[0])) {
+ /* Remove the trailing newline character if present */
+ evt_name = kstrdup_and_replace(buf, '\n', '\0', GFP_KERNEL);
+ if (!evt_name)
+ return -ENOMEM;
+
evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
- buf);
+ evt_name);
+ kfree(evt_name);
if (evt_num < 0)
return -EINVAL;
} else {
err = kstrtouint(buf, 0, &evt_num);
if (err < 0)
return err;
+
+ if (!mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num))
+ return -EINVAL;
}
if (strstr(pmc->block_name[blk_num], "l3cache"))
@@ -1879,13 +1890,14 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- unsigned int en, blk_num;
+ unsigned int blk_num;
u32 word;
int err;
+ bool en;
blk_num = attr_enable->nr;
- err = kstrtouint(buf, 0, &en);
+ err = kstrtobool(buf, &en);
if (err < 0)
return err;
@@ -1905,14 +1917,11 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
MLXBF_PMC_CRSPACE_PERFMON_CTL(pmc->block[blk_num].counters),
MLXBF_PMC_WRITE_REG_32, word);
} else {
- if (en && en != 1)
- return -EINVAL;
-
err = mlxbf_pmc_config_l3_counters(blk_num, false, !!en);
if (err)
return err;
- if (en == 1) {
+ if (en) {
err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
if (err)
return err;
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index abbc2644ff6d..bea87a85ae75 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -58,6 +58,8 @@ obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP) += hp/
# Hewlett Packard Enterprise
obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o
+obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
+
# IBM Thinkpad and Lenovo
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
@@ -128,7 +130,6 @@ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# Platform drivers
-obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 20ec122a9fe0..b58cf74197f0 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -90,6 +90,14 @@ static struct awcc_quirks empty_quirks;
static const struct dmi_system_id awcc_dmi_table[] __initconst = {
{
+ .ident = "Alienware Area-51m",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
.ident = "Alienware Area-51m R2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -98,6 +106,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &generic_quirks,
},
{
+ .ident = "Alienware m15 R5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R5"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
.ident = "Alienware m15 R7",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -233,6 +249,7 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
},
.driver_data = &g_series_quirks,
},
+ {}
};
enum AWCC_GET_FAN_SENSORS_OPERATIONS {
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
index 0791118dd6b7..732de5f556f8 100644
--- a/drivers/platform/x86/dell/dell-lis3lv02d.c
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -49,6 +49,7 @@ static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision 3551", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Vostro V131", 0x1d),
DELL_LIS3LV02D_DMI_ENTRY("Vostro 5568", 0x29),
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index 67f3d7158403..62e3d060f038 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -689,9 +689,13 @@ static int dell_wmi_ddv_battery_translate(struct dell_wmi_ddv_data *data,
dev_dbg(&data->wdev->dev, "Translation cache miss\n");
- /* Perform a translation between a ACPI battery and a battery index */
-
- ret = power_supply_get_property(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val);
+ /*
+	 * Perform a translation between an ACPI battery and a battery index.
+ * We have to use power_supply_get_property_direct() here because this
+ * function will also get called from the callbacks of the power supply
+ * extension.
+ */
+ ret = power_supply_get_property_direct(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val);
if (ret < 0)
return ret;
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index b5e4da6a6779..edb9d2fb02ec 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1669,7 +1669,7 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
- priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED;
+ priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN;
err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led);
if (err)
@@ -1728,7 +1728,7 @@ static int ideapad_fn_lock_led_init(struct ideapad_private *priv)
priv->fn_lock.led.name = "platform::" LED_FUNCTION_FNLOCK;
priv->fn_lock.led.brightness_get = ideapad_fn_lock_led_cdev_get;
priv->fn_lock.led.brightness_set_blocking = ideapad_fn_lock_led_cdev_set;
- priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED;
+ priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN;
err = led_classdev_register(&priv->platform_device->dev, &priv->fn_lock.led);
if (err)
diff --git a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
index 89153afd7015..7b9bad1978ff 100644
--- a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
+++ b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
@@ -122,26 +122,35 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct
return -EIO;
union acpi_object *obj __free(kfree) = output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- led_version = obj->integer.value;
- else
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
return -EIO;
- wpriv->cdev[led_type].max_brightness = LED_ON;
- wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME;
+ led_version = obj->integer.value;
+
+ /*
+	 * Per the output parameter definition, 0 means the mute LED is not
+	 * supported, while a non-zero value means it is supported.
+ */
+ if (led_version == 0)
+ return 0;
+
switch (led_type) {
case MIC_MUTE:
- if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER)
- return -EIO;
+ if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER) {
+ pr_warn("The MIC_MUTE LED of this device isn't supported.\n");
+ return 0;
+ }
wpriv->cdev[led_type].name = "platform::micmute";
wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_micmute_led_set;
wpriv->cdev[led_type].default_trigger = "audio-micmute";
break;
case AUDIO_MUTE:
- if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER)
- return -EIO;
+ if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER) {
+ pr_warn("The AUDIO_MUTE LED of this device isn't supported.\n");
+ return 0;
+ }
wpriv->cdev[led_type].name = "platform::mute";
wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_audiomute_led_set;
@@ -152,6 +161,9 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct
return -EINVAL;
}
+ wpriv->cdev[led_type].max_brightness = LED_ON;
+ wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME;
+
err = devm_led_classdev_register(dev, &wpriv->cdev[led_type]);
if (err < 0) {
dev_err(dev, "Could not register mute LED %d : %d\n", led_type, err);
diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c
index c1e148657c87..39359811a930 100644
--- a/drivers/pmdomain/governor.c
+++ b/drivers/pmdomain/governor.c
@@ -8,6 +8,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
+#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>
@@ -349,6 +350,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
struct cpuidle_device *dev;
ktime_t domain_wakeup, next_hrtimer;
ktime_t now = ktime_get();
+ struct device *cpu_dev;
+ s64 cpu_constraint, global_constraint;
s64 idle_duration_ns;
int cpu, i;
@@ -359,6 +362,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
return true;
+ global_constraint = cpu_latency_qos_limit();
/*
* Find the next wakeup for any of the online CPUs within the PM domain
* and its subdomains. Note, we only need the genpd->cpus, as it already
@@ -372,8 +376,16 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
if (ktime_before(next_hrtimer, domain_wakeup))
domain_wakeup = next_hrtimer;
}
+
+ cpu_dev = get_cpu_device(cpu);
+ if (cpu_dev) {
+ cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev);
+ if (cpu_constraint < global_constraint)
+ global_constraint = cpu_constraint;
+ }
}
+ global_constraint *= NSEC_PER_USEC;
/* The minimum idle duration is from now - until the next wakeup. */
idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
if (idle_duration_ns <= 0)
@@ -389,8 +401,10 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
*/
i = genpd->state_idx;
do {
- if (idle_duration_ns >= (genpd->states[i].residency_ns +
- genpd->states[i].power_off_latency_ns)) {
+ if ((idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) &&
+ (global_constraint >= (genpd->states[i].power_on_latency_ns +
+ genpd->states[i].power_off_latency_ns))) {
genpd->state_idx = i;
genpd->gd->last_enter = now;
genpd->gd->reflect_residency = true;
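
The governor change above folds per-CPU device PM QoS resume-latency constraints into the domain power-off decision, on top of the global cpu_latency_qos limit. A minimal sketch of how such a per-CPU constraint is expressed; the helper name and request lifetime handling are illustrative and not part of this patch:

#include <linux/cpu.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request cpu0_resume_lat_req;

static int example_cap_cpu0_resume_latency(s32 limit_us)
{
	struct device *cpu_dev = get_cpu_device(0);

	if (!cpu_dev)
		return -ENODEV;

	/*
	 * cpu_power_down_ok() compares this per-CPU limit (and the global
	 * cpu_latency_qos limit) against the domain's power-off plus
	 * power-on latency before allowing the deeper domain state.
	 */
	return dev_pm_qos_add_request(cpu_dev, &cpu0_resume_lat_req,
				      DEV_PM_QOS_RESUME_LATENCY, limit_us);
}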
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 33a5bfce4604..cfb0e3e0d4aa 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1235,9 +1235,8 @@ bool power_supply_has_property(struct power_supply *psy,
return false;
}
-int power_supply_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+static int __power_supply_get_property(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val, bool use_extensions)
{
struct power_supply_ext_registration *reg;
@@ -1247,10 +1246,14 @@ int power_supply_get_property(struct power_supply *psy,
return -ENODEV;
}
- scoped_guard(rwsem_read, &psy->extensions_sem) {
- power_supply_for_each_extension(reg, psy) {
- if (power_supply_ext_has_property(reg->ext, psp))
+ if (use_extensions) {
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (!power_supply_ext_has_property(reg->ext, psp))
+ continue;
+
return reg->ext->get_property(psy, reg->ext, reg->data, psp, val);
+ }
}
}
@@ -1261,20 +1264,49 @@ int power_supply_get_property(struct power_supply *psy,
else
return -EINVAL;
}
+
+int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ return __power_supply_get_property(psy, psp, val, true);
+}
EXPORT_SYMBOL_GPL(power_supply_get_property);
-int power_supply_set_property(struct power_supply *psy,
- enum power_supply_property psp,
- const union power_supply_propval *val)
+/**
+ * power_supply_get_property_direct - Read a power supply property without checking for extensions
+ * @psy: The power supply
+ * @psp: The power supply property to read
+ * @val: The resulting value of the power supply property
+ *
+ * Read a power supply property without taking into account any power supply extensions registered
+ * on the given power supply. This is mostly useful for power supply extensions that want to access
+ * their own power supply, since using power_supply_get_property() directly could result in a
+ * deadlock.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ return __power_supply_get_property(psy, psp, val, false);
+}
+EXPORT_SYMBOL_GPL(power_supply_get_property_direct);
+
+
+static int __power_supply_set_property(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val, bool use_extensions)
{
struct power_supply_ext_registration *reg;
if (atomic_read(&psy->use_cnt) <= 0)
return -ENODEV;
- scoped_guard(rwsem_read, &psy->extensions_sem) {
- power_supply_for_each_extension(reg, psy) {
- if (power_supply_ext_has_property(reg->ext, psp)) {
+ if (use_extensions) {
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (!power_supply_ext_has_property(reg->ext, psp))
+ continue;
+
if (reg->ext->set_property)
return reg->ext->set_property(psy, reg->ext, reg->data,
psp, val);
@@ -1289,8 +1321,34 @@ int power_supply_set_property(struct power_supply *psy,
return psy->desc->set_property(psy, psp, val);
}
+
+int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ return __power_supply_set_property(psy, psp, val, true);
+}
EXPORT_SYMBOL_GPL(power_supply_set_property);
+/**
+ * power_supply_set_property_direct - Write a power supply property without checking for extensions
+ * @psy: The power supply
+ * @psp: The power supply property to write
+ * @val: The value to write to the power supply property
+ *
+ * Write a power supply property without taking into account any power supply extensions registered
+ * on the given power supply. This is mostly useful for power supply extensions that want to access
+ * their own power supply as using power_supply_set_property() directly will result in a potential
+ * deadlock.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ return __power_supply_set_property(psy, psp, val, false);
+}
+EXPORT_SYMBOL_GPL(power_supply_set_property_direct);
+
int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 5bfdfcf6013b..2c0e9ad820c0 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -259,6 +259,7 @@ static const struct power_supply_config test_power_configs[] = {
static int test_power_battery_extmanufacture_year = 1234;
static int test_power_battery_exttemp_max = 1000;
static const enum power_supply_property test_power_battery_extprops[] = {
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
POWER_SUPPLY_PROP_TEMP_MAX,
};
@@ -270,6 +271,9 @@ static int test_power_battery_extget_property(struct power_supply *psy,
union power_supply_propval *val)
{
switch (psp) {
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ return power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ val);
case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
val->intval = test_power_battery_extmanufacture_year;
break;
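
The test_power hunk above already exercises the new helper from inside an extension callback. A slightly fuller sketch of the same pattern, with a hypothetical extension callback name; the signature mirrors the get_property call site shown in the core patch:

static int example_ext_get_property(struct power_supply *psy,
				    const struct power_supply_ext *ext,
				    void *data, enum power_supply_property psp,
				    union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
		/*
		 * extensions_sem is held for reading while this callback
		 * runs, so going through power_supply_get_property() again
		 * could deadlock; the _direct variant skips the extension
		 * lookup entirely.
		 */
		return power_supply_get_property_direct(psy,
				POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, val);
	default:
		return -EINVAL;
	}
}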
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index b7f15f303ea2..967dc4f9eea8 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -130,6 +130,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
struct ism_req_hdr *req = cmd;
struct ism_resp_hdr *resp = cmd;
+ spin_lock(&ism->cmd_lock);
__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
__ism_write_cmd(ism, req, 0, sizeof(*req));
@@ -143,6 +144,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
}
__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
+ spin_unlock(&ism->cmd_lock);
return resp->ret;
}
@@ -606,6 +608,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
spin_lock_init(&ism->lock);
+ spin_lock_init(&ism->cmd_lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
ism->dev.parent = &pdev->dev;
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index eea93f8f106f..c0e4883be6d0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -518,28 +518,32 @@ static ssize_t qeth_hw_trap_store(struct device *dev,
if (qeth_card_hw_is_reachable(card))
state = 1;
- if (sysfs_streq(buf, "arm") && !card->info.hwtrap) {
- if (state) {
+ if (sysfs_streq(buf, "arm")) {
+ if (state && !card->info.hwtrap) {
if (qeth_is_diagass_supported(card,
QETH_DIAGS_CMD_TRAP)) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
if (!rc)
card->info.hwtrap = 1;
- } else
+ } else {
rc = -EINVAL;
- } else
+ }
+ } else {
card->info.hwtrap = 1;
- } else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) {
- if (state) {
+ }
+ } else if (sysfs_streq(buf, "disarm")) {
+ if (state && card->info.hwtrap) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
if (!rc)
card->info.hwtrap = 0;
- } else
+ } else {
card->info.hwtrap = 0;
- } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap)
+ }
+ } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
- else
+ } else {
rc = -EINVAL;
+ }
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index a12c68b93b1c..7a671a786197 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -238,7 +238,7 @@ static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lo
if (sts & AMD_SDW_IMM_RES_VALID) {
dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
- writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
+ writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
}
writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);
@@ -1209,6 +1209,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ cancel_work_sync(&amd_manager->amd_sdw_work);
amd_sdw_wake_enable(amd_manager, false);
if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
ret = amd_sdw_host_wake_enable(amd_manager, false);
@@ -1219,6 +1220,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
if (ret)
return ret;
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ cancel_work_sync(&amd_manager->amd_sdw_work);
amd_sdw_wake_enable(amd_manager, false);
if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
ret = amd_sdw_host_wake_enable(amd_manager, false);
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 295a46dc2be7..0f45e3404756 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -156,7 +156,6 @@ struct qcom_swrm_port_config {
u8 word_length;
u8 blk_group_count;
u8 lane_control;
- u8 ch_mask;
};
/*
@@ -1049,13 +1048,9 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus,
{
u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank);
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
- struct qcom_swrm_port_config *pcfg;
u32 val;
- pcfg = &ctrl->pconfig[enable_ch->port_num];
ctrl->reg_read(ctrl, reg, &val);
- if (pcfg->ch_mask != SWR_INVALID_PARAM && pcfg->ch_mask != 0)
- enable_ch->ch_mask = pcfg->ch_mask;
if (enable_ch->enable)
val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
@@ -1275,26 +1270,6 @@ static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction)
return ctrl->sruntime[dai->id];
}
-static int qcom_swrm_set_channel_map(struct snd_soc_dai *dai,
- unsigned int tx_num, const unsigned int *tx_slot,
- unsigned int rx_num, const unsigned int *rx_slot)
-{
- struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
- int i;
-
- if (tx_slot) {
- for (i = 0; i < tx_num; i++)
- ctrl->pconfig[i].ch_mask = tx_slot[i];
- }
-
- if (rx_slot) {
- for (i = 0; i < rx_num; i++)
- ctrl->pconfig[i].ch_mask = rx_slot[i];
- }
-
- return 0;
-}
-
static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -1331,7 +1306,6 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.shutdown = qcom_swrm_shutdown,
.set_stream = qcom_swrm_set_sdw_stream,
.get_stream = qcom_swrm_get_sdw_stream,
- .set_channel_map = qcom_swrm_set_channel_map,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 1bc0fdbb1bd7..0ffa3f9f2870 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -4138,10 +4138,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
xfer->tx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
- !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
- !(spi->mode & SPI_TX_QUAD))
+ !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
+ !(spi->mode & SPI_TX_OCTAL))
return -EINVAL;
}
/* Check transfer rx_nbits */
@@ -4154,10 +4157,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
xfer->rx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
- !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
- !(spi->mode & SPI_RX_QUAD))
+ !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
+ !(spi->mode & SPI_RX_OCTAL))
return -EINVAL;
}
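
With the validation relaxed, a controller that advertises only octal capability also accepts dual- and quad-width transfers, and purely octal transfers are now checked explicitly. A minimal peripheral-side sketch of requesting an octal-wide read; the function name is illustrative:

#include <linux/spi/spi.h>

static int example_octal_read(struct spi_device *spi, void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.rx_buf = buf,
		.len = len,
		/* Passes __spi_validate() only if spi->mode has SPI_RX_OCTAL. */
		.rx_nbits = SPI_NBITS_OCTAL,
	};

	return spi_sync_transfer(spi, &xfer, 1);
}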
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 14ad57954a66..e1f5f0a9c8a2 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -267,12 +267,14 @@ static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned int gpio)
return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
}
-static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned int gpio,
- int value)
+static int ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
+
+ return 0;
}
static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
@@ -420,7 +422,7 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
chip->label = "ssb_extif_gpio";
chip->owner = THIS_MODULE;
chip->get = ssb_gpio_extif_get_value;
- chip->set = ssb_gpio_extif_set_value;
+ chip->set_rv = ssb_gpio_extif_set_value;
chip->direction_input = ssb_gpio_extif_direction_input;
chip->direction_output = ssb_gpio_extif_direction_output;
#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
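
This converts the ssb GPIO chips to the int-returning setter so write failures can be reported. A hedged sketch of the same callback shape for a hypothetical regmap-backed chip; struct, register, and function names are made up:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

struct example_gpio {
	struct regmap *map;
	struct gpio_chip chip;
};

static int example_gpio_set(struct gpio_chip *chip, unsigned int offset,
			    int value)
{
	struct example_gpio *g = gpiochip_get_data(chip);

	/* Propagate bus errors instead of silently discarding them. */
	return regmap_update_bits(g->map, 0x04 /* hypothetical OUT register */,
				  BIT(offset), value ? BIT(offset) : 0);
}

During probe the chip would then be wired up with chip->set_rv = example_gpio_set instead of the void-returning chip->set.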
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 5dbf8d53db09..721b15b7e13b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -97,6 +97,13 @@ struct vchiq_arm_state {
* tracked separately with the state.
*/
int peer_use_count;
+
+ /*
+ * Flag to indicate that the first vchiq connect has made it through.
+ * This means that both sides should be fully ready, and we should
+ * be able to suspend after this point.
+ */
+ int first_connect;
};
static int
@@ -273,6 +280,29 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return 0;
}
+int
+vchiq_platform_init_state(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *platform_state;
+
+ platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+ if (!platform_state)
+ return -ENOMEM;
+
+ rwlock_init(&platform_state->susp_res_lock);
+
+ init_completion(&platform_state->ka_evt);
+ atomic_set(&platform_state->ka_use_count, 0);
+ atomic_set(&platform_state->ka_use_ack_count, 0);
+ atomic_set(&platform_state->ka_release_count, 0);
+
+ platform_state->state = state;
+
+ state->platform_state = (struct opaque_platform_state *)platform_state;
+
+ return 0;
+}
+
static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
return (struct vchiq_arm_state *)state->platform_state;
@@ -363,8 +393,7 @@ int vchiq_shutdown(struct vchiq_instance *instance)
struct vchiq_state *state = instance->state;
int ret = 0;
- if (mutex_lock_killable(&state->mutex))
- return -EAGAIN;
+ mutex_lock(&state->mutex);
/* Remove all services */
vchiq_shutdown_internal(state, instance);
@@ -982,39 +1011,6 @@ exit:
}
int
-vchiq_platform_init_state(struct vchiq_state *state)
-{
- struct vchiq_arm_state *platform_state;
- char threadname[16];
-
- platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
- if (!platform_state)
- return -ENOMEM;
-
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
- (void *)state, threadname);
- if (IS_ERR(platform_state->ka_thread)) {
- dev_err(state->dev, "couldn't create thread %s\n", threadname);
- return PTR_ERR(platform_state->ka_thread);
- }
-
- rwlock_init(&platform_state->susp_res_lock);
-
- init_completion(&platform_state->ka_evt);
- atomic_set(&platform_state->ka_use_count, 0);
- atomic_set(&platform_state->ka_use_ack_count, 0);
- atomic_set(&platform_state->ka_release_count, 0);
-
- platform_state->state = state;
-
- state->platform_state = (struct opaque_platform_state *)platform_state;
-
- return 0;
-}
-
-int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
@@ -1329,19 +1325,37 @@ out:
return ret;
}
-void vchiq_platform_connected(struct vchiq_state *state)
-{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
-
- wake_up_process(arm_state->ka_thread);
-}
-
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate)
{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ char threadname[16];
+
dev_dbg(state->dev, "suspend: %d: %s->%s\n",
state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
+ return;
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->first_connect) {
+ write_unlock_bh(&arm_state->susp_res_lock);
+ return;
+ }
+
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (IS_ERR(arm_state->ka_thread)) {
+ dev_err(state->dev, "suspend: Couldn't create thread %s\n",
+ threadname);
+ } else {
+ wake_up_process(arm_state->ka_thread);
+ }
}
static const struct of_device_id vchiq_of_match[] = {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index e7b0c800a205..e2cac0898b8f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -3343,7 +3343,6 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
return -EAGAIN;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
- vchiq_platform_connected(state);
complete(&state->connect);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 3b5c0618e567..9b4e766990a4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -575,8 +575,6 @@ int vchiq_send_remote_use(struct vchiq_state *state);
int vchiq_send_remote_use_active(struct vchiq_state *state);
-void vchiq_platform_connected(struct vchiq_state *state);
-
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate);
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 28febb95f8fa..b6c58a7e7b6a 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1450,7 +1450,7 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
return ret;
data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
- data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
@@ -3437,7 +3437,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
}
}
-static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
if (flags)
tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
@@ -3445,7 +3445,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
tb_sw_dbg(sw, "disabling wakeup\n");
if (tb_switch_is_usb4(sw))
- return usb4_switch_set_wake(sw, flags);
+ return usb4_switch_set_wake(sw, flags, runtime);
return tb_lc_set_wake(sw, flags);
}
@@ -3521,7 +3521,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime)
tb_switch_check_wakes(sw);
/* Disable wakes */
- tb_switch_set_wake(sw, 0);
+ tb_switch_set_wake(sw, 0, true);
err = tb_switch_tmu_init(sw);
if (err)
@@ -3603,7 +3603,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
}
- tb_switch_set_wake(sw, flags);
+ tb_switch_set_wake(sw, flags, runtime);
if (tb_switch_is_usb4(sw))
usb4_switch_set_sleep(sw);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 87afd5a7c504..f503bad86413 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1317,7 +1317,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index fce3c0f2354a..fdae76c8f728 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -403,12 +403,12 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
* usb4_switch_set_wake() - Enable/disable wake
* @sw: USB4 router
* @flags: Wakeup flags (%0 to disable)
+ * @runtime: Wake is being programmed during system runtime
*
* Enables/disables router to wake up from sleep.
*/
-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
- struct usb4_port *usb4;
struct tb_port *port;
u64 route = tb_route(sw);
u32 val;
@@ -438,13 +438,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
val |= PORT_CS_19_WOU4;
} else {
bool configured = val & PORT_CS_19_PC;
- usb4 = port->usb4;
+ bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);
- if (((flags & TB_WAKE_ON_CONNECT) &&
- device_may_wakeup(&usb4->dev)) && !configured)
+ if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
val |= PORT_CS_19_WOC;
- if (((flags & TB_WAKE_ON_DISCONNECT) &&
- device_may_wakeup(&usb4->dev)) && configured)
+ if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
val |= PORT_CS_19_WOD;
if ((flags & TB_WAKE_ON_USB4) && configured)
val |= PORT_CS_19_WOU4;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 508e8c6f01d4..884fefbfd5a1 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -954,7 +954,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
__func__);
return 0;
}
- dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE);
+ dma_sync_sg_for_device(port->dev, priv->sg_tx_p, num, DMA_TO_DEVICE);
priv->desc_tx = desc;
desc->callback = pch_dma_tx_complete;
desc->callback_param = priv;
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index cb3b127b06b6..22749ab0428a 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -93,6 +94,7 @@ static void serial_base_ctrl_release(struct device *dev)
{
struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev);
+ of_node_put(dev->of_node);
kfree(ctrl_dev);
}
@@ -140,6 +142,7 @@ static void serial_base_port_release(struct device *dev)
{
struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+ of_node_put(dev->of_node);
kfree(port_dev);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 3e1215f7a9a0..256fe8c86828 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5751,6 +5751,7 @@ static void port_event(struct usb_hub *hub, int port1)
struct usb_device *hdev = hub->hdev;
u16 portstatus, portchange;
int i = 0;
+ int err;
connect_change = test_bit(port1, hub->change_bits);
clear_bit(port1, hub->event_bits);
@@ -5847,8 +5848,11 @@ static void port_event(struct usb_hub *hub, int port1)
} else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
|| udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&port_dev->dev, "do warm reset, port only\n");
- if (hub_port_reset(hub, port1, NULL,
- HUB_BH_RESET_TIME, true) < 0)
+ err = hub_port_reset(hub, port1, NULL,
+ HUB_BH_RESET_TIME, true);
+ if (!udev && err == -ENOTCONN)
+ connect_change = 0;
+ else if (err < 0)
hub_port_disable(hub, port1, 1);
} else {
dev_dbg(&port_dev->dev, "do warm reset, full device\n");
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index d5b622f78cf3..0637bfbc054e 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -5389,20 +5389,34 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
/* ULPI interface */
gpwrdn |= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY;
- }
- dwc2_writel(hsotg, gpwrdn, GPWRDN);
- udelay(10);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
- /* Suspend the Phy Clock */
- pcgcctl = dwc2_readl(hsotg, PCGCTL);
- pcgcctl |= PCGCTL_STOPPCLK;
- dwc2_writel(hsotg, pcgcctl, PCGCTL);
- udelay(10);
+ /* Suspend the Phy Clock */
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
- gpwrdn = dwc2_readl(hsotg, GPWRDN);
- gpwrdn |= GPWRDN_PMUACTV;
- dwc2_writel(hsotg, gpwrdn, GPWRDN);
- udelay(10);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ } else {
+ /* UTMI+ Interface */
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+ }
/* Set flag to indicate that we are in hibernation */
hsotg->hibernated = 1;
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 7334de85ad10..ca7e1c02773a 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -680,12 +680,12 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
ret = reset_control_deassert(qcom->resets);
if (ret) {
dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
- goto reset_assert;
+ return ret;
}
ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks);
if (ret < 0)
- goto reset_assert;
+ return ret;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -755,8 +755,6 @@ remove_core:
dwc3_core_remove(&qcom->dwc);
clk_disable:
clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);
-reset_assert:
- reset_control_assert(qcom->resets);
return ret;
}
@@ -771,7 +769,6 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);
dwc3_qcom_interconnect_exit(qcom);
- reset_control_assert(qcom->resets);
}
static int dwc3_qcom_pm_suspend(struct device *dev)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index fba2a56dae97..f94ea196ce54 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1065,6 +1065,8 @@ static ssize_t webusb_landingPage_store(struct config_item *item, const char *pa
unsigned int bytes_to_strip = 0;
int l = len;
+ if (!len)
+ return len;
if (page[l - 1] == '\n') {
--l;
++bytes_to_strip;
@@ -1188,6 +1190,8 @@ static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
struct gadget_info *gi = os_desc_item_to_gadget_info(item);
int res, l;
+ if (!len)
+ return len;
l = min_t(int, len, OS_STRING_QW_SIGN_LEN >> 1);
if (page[l - 1] == '\n')
--l;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6869c58367f2..caf4d4cd4b75 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1913,6 +1913,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
* gadget driver here and have everything work;
* that currently misbehaves.
*/
+ usb_gadget_set_state(g, USB_STATE_NOTATTACHED);
/* Force check of devctl register for PM runtime */
pm_runtime_mark_last_busy(musb->controller);
@@ -2019,6 +2020,7 @@ void musb_g_disconnect(struct musb *musb)
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
musb_set_state(musb, OTG_STATE_B_IDLE);
+ usb_gadget_set_state(&musb->g, USB_STATE_NOTATTACHED);
break;
case OTG_STATE_B_SRP_INIT:
break;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6ac7a0a5cf07..abfcfca3f971 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -803,6 +803,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ { USB_DEVICE(FTDI_NDI_VID, FTDI_NDI_EMGUIDE_GEMINI_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
{ USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 9acb6f837327..4cc1fae8acb9 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -204,6 +204,9 @@
#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
+#define FTDI_NDI_VID 0x23F2
+#define FTDI_NDI_EMGUIDE_GEMINI_PID 0x0003 /* NDI Emguide Gemini */
+
/*
* ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
*/
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 27879cc57536..147ca50c94be 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1415,6 +1415,9 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x30), /* Telit FE910C04 (ECM) */
+ .driver_info = NCTRL(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */
.driver_info = NCTRL(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
@@ -2343,6 +2346,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */
.driver_info = RSVD(5) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff), /* Foxconn T99W640 MBIM */
+ .driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index b375ad610acd..b58525ec7b4d 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -511,7 +511,8 @@ again:
bch2_dev_usage_read_fast(ca, &req->usage);
avail = dev_buckets_free(ca, req->usage, req->watermark);
- if (req->usage.buckets[BCH_DATA_need_discard] > avail)
+ if (req->usage.buckets[BCH_DATA_need_discard] >
+ min(avail, ca->mi.nbuckets >> 7))
bch2_dev_do_discards(ca);
if (req->usage.buckets[BCH_DATA_need_gc_gens] > avail)
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index a4cc72986e36..590cd29f3e86 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1295,9 +1295,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
- if (updated_range)
- bch2_btree_node_drop_keys_outside_node(b);
-
i = &b->data->keys;
for (k = i->start; k != vstruct_last(i);) {
struct bkey tmp;
@@ -1335,6 +1332,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_node_reset_sib_u64s(b);
+ if (updated_range)
+ bch2_btree_node_drop_keys_outside_node(b);
+
/*
* XXX:
*
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index a18d0f78704d..28875c5c86ad 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -13,6 +13,7 @@
#include <linux/dcache.h>
+#ifdef CONFIG_UNICODE
int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
const struct qstr *str, struct qstr *out_cf)
{
@@ -33,6 +34,7 @@ int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
*out_cf = (struct qstr) QSTR_INIT(buf, ret);
return 0;
}
+#endif
static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
{
@@ -254,6 +256,7 @@ int bch2_dirent_init_name(struct bch_fs *c,
if (!bch2_fs_casefold_enabled(c))
return -EOPNOTSUPP;
+#ifdef CONFIG_UNICODE
memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
char *cf_out = &dirent->v.d_cf_name_block.d_names[name->len];
@@ -279,6 +282,7 @@ int bch2_dirent_init_name(struct bch_fs *c,
dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_len);
EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_len);
+#endif
}
unsigned u64s = dirent_val_u64s(name->len, cf_len);
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index 1e17199cc5c7..0417608c18d5 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -23,8 +23,16 @@ struct bch_fs;
struct bch_hash_info;
struct bch_inode_info;
+#ifdef CONFIG_UNICODE
int bch2_casefold(struct btree_trans *, const struct bch_hash_info *,
const struct qstr *, struct qstr *);
+#else
+static inline int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
+ const struct qstr *str, struct qstr *out_cf)
+{
+ return -EOPNOTSUPP;
+}
+#endif
static inline int bch2_maybe_casefold(struct btree_trans *trans,
const struct bch_hash_info *info,
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index cd184b219a65..e0874ad9a6cf 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -166,6 +166,7 @@ static noinline void promote_free(struct bch_read_bio *rbio)
BUG_ON(ret);
async_object_list_del(c, promote, op->list_idx);
+ async_object_list_del(c, rbio, rbio->list_idx);
bch2_data_update_exit(&op->write);
@@ -456,6 +457,10 @@ static void bch2_rbio_done(struct bch_read_bio *rbio)
if (rbio->start_time)
bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
rbio->start_time);
+#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS
+ if (rbio->list_idx)
+ async_object_list_del(rbio->c, rbio, rbio->list_idx);
+#endif
bio_endio(&rbio->bio);
}
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index dd3f3434c1b0..9e028dbcc3d0 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1767,6 +1767,7 @@ static CLOSURE_CALLBACK(journal_write_done)
closure_wake_up(&c->freelist_wait);
bch2_reset_alloc_cursors(c);
+ do_discards = true;
}
j->seq_ondisk = seq;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 27e68d470ad0..5e6de91a8763 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -71,7 +71,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
if (ret)
return ret;
- struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
+ struct bch_dev *ca = bch2_dev_bucket_tryget(c, k.k->p);
if (!ca)
goto out;
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index c08e4a66ac07..3e0576d9db1d 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -347,8 +347,6 @@ int __cachefiles_write(struct cachefiles_object *object,
default:
ki->was_async = false;
cachefiles_write_complete(&ki->iocb, ret);
- if (ret > 0)
- ret = 0;
break;
}
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index d9bc67176128..a7ed86fa98bb 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -83,10 +83,8 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
- if (!ret) {
- ret = len;
+ if (ret > 0)
kiocb->ki_pos += ret;
- }
out:
fput(file);
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index c900d98bf494..284d6dbba2ec 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -390,10 +390,16 @@ static int efivarfs_reconfigure(struct fs_context *fc)
return 0;
}
+static void efivarfs_free(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
+
static const struct fs_context_operations efivarfs_context_ops = {
.get_tree = efivarfs_get_tree,
.parse_param = efivarfs_parse_param,
.reconfigure = efivarfs_reconfigure,
+ .free = efivarfs_free,
};
static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor,
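
Registering a .free callback closes the leak of s_fs_info when the context is torn down before get_tree() runs. A minimal sketch of the allocate-in-init, release-in-free pairing, using hypothetical example_* names and omitting the other fs_context operations:

#include <linux/fs_context.h>
#include <linux/slab.h>

struct example_fs_info {
	unsigned long flags;
};

static void example_fc_free(struct fs_context *fc)
{
	/* Runs even when get_tree() never did; kfree(NULL) is a no-op. */
	kfree(fc->s_fs_info);
}

static const struct fs_context_operations example_context_ops = {
	.free	= example_fc_free,
};

static int example_init_fs_context(struct fs_context *fc)
{
	fc->s_fs_info = kzalloc(sizeof(struct example_fs_info), GFP_KERNEL);
	if (!fc->s_fs_info)
		return -ENOMEM;

	fc->ops = &example_context_ops;
	return 0;
}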
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 3729391a18f3..fb4519158f3a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -71,6 +71,9 @@ static void iomap_set_range_uptodate(struct folio *folio, size_t off,
unsigned long flags;
bool uptodate = true;
+ if (folio_test_uptodate(folio))
+ return;
+
if (ifs) {
spin_lock_irqsave(&ifs->state_lock, flags);
uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index d5da9817df9b..33e6a620c103 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1440,9 +1440,16 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_op = &page_symlink_inode_operations;
inode_nohighmem(inode);
inode->i_data.a_ops = &isofs_symlink_aops;
- } else
+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ } else {
+ printk(KERN_DEBUG "ISOFS: Invalid file type 0%04o for inode %lu.\n",
+ inode->i_mode, inode->i_ino);
+ ret = -EIO;
+ goto fail;
+ }
ret = 0;
out:
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
index 5bbe906a551d..8097bc069c1d 100644
--- a/fs/netfs/read_pgpriv2.c
+++ b/fs/netfs/read_pgpriv2.c
@@ -110,6 +110,8 @@ static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
if (!creq->io_streams[1].avail)
goto cancel_put;
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &creq->flags);
+ trace_netfs_copy2cache(rreq, creq);
trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
netfs_stat(&netfs_n_wh_copy_to_cache);
rreq->copy_to_cache = creq;
@@ -154,6 +156,9 @@ void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
netfs_issue_write(creq, &creq->io_streams[1]);
smp_wmb(); /* Write lists before ALL_QUEUED. */
set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_end_copy_to_cache);
+ if (list_empty_careful(&creq->io_streams[1].subrequests))
+ netfs_wake_collector(creq);
netfs_put_request(creq, netfs_rreq_trace_put_return);
creq->copy_to_cache = NULL;
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index c4cdaf5fa7ed..9fb73bafd41d 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -308,6 +308,10 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
goto out_err;
}
+ error = file_f_owner_allocate(filp);
+ if (error)
+ goto out_err;
+
/* new fsnotify mark, we expect most fcntl calls to add a new mark */
new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
if (!new_dn_mark) {
@@ -315,10 +319,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
goto out_err;
}
- error = file_f_owner_allocate(filp);
- if (error)
- goto out_err;
-
/* set up the new_fsn_mark and new_dn_mark */
new_fsn_mark = &new_dn_mark->fsn_mark;
fsnotify_init_mark(new_fsn_mark, dnotify_group);
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 69919be1c9d8..4625e097e3a0 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -319,7 +319,7 @@ static long pidfd_info(struct file *file, unsigned int cmd, unsigned long arg)
if (!c)
return -ESRCH;
- if (!(kinfo.mask & PIDFD_INFO_COREDUMP)) {
+ if ((kinfo.mask & PIDFD_INFO_COREDUMP) && !(kinfo.coredump_mask)) {
task_lock(task);
if (task->mm)
kinfo.coredump_mask = pidfs_coredump_mask(task->mm->flags);
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index 1c6e5389c51f..5223edf6d11a 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -190,6 +190,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
int disposition;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
+ struct cached_fid *parent_cfid = NULL;
int rdwr_for_fscache = 0;
__le32 lease_flags = 0;
@@ -313,10 +314,10 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
+
retry_open:
if (tcon->cfids && direntry->d_parent && server->dialect >= SMB30_PROT_ID) {
- struct cached_fid *parent_cfid;
-
+ parent_cfid = NULL;
spin_lock(&tcon->cfids->cfid_list_lock);
list_for_each_entry(parent_cfid, &tcon->cfids->entries, entry) {
if (parent_cfid->dentry == direntry->d_parent) {
@@ -327,6 +328,7 @@ retry_open:
memcpy(fid->parent_lease_key,
parent_cfid->fid.lease_key,
SMB2_LEASE_KEY_SIZE);
+ parent_cfid->dirents.is_valid = false;
}
break;
}
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index e9212da32f01..1421bde045c2 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -3088,7 +3088,8 @@ void cifs_oplock_break(struct work_struct *work)
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
struct inode *inode = d_inode(cfile->dentry);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct super_block *sb = inode->i_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
@@ -3098,6 +3099,12 @@ void cifs_oplock_break(struct work_struct *work)
__u64 persistent_fid, volatile_fid;
__u16 net_fid;
+ /*
+ * Hold a reference to the superblock to prevent it and its inodes from
+ * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
+ * may release the last reference to the sb and trigger inode eviction.
+ */
+ cifs_sb_active(sb);
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@@ -3170,6 +3177,7 @@ oplock_break_ack:
cifs_put_tlink(tlink);
out:
cifs_done_oplock_break(cinode);
+ cifs_sb_deactive(sb);
}
static int cifs_swap_activate(struct swap_info_struct *sis,
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 2a3e46b8e15a..a11a2a693c51 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -1346,7 +1346,8 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
* empty object on the server.
*/
if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
- return ERR_PTR(-EOPNOTSUPP);
+ if (!tcon->posix_extensions)
+ return ERR_PTR(-EOPNOTSUPP);
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
SYNCHRONIZE | DELETE |
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 1468c16ea9b8..938a8a7c5d21 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4316,6 +4316,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
u8 key[SMB3_ENC_DEC_KEY_SIZE];
struct aead_request *req;
u8 *iv;
+ DECLARE_CRYPTO_WAIT(wait);
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
void *creq;
size_t sensitive_size;
@@ -4366,7 +4367,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
aead_request_set_crypt(req, sg, sg, crypt_len, iv);
aead_request_set_ad(req, assoc_data_len);
- rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
@@ -5255,7 +5260,8 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
full_path, mode, dev);
- } else if (le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) {
+ } else if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)
+ || (tcon->posix_extensions)) {
rc = smb2_mknod_reparse(xid, inode, dentry, tcon,
full_path, mode, dev);
}
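
The crypt_message() change adopts the standard synchronous wait for AEAD implementations that may complete asynchronously. A generic sketch of that pattern, with an illustrative helper name:

#include <linux/crypto.h>
#include <crypto/aead.h>

static int example_aead_encrypt_sync(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	/*
	 * An async implementation may return -EINPROGRESS or -EBUSY;
	 * crypto_wait_req() sleeps on the completion installed here and
	 * returns the final status instead.
	 */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}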
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index eea718ac66b4..6e4585169f94 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -397,7 +397,7 @@ static int ufs_parse_param(struct fs_context *fc, struct fs_parameter *param)
pr_err("ufstype can't be changed during remount\n");
return -EINVAL;
}
- if (!ctx->flavour) {
+ if (ctx->flavour) {
pr_err("conflicting ufstype options\n");
return -EINVAL;
}
diff --git a/fs/xfs/libxfs/xfs_group.c b/fs/xfs/libxfs/xfs_group.c
index e9d76bcdc820..20ad7c309489 100644
--- a/fs/xfs/libxfs/xfs_group.c
+++ b/fs/xfs/libxfs/xfs_group.c
@@ -163,7 +163,8 @@ xfs_group_free(
xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
- kfree(xg->xg_busy_extents);
+ if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
+ kfree(xg->xg_busy_extents);
#endif
if (uninit)
@@ -189,9 +190,11 @@ xfs_group_insert(
xg->xg_type = type;
#ifdef __KERNEL__
- xg->xg_busy_extents = xfs_extent_busy_alloc();
- if (!xg->xg_busy_extents)
- return -ENOMEM;
+ if (xfs_group_has_extent_busy(mp, type)) {
+ xg->xg_busy_extents = xfs_extent_busy_alloc();
+ if (!xg->xg_busy_extents)
+ return -ENOMEM;
+ }
spin_lock_init(&xg->xg_state_lock);
xfs_hooks_init(&xg->xg_rmap_update_hooks);
#endif
@@ -210,7 +213,8 @@ xfs_group_insert(
out_drain:
xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
- kfree(xg->xg_busy_extents);
+ if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
+ kfree(xg->xg_busy_extents);
#endif
return error;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index ba5bd6031ece..f9ef3b2a332a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1683,7 +1683,7 @@ xfs_free_buftarg(
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
/* the main block device is closed by kill_block_super */
if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
- bdev_fput(btp->bt_bdev_file);
+ bdev_fput(btp->bt_file);
kfree(btp);
}
@@ -1712,8 +1712,8 @@ xfs_configure_buftarg_atomic_writes(
max_bytes = 0;
}
- btp->bt_bdev_awu_min = min_bytes;
- btp->bt_bdev_awu_max = max_bytes;
+ btp->bt_awu_min = min_bytes;
+ btp->bt_awu_max = max_bytes;
}
/* Configure a buffer target that abstracts a block device. */
@@ -1738,14 +1738,9 @@ xfs_configure_buftarg(
return -EINVAL;
}
- /*
- * Flush the block device pagecache so our bios see anything dirtied
- * before mount.
- */
if (bdev_can_atomic_write(btp->bt_bdev))
xfs_configure_buftarg_atomic_writes(btp);
-
- return sync_blockdev(btp->bt_bdev);
+ return 0;
}
int
@@ -1803,7 +1798,7 @@ xfs_alloc_buftarg(
btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);
btp->bt_mount = mp;
- btp->bt_bdev_file = bdev_file;
+ btp->bt_file = bdev_file;
btp->bt_bdev = file_bdev(bdev_file);
btp->bt_dev = btp->bt_bdev->bd_dev;
btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 15fc56948346..b269e115d9ac 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -94,7 +94,6 @@ void xfs_buf_cache_destroy(struct xfs_buf_cache *bch);
*/
struct xfs_buftarg {
dev_t bt_dev;
- struct file *bt_bdev_file;
struct block_device *bt_bdev;
struct dax_device *bt_daxdev;
struct file *bt_file;
@@ -112,9 +111,9 @@ struct xfs_buftarg {
struct percpu_counter bt_readahead_count;
struct ratelimit_state bt_ioerror_rl;
- /* Atomic write unit values, bytes */
- unsigned int bt_bdev_awu_min;
- unsigned int bt_bdev_awu_max;
+ /* Hardware atomic write unit values, bytes */
+ unsigned int bt_awu_min;
+ unsigned int bt_awu_max;
/* built-in cache, if we're not using the perag one */
struct xfs_buf_cache bt_cache[];
@@ -375,7 +374,6 @@ extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
int xfs_configure_buftarg(struct xfs_buftarg *btp, unsigned int sectorsize);
-#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 94d0873bcd62..603d51365645 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -103,24 +103,6 @@ xfs_discard_endio(
bio_put(bio);
}
-static inline struct block_device *
-xfs_group_bdev(
- const struct xfs_group *xg)
-{
- struct xfs_mount *mp = xg->xg_mount;
-
- switch (xg->xg_type) {
- case XG_TYPE_AG:
- return mp->m_ddev_targp->bt_bdev;
- case XG_TYPE_RTG:
- return mp->m_rtdev_targp->bt_bdev;
- default:
- ASSERT(0);
- break;
- }
- return NULL;
-}
-
/*
* Walk the discard list and issue discards on all the busy extents in the
* list. We plug and chain the bios so that we only need a single completion
@@ -138,11 +120,14 @@ xfs_discard_extents(
blk_start_plug(&plug);
list_for_each_entry(busyp, &extents->extent_list, list) {
- trace_xfs_discard_extent(busyp->group, busyp->bno,
- busyp->length);
+ struct xfs_group *xg = busyp->group;
+ struct xfs_buftarg *btp =
+ xfs_group_type_buftarg(xg->xg_mount, xg->xg_type);
+
+ trace_xfs_discard_extent(xg, busyp->bno, busyp->length);
- error = __blkdev_issue_discard(xfs_group_bdev(busyp->group),
- xfs_gbno_to_daddr(busyp->group, busyp->bno),
+ error = __blkdev_issue_discard(btp->bt_bdev,
+ xfs_gbno_to_daddr(xg, busyp->bno),
XFS_FSB_TO_BB(mp, busyp->length),
GFP_KERNEL, &bio);
if (error && error != -EOPNOTSUPP) {
diff --git a/fs/xfs/xfs_extent_busy.h b/fs/xfs/xfs_extent_busy.h
index f069b04e8ea1..3e6e019b6146 100644
--- a/fs/xfs/xfs_extent_busy.h
+++ b/fs/xfs/xfs_extent_busy.h
@@ -68,4 +68,12 @@ static inline void xfs_extent_busy_sort(struct list_head *list)
list_sort(NULL, list, xfs_extent_busy_ag_cmp);
}
+/*
+ * Zoned RTGs don't need to track busy extents, as the actual block freeing only
+ * happens by a zone reset, which forces out all transactions that touched the
+ * zone being reset first.
+ */
+#define xfs_group_has_extent_busy(mp, type) \
+ ((type) == XG_TYPE_AG || !xfs_has_zoned((mp)))
+
#endif /* __XFS_EXTENT_BUSY_H__ */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 0b41b18debf3..38e365b16348 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -752,7 +752,7 @@ xfs_file_dio_write_atomic(
* HW offload should be faster, so try that first if it is already
* known that the write length is not too large.
*/
- if (ocount > xfs_inode_buftarg(ip)->bt_bdev_awu_max)
+ if (ocount > xfs_inode_buftarg(ip)->bt_awu_max)
dops = &xfs_atomic_write_cow_iomap_ops;
else
dops = &xfs_direct_write_iomap_ops;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index d7e2b902ef5c..07fbdcc4cbf5 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -358,7 +358,7 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip)
static inline bool xfs_inode_can_hw_atomic_write(const struct xfs_inode *ip)
{
- return xfs_inode_buftarg(ip)->bt_bdev_awu_max > 0;
+ return xfs_inode_buftarg(ip)->bt_awu_max > 0;
}
/*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index ff05e6b1b0bb..ec30b78bf5c4 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -827,7 +827,7 @@ xfs_bmap_hw_atomic_write_possible(
/*
* The ->iomap_begin caller should ensure this, but check anyway.
*/
- return len <= xfs_inode_buftarg(ip)->bt_bdev_awu_max;
+ return len <= xfs_inode_buftarg(ip)->bt_awu_max;
}
static int
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 8cddbb7c149b..01e597290eb5 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -665,7 +665,7 @@ xfs_get_atomic_write_max_opt(
* less than our out of place write limit, but we don't want to exceed
* the awu_max.
*/
- return min(awu_max, xfs_inode_buftarg(ip)->bt_bdev_awu_max);
+ return min(awu_max, xfs_inode_buftarg(ip)->bt_awu_max);
}
static void
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 29276fe60df9..0b690bc119d7 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -171,19 +171,16 @@ xfs_readsb(
ASSERT(mp->m_ddev_targp != NULL);
/*
- * For the initial read, we must guess at the sector
- * size based on the block device. It's enough to
- * get the sb_sectsize out of the superblock and
- * then reread with the proper length.
- * We don't verify it yet, because it may not be complete.
+ * In the first pass, use the device sector size to just read enough
+ * of the superblock to extract the XFS sector size.
+ *
+ * The device sector size must be smaller than or equal to the XFS
+ * sector size and thus we can always read the superblock. Once we know
+ * the XFS sector size, re-read it and run the buffer verifier.
*/
- sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
+ sector_size = mp->m_ddev_targp->bt_logical_sectorsize;
buf_ops = NULL;
- /*
- * Allocate a (locked) buffer to hold the superblock. This will be kept
- * around at all times to optimize access to the superblock.
- */
reread:
error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
BTOBB(sector_size), &bp, buf_ops);
@@ -247,6 +244,10 @@ reread:
/* no need to be quiet anymore, so reset the buf ops */
bp->b_ops = &xfs_sb_buf_ops;
+ /*
+ * Keep a pointer to the sb buffer around instead of caching it in the
+ * buffer cache because we access it frequently.
+ */
mp->m_sb_bp = bp;
xfs_buf_unlock(bp);
return 0;
@@ -678,68 +679,46 @@ static inline unsigned int max_pow_of_two_factor(const unsigned int nr)
}
/*
- * If the data device advertises atomic write support, limit the size of data
- * device atomic writes to the greatest power-of-two factor of the AG size so
- * that every atomic write unit aligns with the start of every AG. This is
- * required so that the per-AG allocations for an atomic write will always be
+ * If the underlying device advertises atomic write support, limit the size of
+ * atomic writes to the greatest power-of-two factor of the group size so
+ * that every atomic write unit aligns with the start of every group. This is
+ * required so that the allocations for an atomic write will always be
* aligned compatibly with the alignment requirements of the storage.
*
- * If the data device doesn't advertise atomic writes, then there are no
- * alignment restrictions and the largest out-of-place write we can do
- * ourselves is the number of blocks that user files can allocate from any AG.
- */
-static inline xfs_extlen_t xfs_calc_perag_awu_max(struct xfs_mount *mp)
-{
- if (mp->m_ddev_targp->bt_bdev_awu_min > 0)
- return max_pow_of_two_factor(mp->m_sb.sb_agblocks);
- return rounddown_pow_of_two(mp->m_ag_max_usable);
-}
-
-/*
- * Reflink on the realtime device requires rtgroups, and atomic writes require
- * reflink.
- *
- * If the realtime device advertises atomic write support, limit the size of
- * data device atomic writes to the greatest power-of-two factor of the rtgroup
- * size so that every atomic write unit aligns with the start of every rtgroup.
- * This is required so that the per-rtgroup allocations for an atomic write
- * will always be aligned compatibly with the alignment requirements of the
- * storage.
- *
- * If the rt device doesn't advertise atomic writes, then there are no
- * alignment restrictions and the largest out-of-place write we can do
- * ourselves is the number of blocks that user files can allocate from any
- * rtgroup.
+ * If the device doesn't advertise atomic writes, then there are no alignment
+ * restrictions and the largest out-of-place write we can do ourselves is the
+ * number of blocks that user files can allocate from any group.
*/
-static inline xfs_extlen_t xfs_calc_rtgroup_awu_max(struct xfs_mount *mp)
+static xfs_extlen_t
+xfs_calc_group_awu_max(
+ struct xfs_mount *mp,
+ enum xfs_group_type type)
{
- struct xfs_groups *rgs = &mp->m_groups[XG_TYPE_RTG];
+ struct xfs_groups *g = &mp->m_groups[type];
+ struct xfs_buftarg *btp = xfs_group_type_buftarg(mp, type);
- if (rgs->blocks == 0)
+ if (g->blocks == 0)
return 0;
- if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_bdev_awu_min > 0)
- return max_pow_of_two_factor(rgs->blocks);
- return rounddown_pow_of_two(rgs->blocks);
+ if (btp && btp->bt_awu_min > 0)
+ return max_pow_of_two_factor(g->blocks);
+ return rounddown_pow_of_two(g->blocks);
}
/* Compute the maximum atomic write unit size for each section. */
static inline void
xfs_calc_atomic_write_unit_max(
- struct xfs_mount *mp)
+ struct xfs_mount *mp,
+ enum xfs_group_type type)
{
- struct xfs_groups *ags = &mp->m_groups[XG_TYPE_AG];
- struct xfs_groups *rgs = &mp->m_groups[XG_TYPE_RTG];
+ struct xfs_groups *g = &mp->m_groups[type];
const xfs_extlen_t max_write = xfs_calc_atomic_write_max(mp);
const xfs_extlen_t max_ioend = xfs_reflink_max_atomic_cow(mp);
- const xfs_extlen_t max_agsize = xfs_calc_perag_awu_max(mp);
- const xfs_extlen_t max_rgsize = xfs_calc_rtgroup_awu_max(mp);
-
- ags->awu_max = min3(max_write, max_ioend, max_agsize);
- rgs->awu_max = min3(max_write, max_ioend, max_rgsize);
+ const xfs_extlen_t max_gsize = xfs_calc_group_awu_max(mp, type);
- trace_xfs_calc_atomic_write_unit_max(mp, max_write, max_ioend,
- max_agsize, max_rgsize);
+ g->awu_max = min3(max_write, max_ioend, max_gsize);
+ trace_xfs_calc_atomic_write_unit_max(mp, type, max_write, max_ioend,
+ max_gsize, g->awu_max);
}
/*
@@ -757,7 +736,8 @@ xfs_set_max_atomic_write_opt(
max(mp->m_groups[XG_TYPE_AG].blocks,
mp->m_groups[XG_TYPE_RTG].blocks);
const xfs_extlen_t max_group_write =
- max(xfs_calc_perag_awu_max(mp), xfs_calc_rtgroup_awu_max(mp));
+ max(xfs_calc_group_awu_max(mp, XG_TYPE_AG),
+ xfs_calc_group_awu_max(mp, XG_TYPE_RTG));
int error;
if (new_max_bytes == 0)
@@ -813,7 +793,8 @@ set_limit:
return error;
}
- xfs_calc_atomic_write_unit_max(mp);
+ xfs_calc_atomic_write_unit_max(mp, XG_TYPE_AG);
+ xfs_calc_atomic_write_unit_max(mp, XG_TYPE_RTG);
mp->m_awu_max_bytes = new_max_bytes;
return 0;
}
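
As an aside, the computation above reduces to three independent caps combined
with min3(): the administrative maximum write size, the largest COW ioend, and
a power-of-two slice of the group size (its greatest power-of-two factor when
the device advertises atomic writes, otherwise the group size rounded down to
a power of two). A minimal stand-alone C sketch of that arithmetic, plain
user-space code with made-up input values rather than the kernel helpers:

#include <stdio.h>

/* greatest power-of-two factor of nr, i.e. its lowest set bit */
static unsigned int max_pow_of_two_factor(unsigned int nr)
{
	return nr & (~nr + 1);
}

/* largest power of two <= nr */
static unsigned int rounddown_pow_of_two(unsigned int nr)
{
	while (nr & (nr - 1))
		nr &= nr - 1;	/* clear low bits until one remains */
	return nr;
}

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned int blocks = 163840;	/* blocks per group (example value) */
	int awu_min = 1;		/* nonzero: device advertises atomic writes */
	unsigned int max_write = 64, max_ioend = 128, gmax;

	gmax = awu_min ? max_pow_of_two_factor(blocks)
		       : rounddown_pow_of_two(blocks);
	printf("awu_max = %u blocks\n", min3u(max_write, max_ioend, gmax));
	return 0;
}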
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index d85084f9f317..97de44c32272 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -802,4 +802,21 @@ static inline void xfs_mod_sb_delalloc(struct xfs_mount *mp, int64_t delta)
int xfs_set_max_atomic_write_opt(struct xfs_mount *mp,
unsigned long long new_max_bytes);
+static inline struct xfs_buftarg *
+xfs_group_type_buftarg(
+ struct xfs_mount *mp,
+ enum xfs_group_type type)
+{
+ switch (type) {
+ case XG_TYPE_AG:
+ return mp->m_ddev_targp;
+ case XG_TYPE_RTG:
+ return mp->m_rtdev_targp;
+ default:
+ ASSERT(0);
+ break;
+ }
+ return NULL;
+}
+
#endif /* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index 3545dc1d953c..42e9c72b85c0 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -253,8 +253,7 @@ xfs_dax_notify_dev_failure(
return -EOPNOTSUPP;
}
- error = xfs_dax_translate_range(type == XG_TYPE_RTG ?
- mp->m_rtdev_targp : mp->m_ddev_targp,
+ error = xfs_dax_translate_range(xfs_group_type_buftarg(mp, type),
offset, len, &daddr, &bblen);
if (error)
return error;
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ba45d801df1c..78be223b13b2 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -171,36 +171,33 @@ DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list);
DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list);
TRACE_EVENT(xfs_calc_atomic_write_unit_max,
- TP_PROTO(struct xfs_mount *mp, unsigned int max_write,
- unsigned int max_ioend, unsigned int max_agsize,
- unsigned int max_rgsize),
- TP_ARGS(mp, max_write, max_ioend, max_agsize, max_rgsize),
+ TP_PROTO(struct xfs_mount *mp, enum xfs_group_type type,
+ unsigned int max_write, unsigned int max_ioend,
+ unsigned int max_gsize, unsigned int awu_max),
+ TP_ARGS(mp, type, max_write, max_ioend, max_gsize, awu_max),
TP_STRUCT__entry(
__field(dev_t, dev)
+ __field(enum xfs_group_type, type)
__field(unsigned int, max_write)
__field(unsigned int, max_ioend)
- __field(unsigned int, max_agsize)
- __field(unsigned int, max_rgsize)
- __field(unsigned int, data_awu_max)
- __field(unsigned int, rt_awu_max)
+ __field(unsigned int, max_gsize)
+ __field(unsigned int, awu_max)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
+ __entry->type = type;
__entry->max_write = max_write;
__entry->max_ioend = max_ioend;
- __entry->max_agsize = max_agsize;
- __entry->max_rgsize = max_rgsize;
- __entry->data_awu_max = mp->m_groups[XG_TYPE_AG].awu_max;
- __entry->rt_awu_max = mp->m_groups[XG_TYPE_RTG].awu_max;
+ __entry->max_gsize = max_gsize;
+ __entry->awu_max = awu_max;
),
- TP_printk("dev %d:%d max_write %u max_ioend %u max_agsize %u max_rgsize %u data_awu_max %u rt_awu_max %u",
+ TP_printk("dev %d:%d %s max_write %u max_ioend %u max_gsize %u awu_max %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XG_TYPE_STRINGS),
__entry->max_write,
__entry->max_ioend,
- __entry->max_agsize,
- __entry->max_rgsize,
- __entry->data_awu_max,
- __entry->rt_awu_max)
+ __entry->max_gsize,
+ __entry->awu_max)
);
TRACE_EVENT(xfs_calc_max_atomic_write_fsblocks,
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 0f641a9091ec..ac5cecec9aa1 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -243,7 +243,7 @@ __xfs_xattr_put_listent(
offset = context->buffer + context->count;
memcpy(offset, prefix, prefix_len);
offset += prefix_len;
- strncpy(offset, (char *)name, namelen); /* real name */
+ memcpy(offset, (char *)name, namelen); /* real name */
offset += namelen;
*offset = '\0';
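
The attribute name copied above is a counted byte string that is not
NUL-terminated, so the code memcpy()s exactly namelen bytes and appends the
terminator by hand instead of relying on strncpy() semantics. A small
stand-alone sketch of the same pattern, with hypothetical prefix and buffer
sizes:

#include <stdio.h>
#include <string.h>

static void put_listent(char *dst, const char *prefix,
			const char *name, size_t namelen)
{
	size_t plen = strlen(prefix);

	memcpy(dst, prefix, plen);
	memcpy(dst + plen, name, namelen);	/* real name, exact length */
	dst[plen + namelen] = '\0';		/* terminate explicitly */
}

int main(void)
{
	char buf[64];

	put_listent(buf, "user.", "comment", 7);
	printf("%s\n", buf);	/* prints "user.comment" */
	return 0;
}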
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 9689a7c5dd36..513837632b7d 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
u64 new_size,
struct list_head *blocks);
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
void drm_buddy_free_list(struct drm_buddy *mm,
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 55cfebc658e6..8a2f652a05ff 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -119,6 +119,7 @@ int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
struct icc_node *icc_node_create_dyn(void);
struct icc_node *icc_node_create(int id);
void icc_node_destroy(int id);
+int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name);
int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node);
int icc_link_create(struct icc_node *node, const int dst_id);
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
@@ -152,6 +153,12 @@ static inline void icc_node_destroy(int id)
{
}
+static inline int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider,
+ const char *name)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node)
{
return -EOPNOTSUPP;
diff --git a/include/linux/ism.h b/include/linux/ism.h
index 5428edd90982..8358b4cd7ba6 100644
--- a/include/linux/ism.h
+++ b/include/linux/ism.h
@@ -28,6 +28,7 @@ struct ism_dmb {
struct ism_dev {
spinlock_t lock; /* protects the ism device */
+ spinlock_t cmd_lock; /* serializes cmds */
struct list_head list;
struct pci_dev *pdev;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 639dd0b56655..ed4130e49c27 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -420,7 +420,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
/* Table 2170 - Flow Table Fields Supported 2 Format */
struct mlx5_ifc_flow_table_fields_supported_2_bits {
- u8 reserved_at_0[0x2];
+ u8 inner_l4_type_ext[0x1];
+ u8 outer_l4_type_ext[0x1];
u8 inner_l4_type[0x1];
u8 outer_l4_type[0x1];
u8 reserved_at_4[0xa];
@@ -429,7 +430,11 @@ struct mlx5_ifc_flow_table_fields_supported_2_bits {
u8 tunnel_header_0_1[0x1];
u8 reserved_at_11[0xf];
- u8 reserved_at_20[0x60];
+ u8 reserved_at_20[0xf];
+ u8 ipsec_next_header[0x1];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -552,6 +557,13 @@ enum {
MLX5_PACKET_L4_TYPE_UDP,
};
+enum {
+ MLX5_PACKET_L4_TYPE_EXT_NONE,
+ MLX5_PACKET_L4_TYPE_EXT_TCP,
+ MLX5_PACKET_L4_TYPE_EXT_UDP,
+ MLX5_PACKET_L4_TYPE_EXT_ICMP,
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -578,10 +590,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_dport[0x10];
u8 l4_type[0x2];
- u8 reserved_at_c2[0xe];
+ u8 l4_type_ext[0x4];
+ u8 reserved_at_c6[0xa];
u8 ipv4_ihl[0x4];
- u8 reserved_at_c4[0x4];
-
+ u8 reserved_at_d4[0x4];
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -689,10 +701,9 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
u8 reserved_at_1a0[0x8];
-
u8 macsec_syndrome[0x8];
u8 ipsec_syndrome[0x8];
- u8 reserved_at_1b8[0x8];
+ u8 ipsec_next_header[0x8];
u8 reserved_at_1c0[0x40];
};
@@ -9983,6 +9994,10 @@ struct mlx5_ifc_pude_reg_bits {
u8 reserved_at_20[0x60];
};
+enum {
+ MLX5_PTYS_CONNECTOR_TYPE_PORT_DA = 0x7,
+};
+
struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
@@ -10019,7 +10034,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_at_160[0x1c];
+ u8 reserved_at_160[0x8];
+ u8 lane_rate_oper[0x14];
u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@@ -10463,10 +10479,19 @@ struct mlx5_ifc_pifr_reg_bits {
u8 port_filter_update_en[8][0x20];
};
+enum {
+ MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
+ MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
+ MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
+};
+
struct mlx5_ifc_pfcc_reg_bits {
- u8 reserved_at_0[0x8];
+ u8 reserved_at_0[0x4];
+ u8 buf_ownership[0x2];
+ u8 reserved_at_6[0x2];
u8 local_port[0x8];
- u8 reserved_at_10[0xb];
+ u8 reserved_at_10[0xa];
+ u8 cable_length_mask[0x1];
u8 ppan_mask_n[0x1];
u8 minor_stall_mask[0x1];
u8 critical_stall_mask[0x1];
@@ -10495,7 +10520,10 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 device_stall_minor_watermark[0x10];
u8 device_stall_critical_watermark[0x10];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x18];
+ u8 cable_length[0x8];
+
+ u8 reserved_at_c0[0x40];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -10596,11 +10624,15 @@ struct mlx5_ifc_mtutc_reg_bits {
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x10];
u8 ppcnt_recovery_counters[0x1];
- u8 reserved_at_11[0xc];
+ u8 reserved_at_11[0x7];
+ u8 cable_length[0x1];
+ u8 reserved_at_19[0x4];
u8 fec_200G_per_lane_in_pplm[0x1];
u8 reserved_at_1e[0x2a];
u8 fec_100G_per_lane_in_pplm[0x1];
- u8 reserved_at_49[0x1f];
+ u8 reserved_at_49[0xa];
+ u8 buffer_ownership[0x1];
+ u8 reserved_at_54[0x14];
u8 fec_50G_per_lane_in_pplm[0x1];
u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fa538feaa8d9..ae50c1641bed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4178,12 +4178,12 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
#ifdef CONFIG_PAGE_POOL
-static inline bool page_pool_page_is_pp(struct page *page)
+static inline bool page_pool_page_is_pp(const struct page *page)
{
return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
}
#else
-static inline bool page_pool_page_is_pp(struct page *page)
+static inline bool page_pool_page_is_pp(const struct page *page)
{
return false;
}
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 7cddfdac2f57..fe3d6d98f8da 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -76,6 +76,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
+#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7
#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
#define SDIO_VENDOR_ID_CYPRESS 0x04b4
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 437769e061b7..13add0c2c407 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -154,6 +154,7 @@ struct phy_attrs {
* @id: id of the phy device
* @ops: function pointers for performing phy operations
* @mutex: mutex to protect phy_ops
+ * @lockdep_key: lockdep information for this mutex
* @init_count: used to protect when the PHY is used by multiple consumers
* @power_count: used to protect when the PHY is used by multiple consumers
* @attrs: used to specify PHY specific attributes
@@ -165,6 +166,7 @@ struct phy {
int id;
const struct phy_ops *ops;
struct mutex mutex;
+ struct lock_class_key lockdep_key;
int init_count;
int power_count;
struct phy_attrs attrs;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 7803edaa8ff8..0cca01b5607b 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -888,15 +888,23 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
extern int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
+int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val);
#else
static inline int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{ return 0; }
+static inline int power_supply_set_property_direct(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{ return 0; }
#endif
extern void power_supply_external_power_changed(struct power_supply *psy);
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 1a5737b3753d..57e478bfaef2 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -621,6 +621,7 @@ void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
int tcp_sock_set_user_timeout(struct sock *sk, int val);
+int tcp_sock_set_maxseg(struct sock *sk, int val);
static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
{
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 2d207cb4837d..4ac082a63173 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -119,6 +119,7 @@ struct cdc_ncm_ctx {
u32 timer_interval;
u32 max_ndp_size;
u8 is_ndp16;
+ u8 filtering_supported;
union {
struct usb_cdc_ncm_ndp16 *delayed_ndp16;
struct usb_cdc_ncm_ndp32 *delayed_ndp32;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 17f2a665dce6..406626ff6cc8 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1424,6 +1424,23 @@ struct cfg80211_unsol_bcast_probe_resp {
};
/**
+ * struct cfg80211_s1g_short_beacon - S1G short beacon data.
+ *
+ * @update: Set to true if the feature configuration should be updated.
+ * @short_head: Short beacon head.
+ * @short_tail: Short beacon tail.
+ * @short_head_len: Short beacon head len.
+ * @short_tail_len: Short beacon tail len.
+ */
+struct cfg80211_s1g_short_beacon {
+ bool update;
+ const u8 *short_head;
+ const u8 *short_tail;
+ size_t short_head_len;
+ size_t short_tail_len;
+};
+
+/**
* struct cfg80211_ap_settings - AP configuration
*
* Used to configure an AP interface.
@@ -1463,6 +1480,8 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
+ * @s1g_long_beacon_period: S1G long beacon period
+ * @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1496,6 +1515,8 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
+ u8 s1g_long_beacon_period;
+ struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
@@ -1507,11 +1528,13 @@ struct cfg80211_ap_settings {
* @beacon: beacon data
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_update {
struct cfg80211_beacon_data beacon;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
/**
@@ -9025,6 +9048,7 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
/**
* cfg80211_rx_spurious_frame - inform userspace about a spurious frame
* @dev: The device the frame matched to
+ * @link_id: the link the frame was received on, -1 if not applicable or unknown
* @addr: the transmitter address
* @gfp: context flags
*
@@ -9034,13 +9058,14 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
-bool cfg80211_rx_spurious_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp);
+bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp);
/**
* cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
* @dev: The device the frame matched to
* @addr: the transmitter address
+ * @link_id: the link the frame was received on, -1 if not applicable or unknown
* @gfp: context flags
*
* This function is used in AP mode (only!) to inform userspace that
@@ -9050,8 +9075,8 @@ bool cfg80211_rx_spurious_frame(struct net_device *dev,
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
-bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp);
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp);
/**
* cfg80211_probe_status - notify userspace about probe status
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index e19184dd1b0f..d8ff24a33459 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -126,6 +126,7 @@
FN(CANFD_RX_INVALID_FRAME) \
FN(CANXL_RX_INVALID_FRAME) \
FN(PFMEMALLOC) \
+ FN(DUALPI2_STEP_DROP) \
FNe(MAX)
/**
@@ -605,6 +606,11 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_PFMEMALLOC,
/**
+ * @SKB_DROP_REASON_DUALPI2_STEP_DROP: dropped by the step drop
+ * threshold of DualPI2 qdisc.
+ */
+ SKB_DROP_REASON_DUALPI2_STEP_DROP,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
*/
diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h
index 6ce6aec6884c..f4880b50e804 100644
--- a/include/net/libeth/xdp.h
+++ b/include/net/libeth/xdp.h
@@ -1292,7 +1292,7 @@ static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
#endif
xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
- page->pp->p.offset, len, true);
+ pp_page_to_nmdesc(page)->pp->p.offset, len, true);
}
/**
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 577fd6a8c372..a45e4bee65d4 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -758,6 +758,8 @@ struct ieee80211_parsed_tpe {
* be updated to 1, even if bss_param_ch_cnt didn't change. This allows
* the link to know that it heard the latest value from its own beacon
* (as opposed to hearing its value from another link's beacon).
+ * @s1g_long_beacon_period: number of beacon intervals between each long
+ * beacon transmission.
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
@@ -857,6 +859,8 @@ struct ieee80211_bss_conf {
u8 bss_param_ch_cnt;
u8 bss_param_ch_cnt_link_id;
+
+ u8 s1g_long_beacon_period;
};
/**
@@ -6029,21 +6033,12 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq);
/**
- * ieee80211_remove_key - remove the given key
- * @keyconf: the parameter passed with the set key
- *
- * Context: Must be called with the wiphy mutex held.
- *
- * Remove the given key. If the key was uploaded to the hardware at the
- * time this function is called, it is not deleted in the hardware but
- * instead assumed to have been removed already.
- */
-void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
-
-/**
* ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
* @vif: the virtual interface to add the key on
- * @keyconf: new key data
+ * @idx: the keyidx of the key
+ * @key_data: the key data
+ * @key_len: the length of the key data. Might be bigger than the actual
+ * key length, but not smaller (for the driver's convenience)
* @link_id: the link id of the key or -1 for non-MLO
*
* When GTK rekeying was done while the system was suspended, (a) new
@@ -6066,13 +6061,11 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
* for the new key for each TID to set up sequence counters properly.
*
* IMPORTANT: If this replaces a key that is present in the hardware,
- * then it will attempt to remove it during this call. In many cases
- * this isn't what you want, so call ieee80211_remove_key() first for
- * the key that's being replaced.
+ * then it will attempt to remove it during this call.
*/
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
+ u8 idx, u8 *key_data, u8 key_len,
int link_id);
/**
diff --git a/include/net/netmem.h b/include/net/netmem.h
index de1d95f04076..f7dacc9e75fd 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -12,6 +12,50 @@
#include <linux/mm.h>
#include <net/net_debug.h>
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ * unsigned long pp_magic;
+ * struct page_pool *pp;
+ * unsigned long _pp_mapping_pad;
+ * unsigned long dma_addr;
+ * atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these
+ * fields without worrying whether the underlying fields belong to a
+ * page or netmem_desc.
+ *
+ * CAUTION: Do not update the fields in netmem_desc without also
+ * updating the anonymous aliasing union in struct net_iov.
+ */
+struct netmem_desc {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+};
+
+#define NETMEM_DESC_ASSERT_OFFSET(pg, desc) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct netmem_desc, desc))
+NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
+NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
+NETMEM_DESC_ASSERT_OFFSET(pp, pp);
+NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
+NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
+NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NETMEM_DESC_ASSERT_OFFSET
+
+/*
+ * Since struct netmem_desc uses the space in struct page, the size
+ * should be checked, until struct netmem_desc has its own instance from
+ * slab, to avoid conflicting with other members within struct page.
+ */
+static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
+
/* net_iov */
DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
@@ -30,13 +74,48 @@ enum net_iov_type {
NET_IOV_MAX = ULONG_MAX
};
+/* A memory descriptor representing abstract networking I/O vectors,
+ * generally for non-page memory that doesn't have a corresponding
+ * struct page and needs to be explicitly allocated through slab.
+ *
+ * net_iovs are allocated and used by networking code, and the size of
+ * the chunk is PAGE_SIZE.
+ *
+ * This memory can be any form of memory not backed by struct page. Examples
+ * include imported dmabuf memory and imported io_uring memory. See
+ * net_iov_type for all the supported types.
+ *
+ * @pp_magic: pp field, similar to the one in struct page/struct
+ * netmem_desc.
+ * @pp: the pp this net_iov belongs to, if any.
+ * @dma_addr: the dma address of the net_iov. Needed for the network
+ * card to send/receive this net_iov.
+ * @pp_ref_count: the pp ref count of this net_iov, exactly the same
+ * usage as struct page/struct netmem_desc.
+ * @owner: the net_iov_area this net_iov belongs to, if any.
+ * @type: the type of the memory. Different types of net_iovs are
+ * supported.
+ */
struct net_iov {
- enum net_iov_type type;
- unsigned long pp_magic;
- struct page_pool *pp;
+ union {
+ struct netmem_desc desc;
+
+ /* XXX: The following part should be removed once all
+ * the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead
+ * of niov->pp.
+ */
+ struct {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+ };
+ };
struct net_iov_area *owner;
- unsigned long dma_addr;
- atomic_long_t pp_ref_count;
+ enum net_iov_type type;
};
struct net_iov_area {
@@ -48,27 +127,22 @@ struct net_iov_area {
unsigned long base_virtual;
};
-/* These fields in struct page are used by the page_pool and net stack:
- *
- * struct {
- * unsigned long pp_magic;
- * struct page_pool *pp;
- * unsigned long _pp_mapping_pad;
- * unsigned long dma_addr;
- * atomic_long_t pp_ref_count;
- * };
- *
- * We mirror the page_pool fields here so the page_pool can access these fields
- * without worrying whether the underlying fields belong to a page or net_iov.
+/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
+ * the page_pool can access these fields without worrying whether the
+ * underlying fields are accessed via netmem_desc or directly via
+ * net_iov, until all the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
*
- * The non-net stack fields of struct page are private to the mm stack and must
- * never be mirrored to net_iov.
+ * The non-net stack fields of struct page are private to the mm stack
+ * and must never be mirrored to net_iov.
*/
-#define NET_IOV_ASSERT_OFFSET(pg, iov) \
- static_assert(offsetof(struct page, pg) == \
+#define NET_IOV_ASSERT_OFFSET(desc, iov) \
+ static_assert(offsetof(struct netmem_desc, desc) == \
offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(_flags, _flags);
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
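
The assertions above are what make the union aliasing legal to rely on: both
views have to keep the shared fields at identical offsets, and any drift is
turned into a build failure. A minimal stand-alone C11 sketch of the same
technique, with toy structure names rather than the kernel's:

#include <assert.h>
#include <stddef.h>

struct desc {
	unsigned long magic;
	void *pool;
	unsigned long dma_addr;
};

struct iov {
	union {
		struct desc desc;
		struct {			/* legacy view, same layout */
			unsigned long magic;
			void *pool;
			unsigned long dma_addr;
		};
	};
	int type;
};

#define ASSERT_OFFSET(d, i) \
	static_assert(offsetof(struct desc, d) == offsetof(struct iov, i), \
		      "layout drift between desc and iov")
ASSERT_OFFSET(magic, magic);
ASSERT_OFFSET(pool, pool);
ASSERT_OFFSET(dma_addr, dma_addr);
#undef ASSERT_OFFSET

int main(void)
{
	return 0;	/* nothing to run; the checks happen at compile time */
}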
@@ -173,6 +247,24 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
return page_to_pfn(netmem_to_page(netmem));
}
+/**
+ * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
+ * @netmem
+ * @netmem: netmem reference to convert
+ *
+ * Unsafe version that can be used only when @netmem is always backed by
+ * system memory, performs faster and generates smaller object code (no
+ * check for the LSB, no WARN). When @netmem points to IOV, provokes
+ * undefined behaviour.
+ *
+ * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
+ * by system memory).
+ */
+static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
+{
+ return (__force struct netmem_desc *)netmem;
+}
+
/* __netmem_clear_lsb - convert netmem_ref to struct net_iov * for access to
* common fields.
* @netmem: netmem reference to extract as net_iov.
@@ -193,6 +285,23 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
}
+/* XXX: How to extract netmem_desc from page must be changed, once
+ * netmem_desc no longer overlays on page and will be allocated through
+ * slab.
+ */
+#define __pp_page_to_nmdesc(p) (_Generic((p), \
+ const struct page * : (const struct netmem_desc *)(p), \
+ struct page * : (struct netmem_desc *)(p)))
+
+/* CAUTION: Check if the page is a pp page before calling this helper or
+ * know it's a pp page.
+ */
+#define pp_page_to_nmdesc(p) \
+({ \
+ DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p)); \
+ __pp_page_to_nmdesc(p); \
+})
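
The _Generic selection in __pp_page_to_nmdesc() exists to preserve constness
across the cast: a const page pointer yields a const descriptor pointer. A
tiny stand-alone C11 sketch of that trick, using stand-in types:

#include <stdio.h>

struct page { int x; };
struct desc { int x; };

#define page_to_desc(p) (_Generic((p),				\
	const struct page * : (const struct desc *)(p),		\
	struct page * : (struct desc *)(p)))

int main(void)
{
	struct page pg = { 42 };
	const struct page *cp = &pg;
	const struct desc *d = page_to_desc(cp);

	/* struct desc *m = page_to_desc(cp);  would warn: drops const */
	printf("desc lives at %p\n", (const void *)d);
	return 0;
}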
+
/**
* __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
* @netmem: netmem reference to get the pointer from
@@ -206,7 +315,7 @@ static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
*/
static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
{
- return __netmem_to_page(netmem)->pp;
+ return __netmem_to_nmdesc(netmem)->pp;
}
static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index a21e276dbe44..f3014e4f54fc 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -441,7 +441,6 @@ int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
void xfrm_flush_gc(void);
-void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
struct module *owner;
@@ -474,7 +473,7 @@ struct xfrm_type_offload {
int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
-void xfrm_set_type_offload(struct xfrm_state *x);
+void xfrm_set_type_offload(struct xfrm_state *x, bool try_load);
static inline void xfrm_unset_type_offload(struct xfrm_state *x)
{
if (!x->type_offload)
@@ -916,7 +915,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
xfrm_pol_put(pols[i]);
}
-void __xfrm_state_destroy(struct xfrm_state *, bool);
+void __xfrm_state_destroy(struct xfrm_state *);
static inline void __xfrm_state_put(struct xfrm_state *x)
{
@@ -926,13 +925,7 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
static inline void xfrm_state_put(struct xfrm_state *x)
{
if (refcount_dec_and_test(&x->refcnt))
- __xfrm_state_destroy(x, false);
-}
-
-static inline void xfrm_state_put_sync(struct xfrm_state *x)
-{
- if (refcount_dec_and_test(&x->refcnt))
- __xfrm_state_destroy(x, true);
+ __xfrm_state_destroy(x);
}
static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1770,7 +1763,7 @@ struct xfrmk_spdinfo {
struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
int xfrm_state_delete(struct xfrm_state *x);
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
bool task_valid);
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 73e96ccbe830..64a382fbc31a 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -55,6 +55,7 @@
EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_dirty, "DIRTY ") \
EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
EM(netfs_rreq_trace_free, "FREE ") \
EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
EM(netfs_rreq_trace_recollect, "RECLLCT") \
@@ -559,6 +560,35 @@ TRACE_EVENT(netfs_write,
__entry->start, __entry->start + __entry->len - 1)
);
+TRACE_EVENT(netfs_copy2cache,
+ TP_PROTO(const struct netfs_io_request *rreq,
+ const struct netfs_io_request *creq),
+
+ TP_ARGS(rreq, creq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, creq)
+ __field(unsigned int, cookie)
+ __field(unsigned int, ino)
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(rreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->rreq = rreq->debug_id;
+ __entry->creq = creq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->ino = rreq->inode->i_ino;
+ ),
+
+ TP_printk("R=%08x CR=%08x c=%08x i=%x ",
+ __entry->rreq,
+ __entry->creq,
+ __entry->cookie,
+ __entry->ino)
+ );
+
TRACE_EVENT(netfs_collect,
TP_PROTO(const struct netfs_io_request *wreq),
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 54e60c6009e3..9d2c36c6a0ed 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -13,17 +13,11 @@
#include <linux/sock_diag.h>
#include <net/rstreason.h>
-/*
- * tcp event with arguments sk and skb
- *
- * Note: this class requires a valid sk pointer; while skb pointer could
- * be NULL.
- */
-DECLARE_EVENT_CLASS(tcp_event_sk_skb,
+TRACE_EVENT(tcp_retransmit_skb,
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb, int err),
- TP_ARGS(sk, skb),
+ TP_ARGS(sk, skb, err),
TP_STRUCT__entry(
__field(const void *, skbaddr)
@@ -36,6 +30,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
__array(__u8, daddr_v6, 16)
+ __field(int, err)
),
TP_fast_assign(
@@ -58,21 +53,17 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->err = err;
),
- TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s err=%d",
__entry->skbaddr, __entry->skaddr,
show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
- show_tcp_state_name(__entry->state))
-);
-
-DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
-
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
-
- TP_ARGS(sk, skb)
+ show_tcp_state_name(__entry->state),
+ __entry->err)
);
#undef FN
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index e72bcc239afd..9fcb25a0f447 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -635,8 +635,6 @@ enum devlink_attr {
DEVLINK_ATTR_REGION_DIRECT, /* flag */
DEVLINK_ATTR_RATE_TC_BWS, /* nested */
- DEVLINK_ATTR_RATE_TC_INDEX, /* u8 */
- DEVLINK_ATTR_RATE_TC_BW, /* u32 */
/* Add new attributes above here, update the spec in
* Documentation/netlink/specs/devlink.yaml and re-generate
@@ -647,6 +645,15 @@ enum devlink_attr {
DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
};
+enum devlink_rate_tc_attr {
+ DEVLINK_RATE_TC_ATTR_UNSPEC,
+ DEVLINK_RATE_TC_ATTR_INDEX, /* u8 */
+ DEVLINK_RATE_TC_ATTR_BW, /* u32 */
+
+ __DEVLINK_RATE_TC_ATTR_MAX,
+ DEVLINK_RATE_TC_ATTR_MAX = __DEVLINK_RATE_TC_ATTR_MAX - 1
+};
+
/* Mapping between internal resource described by the field and system
* structure
*/
diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h
index 130bdf5c3516..e3b8813465d7 100644
--- a/include/uapi/linux/ethtool_netlink_generated.h
+++ b/include/uapi/linux/ethtool_netlink_generated.h
@@ -841,6 +841,8 @@ enum {
ETHTOOL_MSG_TSCONFIG_GET,
ETHTOOL_MSG_TSCONFIG_SET,
ETHTOOL_MSG_RSS_SET,
+ ETHTOOL_MSG_RSS_CREATE_ACT,
+ ETHTOOL_MSG_RSS_DELETE_ACT,
__ETHTOOL_MSG_USER_CNT,
ETHTOOL_MSG_USER_MAX = (__ETHTOOL_MSG_USER_CNT - 1)
@@ -898,6 +900,9 @@ enum {
ETHTOOL_MSG_TSCONFIG_SET_REPLY,
ETHTOOL_MSG_PSE_NTF,
ETHTOOL_MSG_RSS_NTF,
+ ETHTOOL_MSG_RSS_CREATE_ACT_REPLY,
+ ETHTOOL_MSG_RSS_CREATE_NTF,
+ ETHTOOL_MSG_RSS_DELETE_NTF,
__ETHTOOL_MSG_KERNEL_CNT,
ETHTOOL_MSG_KERNEL_MAX = (__ETHTOOL_MSG_KERNEL_CNT - 1)
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 39460334dafb..d1a14f2892d9 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2915,6 +2915,19 @@ enum nl80211_commands {
* applicable to that specific radio only. If the radio id is greater
* than the number of radios, an error denoting an invalid value is returned.
*
+ * @NL80211_ATTR_S1G_LONG_BEACON_PERIOD: (u8) Integer attribute that represents
+ * the number of beacon intervals between each long beacon transmission
+ * for an S1G BSS with short beaconing enabled. This is a required
+ * attribute for initialising an S1G short beaconing BSS. When updating
+ * the short beacon data, this is not required. It has a minimum value of
+ * 2 (i.e. 2 beacon intervals).
+ *
+ * @NL80211_ATTR_S1G_SHORT_BEACON: Nested attribute containing the short beacon
+ * head and tail used to set or update the short beacon templates. When
+ * bringing up a new interface, %NL80211_ATTR_S1G_LONG_BEACON_PERIOD is
+ * required alongside this attribute. Refer to
+ * @enum nl80211_s1g_short_beacon_attrs for the attribute definitions.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3474,6 +3487,9 @@ enum nl80211_attrs {
NL80211_ATTR_WIPHY_RADIO_INDEX,
+ NL80211_ATTR_S1G_LONG_BEACON_PERIOD,
+ NL80211_ATTR_S1G_SHORT_BEACON,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -8148,4 +8164,27 @@ enum nl80211_wiphy_radio_freq_range {
NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST - 1,
};
+/**
+ * enum nl80211_s1g_short_beacon_attrs - S1G short beacon data
+ *
+ * @__NL80211_S1G_SHORT_BEACON_ATTR_INVALID: Invalid
+ *
+ * @NL80211_S1G_SHORT_BEACON_ATTR_HEAD: Short beacon head (binary).
+ * @NL80211_S1G_SHORT_BEACON_ATTR_TAIL: Short beacon tail (binary).
+ *
+ * @__NL80211_S1G_SHORT_BEACON_ATTR_LAST: Internal
+ * @NL80211_S1G_SHORT_BEACON_ATTR_MAX: Highest attribute
+ */
+enum nl80211_s1g_short_beacon_attrs {
+ __NL80211_S1G_SHORT_BEACON_ATTR_INVALID,
+
+ NL80211_S1G_SHORT_BEACON_ATTR_HEAD,
+ NL80211_S1G_SHORT_BEACON_ATTR_TAIL,
+
+ /* keep last */
+ __NL80211_S1G_SHORT_BEACON_ATTR_LAST,
+ NL80211_S1G_SHORT_BEACON_ATTR_MAX =
+ __NL80211_S1G_SHORT_BEACON_ATTR_LAST - 1
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 3e41349f3fa2..c2da76e78bad 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1211,4 +1211,72 @@ enum {
#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+/* DUALPI2 */
+enum tc_dualpi2_drop_overload {
+ TC_DUALPI2_DROP_OVERLOAD_OVERFLOW = 0,
+ TC_DUALPI2_DROP_OVERLOAD_DROP = 1,
+ __TCA_DUALPI2_DROP_OVERLOAD_MAX,
+};
+#define TCA_DUALPI2_DROP_OVERLOAD_MAX (__TCA_DUALPI2_DROP_OVERLOAD_MAX - 1)
+
+enum tc_dualpi2_drop_early {
+ TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE = 0,
+ TC_DUALPI2_DROP_EARLY_DROP_ENQUEUE = 1,
+ __TCA_DUALPI2_DROP_EARLY_MAX,
+};
+#define TCA_DUALPI2_DROP_EARLY_MAX (__TCA_DUALPI2_DROP_EARLY_MAX - 1)
+
+enum tc_dualpi2_ecn_mask {
+ TC_DUALPI2_ECN_MASK_L4S_ECT = 1,
+ TC_DUALPI2_ECN_MASK_CLA_ECT = 2,
+ TC_DUALPI2_ECN_MASK_ANY_ECT = 3,
+ __TCA_DUALPI2_ECN_MASK_MAX,
+};
+#define TCA_DUALPI2_ECN_MASK_MAX (__TCA_DUALPI2_ECN_MASK_MAX - 1)
+
+enum tc_dualpi2_split_gso {
+ TC_DUALPI2_SPLIT_GSO_NO_SPLIT_GSO = 0,
+ TC_DUALPI2_SPLIT_GSO_SPLIT_GSO = 1,
+ __TCA_DUALPI2_SPLIT_GSO_MAX,
+};
+#define TCA_DUALPI2_SPLIT_GSO_MAX (__TCA_DUALPI2_SPLIT_GSO_MAX - 1)
+
+enum {
+ TCA_DUALPI2_UNSPEC,
+ TCA_DUALPI2_LIMIT, /* Packets */
+ TCA_DUALPI2_MEMORY_LIMIT, /* Bytes */
+ TCA_DUALPI2_TARGET, /* us */
+ TCA_DUALPI2_TUPDATE, /* us */
+ TCA_DUALPI2_ALPHA, /* Hz scaled up by 256 */
+ TCA_DUALPI2_BETA, /* Hz scaled up by 256 */
+ TCA_DUALPI2_STEP_THRESH_PKTS, /* Step threshold in packets */
+ TCA_DUALPI2_STEP_THRESH_US, /* Step threshold in microseconds */
+ TCA_DUALPI2_MIN_QLEN_STEP, /* Minimum qlen to apply STEP_THRESH */
+ TCA_DUALPI2_COUPLING, /* Coupling factor between queues */
+ TCA_DUALPI2_DROP_OVERLOAD, /* Whether to drop on overload */
+ TCA_DUALPI2_DROP_EARLY, /* Whether to drop on enqueue */
+ TCA_DUALPI2_C_PROTECTION, /* Percentage */
+ TCA_DUALPI2_ECN_MASK, /* L4S queue classification mask */
+ TCA_DUALPI2_SPLIT_GSO, /* Split GSO packets at enqueue */
+ TCA_DUALPI2_PAD,
+ __TCA_DUALPI2_MAX
+};
+
+#define TCA_DUALPI2_MAX (__TCA_DUALPI2_MAX - 1)
+
+struct tc_dualpi2_xstats {
+ __u32 prob; /* current probability */
+ __u32 delay_c; /* current delay in C queue */
+ __u32 delay_l; /* current delay in L queue */
+ __u32 packets_in_c; /* number of packets enqueued in C queue */
+ __u32 packets_in_l; /* number of packets enqueued in L queue */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn */
+ __u32 step_marks; /* ECN marks due to the step AQM */
+ __s32 credit; /* current c_protection credit */
+ __u32 memory_used; /* Memory used by both queues */
+ __u32 max_memory_used; /* Maximum used memory */
+ __u32 memory_limit; /* Memory limit of both queues */
+};
+
#endif
diff --git a/io_uring/net.c b/io_uring/net.c
index 43a43522f406..bec8c6ed0a93 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1738,9 +1738,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- if (unlikely(req->flags & REQ_F_FAIL)) {
- ret = -ECONNRESET;
- goto out;
+ if (connect->in_progress) {
+ struct poll_table_struct pt = { ._key = EPOLLERR };
+
+ if (vfs_poll(req->file, &pt) & EPOLLERR)
+ goto get_sock_err;
}
file_flags = force_nonblock ? O_NONBLOCK : 0;
@@ -1765,8 +1767,10 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
* which means the previous result is good. For both of these,
* grab the sock_error() and use that for the completion.
*/
- if (ret == -EBADFD || ret == -EISCONN)
+ if (ret == -EBADFD || ret == -EISCONN) {
+get_sock_err:
ret = sock_error(sock_from_file(req->file)->sk);
+ }
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 0526062e2f81..20e9b46a4adf 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -273,8 +273,6 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
return IOU_POLL_REISSUE;
}
}
- if (unlikely(req->cqe.res & EPOLLERR))
- req_set_fail(req);
if (req->apoll_events & EPOLLONESHOT)
return IOU_POLL_DONE;
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 00d0064b22a5..4a7011c799f0 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -76,6 +76,8 @@ static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
int dmabuf_fd = area_reg->dmabuf_fd;
int i, ret;
+ if (off)
+ return -EINVAL;
if (WARN_ON_ONCE(!ifq->dev))
return -EFAULT;
if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
@@ -106,7 +108,7 @@ static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
for_each_sgtable_dma_sg(mem->sgt, sg, i)
total_size += sg_dma_len(sg);
- if (total_size < off + len) {
+ if (total_size != len) {
ret = -EINVAL;
goto err;
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index b71e428ad936..ad6df48b540c 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -884,6 +884,13 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
if (fmt[i] == 'p') {
sizeof_cur_arg = sizeof(long);
+ if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
+ ispunct(fmt[i + 1])) {
+ if (tmp_buf)
+ cur_arg = raw_args[num_spec];
+ goto nocopy_fmt;
+ }
+
if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
fmt[i + 2] == 's') {
fmt_ptype = fmt[i + 1];
@@ -891,11 +898,9 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
goto fmt_str;
}
- if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
- ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' ||
+ if (fmt[i + 1] == 'K' ||
fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
fmt[i + 1] == 'S') {
- /* just kernel pointers */
if (tmp_buf)
cur_arg = raw_args[num_spec];
i++;
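
The reordering above makes the bare "%p" case (format ends, or whitespace or
punctuation follows the 'p') take precedence over the kernel-pointer
extensions. A rough user-space sketch of that classification order,
illustrative only and not the BPF helper itself:

#include <ctype.h>
#include <stdio.h>

/* fmt points just past a '%p'; decide how the argument should be handled */
static const char *classify(const char *fmt)
{
	if (*fmt == '\0' || isspace((unsigned char)*fmt) ||
	    ispunct((unsigned char)*fmt))
		return "bare %p: use the raw pointer value";
	if (*fmt == 'K' || *fmt == 'x' || *fmt == 's' || *fmt == 'S')
		return "kernel pointer extension";
	return "unhandled extension";
}

int main(void)
{
	printf("%s\n", classify(""));	/* "%p" at end of format */
	printf("%s\n", classify("K"));	/* "%pK" */
	printf("%s\n", classify("Z"));	/* unknown */
	return 0;
}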
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index 941d0d2427e3..8e61dc555415 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -21,7 +21,7 @@ static int btf_sysfs_vmlinux_mmap(struct file *filp, struct kobject *kobj,
{
unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT;
size_t vm_size = vma->vm_end - vma->vm_start;
- phys_addr_t addr = virt_to_phys(__start_BTF);
+ phys_addr_t addr = __pa_symbol(__start_BTF);
unsigned long pfn = addr >> PAGE_SHIFT;
if (attr->private != __start_BTF || !PAGE_ALIGNED(addr))
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index 507b8f19a262..dd9417425d92 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -66,15 +66,9 @@ static struct freezer *parent_freezer(struct freezer *freezer)
bool cgroup_freezing(struct task_struct *task)
{
bool ret;
- unsigned int state;
rcu_read_lock();
- /* Check if the cgroup is still FREEZING, but not FROZEN. The extra
- * !FROZEN check is required, because the FREEZING bit is not cleared
- * when the state FROZEN is reached.
- */
- state = task_freezer(task)->state;
- ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
+ ret = task_freezer(task)->state & CGROUP_FREEZING;
rcu_read_unlock();
return ret;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 8d530d0949ff..6a96149aede9 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -201,18 +201,9 @@ static int __restore_freezer_state(struct task_struct *p, void *arg)
void __thaw_task(struct task_struct *p)
{
- unsigned long flags;
-
- spin_lock_irqsave(&freezer_lock, flags);
- if (WARN_ON_ONCE(freezing(p)))
- goto unlock;
-
- if (!frozen(p) || task_call_func(p, __restore_freezer_state, NULL))
- goto unlock;
-
- wake_up_state(p, TASK_FROZEN);
-unlock:
- spin_unlock_irqrestore(&freezer_lock, flags);
+ guard(spinlock_irqsave)(&freezer_lock);
+ if (frozen(p) && !task_call_func(p, __restore_freezer_state, NULL))
+ wake_up_state(p, TASK_FROZEN);
}
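
guard(spinlock_irqsave)() scopes the lock to the enclosing block, so the
early-return path no longer needs an explicit unlock label. A stand-alone
sketch of the same scope-based idea in plain C, using the GCC/Clang cleanup
attribute with a pthread mutex as an analogy (this is not the kernel's guard
infrastructure):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void guard_release(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* lock now, unlock automatically when the guard variable leaves scope */
#define scoped_guard(m) \
	pthread_mutex_t *__guard __attribute__((cleanup(guard_release))) = \
		(pthread_mutex_lock(m), (m))

static void bump(void)
{
	scoped_guard(&lock);
	counter++;		/* unlock runs on every return path */
}

int main(void)
{
	bump();
	printf("counter=%d\n", counter);
	return 0;
}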
/**
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index b498d867ba21..7dd5cbcb7a06 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void)
#define SCX_CALL_OP(sch, mask, op, rq, args...) \
do { \
- update_locked_rq(rq); \
+ if (rq) \
+ update_locked_rq(rq); \
if (mask) { \
scx_kf_allow(mask); \
(sch)->ops.op(args); \
@@ -1280,14 +1281,16 @@ do { \
} else { \
(sch)->ops.op(args); \
} \
- update_locked_rq(NULL); \
+ if (rq) \
+ update_locked_rq(NULL); \
} while (0)
#define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \
({ \
__typeof__((sch)->ops.op(args)) __ret; \
\
- update_locked_rq(rq); \
+ if (rq) \
+ update_locked_rq(rq); \
if (mask) { \
scx_kf_allow(mask); \
__ret = (sch)->ops.op(args); \
@@ -1295,7 +1298,8 @@ do { \
} else { \
__ret = (sch)->ops.op(args); \
} \
- update_locked_rq(NULL); \
+ if (rq) \
+ update_locked_rq(NULL); \
__ret; \
})
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 6d29d3cbc670..001fb88a8481 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -903,7 +903,7 @@ s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
* selection optimizations and simply check whether the previously
* used CPU is idle and within the allowed cpumask.
*/
- if (p->nr_cpus_allowed == 1) {
+ if (p->nr_cpus_allowed == 1 || is_migration_disabled(p)) {
if (cpumask_test_cpu(prev_cpu, allowed ?: p->cpus_ptr) &&
scx_idle_test_and_clear_cpu(prev_cpu))
cpu = prev_cpu;
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index c48900b856a2..52ca8e268cfc 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
long nr_active, delta = 0;
nr_active = this_rq->nr_running - adjust;
- nr_active += (int)this_rq->nr_uninterruptible;
+ nr_active += (long)this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
delta = nr_active - this_rq->calc_load_active;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 475bb5998295..83e3aa917142 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1149,7 +1149,7 @@ struct rq {
* one CPU and if it got migrated afterwards it may decrease
* it on another CPU. Always updated under the runqueue lock:
*/
- unsigned int nr_uninterruptible;
+ unsigned long nr_uninterruptible;
union {
struct task_struct __rcu *donor; /* Scheduler context */
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 120531268abf..d01e5c910ce1 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -3136,7 +3136,10 @@ __register_event(struct trace_event_call *call, struct module *mod)
if (ret < 0)
return ret;
+ down_write(&trace_event_sem);
list_add(&call->list, &ftrace_events);
+ up_write(&trace_event_sem);
+
if (call->flags & TRACE_EVENT_FL_DYNAMIC)
atomic_set(&call->refcnt, 0);
else
@@ -3750,6 +3753,8 @@ __trace_add_event_dirs(struct trace_array *tr)
struct trace_event_call *call;
int ret;
+ lockdep_assert_held(&trace_event_sem);
+
list_for_each_entry(call, &ftrace_events, list) {
ret = __trace_add_new_event(call, tr);
if (ret < 0)
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 6819b93309ce..fd259da0aa64 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -637,8 +637,8 @@ __timerlat_dump_stack(struct trace_buffer *buffer, struct trace_stack *fstack, u
entry = ring_buffer_event_data(event);
- memcpy(&entry->caller, fstack->calls, size);
entry->size = fstack->nr_entries;
+ memcpy(&entry->caller, fstack->calls, size);
trace_buffer_unlock_commit_nostack(buffer, event);
}
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 9a11a38a6770..e042a4a0bc0c 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -261,7 +261,15 @@ err_put_fd:
static int secretmem_init_fs_context(struct fs_context *fc)
{
- return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM;
+ struct pseudo_fs_context *ctx;
+
+ ctx = init_pseudo(fc, SECRETMEM_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+
+ fc->s_iflags |= SB_I_NOEXEC;
+ fc->s_iflags |= SB_I_NODEV;
+ return 0;
}
static struct file_system_type secretmem_fs = {
@@ -279,9 +287,6 @@ static int __init secretmem_init(void)
if (IS_ERR(secretmem_mnt))
return PTR_ERR(secretmem_mnt);
- /* prevent secretmem mappings from ever getting PROT_EXEC */
- secretmem_mnt->mnt_flags |= MNT_NOEXEC;
-
return 0;
}
fs_initcall(secretmem_init);
diff --git a/net/Kconfig b/net/Kconfig
index ebc80a98fc91..d5865cf19799 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -247,7 +247,7 @@ source "net/ipv4/netfilter/Kconfig"
source "net/ipv6/netfilter/Kconfig"
source "net/bridge/netfilter/Kconfig"
-endif
+endif # if NETFILTER
source "net/sctp/Kconfig"
source "net/rds/Kconfig"
@@ -408,9 +408,9 @@ config NET_DROP_MONITOR
just checking the various proc files and other utilities for
drop statistics, say N here.
-endmenu
+endmenu # Network testing
-endmenu
+endmenu # Networking options
source "net/ax25/Kconfig"
source "net/can/Kconfig"
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 9c787e2e4b17..4744e3fd4544 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -35,6 +35,7 @@
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
+#include <linux/refcount.h>
int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
int sysctl_aarp_tick_time = AARP_TICK_TIME;
@@ -44,6 +45,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
/* Lists of aarp entries */
/**
* struct aarp_entry - AARP entry
+ * @refcnt: Reference count
* @last_sent: Last time we xmitted the aarp request
* @packet_queue: Queue of frames waiting for resolution
* @status: Used for proxy AARP
@@ -55,6 +57,7 @@ int sysctl_aarp_resolve_time = AARP_RESOLVE_TIME;
* @next: Next entry in chain
*/
struct aarp_entry {
+ refcount_t refcnt;
/* These first two are only used for unresolved entries */
unsigned long last_sent;
struct sk_buff_head packet_queue;
@@ -79,6 +82,17 @@ static DEFINE_RWLOCK(aarp_lock);
/* Used to walk the list and purge/kick entries. */
static struct timer_list aarp_timer;
+static inline void aarp_entry_get(struct aarp_entry *a)
+{
+ refcount_inc(&a->refcnt);
+}
+
+static inline void aarp_entry_put(struct aarp_entry *a)
+{
+ if (refcount_dec_and_test(&a->refcnt))
+ kfree(a);
+}
+
/*
* Delete an aarp queue
*
@@ -87,7 +101,7 @@ static struct timer_list aarp_timer;
static void __aarp_expire(struct aarp_entry *a)
{
skb_queue_purge(&a->packet_queue);
- kfree(a);
+ aarp_entry_put(a);
}
/*
@@ -380,9 +394,11 @@ static void aarp_purge(void)
static struct aarp_entry *aarp_alloc(void)
{
struct aarp_entry *a = kmalloc(sizeof(*a), GFP_ATOMIC);
+ if (!a)
+ return NULL;
- if (a)
- skb_queue_head_init(&a->packet_queue);
+ refcount_set(&a->refcnt, 1);
+ skb_queue_head_init(&a->packet_queue);
return a;
}
@@ -477,6 +493,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
entry->dev = atif->dev;
write_lock_bh(&aarp_lock);
+ aarp_entry_get(entry);
hash = sa->s_node % (AARP_HASH_SIZE - 1);
entry->next = proxies[hash];
@@ -502,6 +519,7 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
retval = 1;
}
+ aarp_entry_put(entry);
write_unlock_bh(&aarp_lock);
out:
return retval;
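
With the new refcount the hash table and the probe path can each hold the
entry independently, and the memory is only freed when the last holder drops
its reference. A generic user-space sketch of that get/put lifetime pattern,
using C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	atomic_int refcnt;
	/* payload would live here */
};

static struct entry *entry_alloc(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (e)
		atomic_init(&e->refcnt, 1);	/* creator's reference */
	return e;
}

static void entry_get(struct entry *e)
{
	atomic_fetch_add(&e->refcnt, 1);
}

static void entry_put(struct entry *e)
{
	if (atomic_fetch_sub(&e->refcnt, 1) == 1)
		free(e);			/* last reference dropped */
}

int main(void)
{
	struct entry *e = entry_alloc();

	if (!e)
		return 1;
	entry_get(e);	/* second holder, e.g. a lookup table */
	entry_put(e);	/* first holder done, entry still alive */
	entry_put(e);	/* table drops it, entry is freed */
	return 0;
}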
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index c50436433c18..d97c326a9045 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -45,9 +45,9 @@ const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_
[DEVLINK_PORT_FN_ATTR_CAPS] = NLA_POLICY_BITFIELD32(15),
};
-const struct nla_policy devlink_dl_rate_tc_bws_nl_policy[DEVLINK_ATTR_RATE_TC_BW + 1] = {
- [DEVLINK_ATTR_RATE_TC_INDEX] = NLA_POLICY_MAX(NLA_U8, DEVLINK_RATE_TC_INDEX_MAX),
- [DEVLINK_ATTR_RATE_TC_BW] = { .type = NLA_U32, },
+const struct nla_policy devlink_dl_rate_tc_bws_nl_policy[DEVLINK_RATE_TC_ATTR_BW + 1] = {
+ [DEVLINK_RATE_TC_ATTR_INDEX] = NLA_POLICY_MAX(NLA_U8, DEVLINK_RATE_TC_INDEX_MAX),
+ [DEVLINK_RATE_TC_ATTR_BW] = { .type = NLA_U32, },
};
const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1] = {
diff --git a/net/devlink/netlink_gen.h b/net/devlink/netlink_gen.h
index fb733b5d4ff1..09cc6f264ccf 100644
--- a/net/devlink/netlink_gen.h
+++ b/net/devlink/netlink_gen.h
@@ -13,7 +13,7 @@
/* Common nested types */
extern const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_CAPS + 1];
-extern const struct nla_policy devlink_dl_rate_tc_bws_nl_policy[DEVLINK_ATTR_RATE_TC_BW + 1];
+extern const struct nla_policy devlink_dl_rate_tc_bws_nl_policy[DEVLINK_RATE_TC_ATTR_BW + 1];
extern const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1];
/* Ops table for devlink */
diff --git a/net/devlink/rate.c b/net/devlink/rate.c
index d39300a9b3d4..110b3fa8a0b1 100644
--- a/net/devlink/rate.c
+++ b/net/devlink/rate.c
@@ -90,8 +90,8 @@ static int devlink_rate_put_tc_bws(struct sk_buff *msg, u32 *tc_bw)
if (!nla_tc_bw)
return -EMSGSIZE;
- if (nla_put_u8(msg, DEVLINK_ATTR_RATE_TC_INDEX, i) ||
- nla_put_u32(msg, DEVLINK_ATTR_RATE_TC_BW, tc_bw[i]))
+ if (nla_put_u8(msg, DEVLINK_RATE_TC_ATTR_INDEX, i) ||
+ nla_put_u32(msg, DEVLINK_RATE_TC_ATTR_BW, tc_bw[i]))
goto nla_put_failure;
nla_nest_end(msg, nla_tc_bw);
@@ -346,26 +346,26 @@ static int devlink_nl_rate_tc_bw_parse(struct nlattr *parent_nest, u32 *tc_bw,
unsigned long *bitmap,
struct netlink_ext_ack *extack)
{
- struct nlattr *tb[DEVLINK_ATTR_MAX + 1];
+ struct nlattr *tb[DEVLINK_RATE_TC_ATTR_MAX + 1];
u8 tc_index;
int err;
- err = nla_parse_nested(tb, DEVLINK_ATTR_MAX, parent_nest,
+ err = nla_parse_nested(tb, DEVLINK_RATE_TC_ATTR_MAX, parent_nest,
devlink_dl_rate_tc_bws_nl_policy, extack);
if (err)
return err;
- if (!tb[DEVLINK_ATTR_RATE_TC_INDEX]) {
+ if (!tb[DEVLINK_RATE_TC_ATTR_INDEX]) {
NL_SET_ERR_ATTR_MISS(extack, parent_nest,
- DEVLINK_ATTR_RATE_TC_INDEX);
+ DEVLINK_RATE_TC_ATTR_INDEX);
return -EINVAL;
}
- tc_index = nla_get_u8(tb[DEVLINK_ATTR_RATE_TC_INDEX]);
+ tc_index = nla_get_u8(tb[DEVLINK_RATE_TC_ATTR_INDEX]);
- if (!tb[DEVLINK_ATTR_RATE_TC_BW]) {
+ if (!tb[DEVLINK_RATE_TC_ATTR_BW]) {
NL_SET_ERR_ATTR_MISS(extack, parent_nest,
- DEVLINK_ATTR_RATE_TC_BW);
+ DEVLINK_RATE_TC_ATTR_BW);
return -EINVAL;
}
@@ -376,7 +376,7 @@ static int devlink_nl_rate_tc_bw_parse(struct nlattr *parent_nest, u32 *tc_bw,
return -EINVAL;
}
- tc_bw[tc_index] = nla_get_u32(tb[DEVLINK_ATTR_RATE_TC_BW]);
+ tc_bw[tc_index] = nla_get_u32(tb[DEVLINK_RATE_TC_ATTR_BW]);
return 0;
}
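
Each nested entry above pairs a traffic-class index with a bandwidth value,
and the parser rejects entries missing either attribute or carrying an
out-of-range index. A rough stand-alone sketch of that validation shape, with
plain structs standing in for netlink attributes and a made-up TC limit:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TC_MAX	8

struct tc_bw_entry {
	bool has_index, has_bw;
	unsigned char index;
	unsigned int bw;
};

static int parse_entry(const struct tc_bw_entry *e, unsigned int *tc_bw)
{
	if (!e->has_index || !e->has_bw)
		return -1;		/* both attributes are required */
	if (e->index >= TC_MAX)
		return -1;		/* index out of range */
	tc_bw[e->index] = e->bw;
	return 0;
}

int main(void)
{
	unsigned int tc_bw[TC_MAX] = { 0 };
	struct tc_bw_entry entries[] = {
		{ true, true, 0, 60 },
		{ true, true, 3, 40 },
	};

	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		if (parse_entry(&entries[i], tc_bw))
			return 1;
	printf("tc0=%u tc3=%u\n", tc_bw[0], tc_bw[3]);
	return 0;
}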
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 4dcb4194f3ce..4f58648a27ad 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -806,6 +806,40 @@ out_free:
return rc;
}
+struct ethtool_rxfh_context *
+ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
+ u32 indir_size, u32 key_size)
+{
+ size_t indir_bytes, flex_len, key_off, size;
+ struct ethtool_rxfh_context *ctx;
+ u32 priv_bytes, indir_max;
+ u16 key_max;
+
+ key_max = max(key_size, ops->rxfh_key_space);
+ indir_max = max(indir_size, ops->rxfh_indir_space);
+
+ priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32));
+ indir_bytes = array_size(indir_max, sizeof(u32));
+
+ key_off = size_add(priv_bytes, indir_bytes);
+ flex_len = size_add(key_off, key_max);
+ size = struct_size_t(struct ethtool_rxfh_context, data, flex_len);
+
+ ctx = kzalloc(size, GFP_KERNEL_ACCOUNT);
+ if (!ctx)
+ return NULL;
+
+ ctx->indir_size = indir_size;
+ ctx->key_size = key_size;
+ ctx->key_off = key_off;
+ ctx->priv_size = ops->rxfh_priv_size;
+
+ ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
+ ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
+
+ return ctx;
+}
+
/* Check if fields configured for flow hash are symmetric - if src is included
* so is dst and vice versa.
*/
@@ -829,6 +863,10 @@ int ethtool_check_ops(const struct ethtool_ops *ops)
return -EINVAL;
if (WARN_ON(ops->supported_input_xfrm && !ops->get_rxfh_fields))
return -EINVAL;
+ if (WARN_ON(ops->supported_input_xfrm &&
+ ops->rxfh_per_ctx_fields != ops->rxfh_per_ctx_key))
+ return -EINVAL;
+
/* NOTE: sufficiently insane drivers may swap ethtool_ops at runtime,
* the fact that ops are checked at registration time does not
* mean the ops attached to a netdev later on are sane.
@@ -1098,5 +1136,6 @@ void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id)
netdev_err(dev, "device error, RSS context %d lost\n", context_id);
ctx = xa_erase(&dev->ethtool->rss_ctx, context_id);
kfree(ctx);
+ ethtool_rss_notify(dev, ETHTOOL_MSG_RSS_DELETE_NTF, context_id);
}
EXPORT_SYMBOL(ethtool_rxfh_context_lost);
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index b2718afe38b5..c4d084dde5bf 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -43,6 +43,9 @@ bool convert_legacy_settings_to_link_ksettings(
int ethtool_check_max_channel(struct net_device *dev,
struct ethtool_channels channels,
struct genl_info *info);
+struct ethtool_rxfh_context *
+ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
+ u32 indir_size, u32 key_size);
int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context);
int ethtool_rxfh_config_is_sym(u64 rxfh);
@@ -76,9 +79,10 @@ int ethtool_get_module_eeprom_call(struct net_device *dev,
bool __ethtool_dev_mm_supported(struct net_device *dev);
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
-void ethtool_rss_notify(struct net_device *dev, u32 rss_context);
+void ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context);
#else
-static inline void ethtool_rss_notify(struct net_device *dev, u32 rss_context)
+static inline void
+ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context)
{
}
#endif
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 2dfeaaa099fb..43a7854e784e 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1105,7 +1105,7 @@ exit_unlock:
if (rc)
return rc;
- ethtool_rss_notify(dev, fields.rss_context);
+ ethtool_rss_notify(dev, ETHTOOL_MSG_RSS_NTF, fields.rss_context);
return 0;
}
@@ -1473,40 +1473,6 @@ out:
return ret;
}
-static struct ethtool_rxfh_context *
-ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
- u32 indir_size, u32 key_size)
-{
- size_t indir_bytes, flex_len, key_off, size;
- struct ethtool_rxfh_context *ctx;
- u32 priv_bytes, indir_max;
- u16 key_max;
-
- key_max = max(key_size, ops->rxfh_key_space);
- indir_max = max(indir_size, ops->rxfh_indir_space);
-
- priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32));
- indir_bytes = array_size(indir_max, sizeof(u32));
-
- key_off = size_add(priv_bytes, indir_bytes);
- flex_len = size_add(key_off, key_max);
- size = struct_size_t(struct ethtool_rxfh_context, data, flex_len);
-
- ctx = kzalloc(size, GFP_KERNEL_ACCOUNT);
- if (!ctx)
- return NULL;
-
- ctx->indir_size = indir_size;
- ctx->key_size = key_size;
- ctx->key_off = key_off;
- ctx->priv_size = ops->rxfh_priv_size;
-
- ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
- ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
-
- return ctx;
-}
-
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
void __user *useraddr)
{
@@ -1520,8 +1486,8 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
bool create = false;
- bool mod = false;
u8 *rss_config;
+ int ntf = 0; /* notification to send on success, 0 means none */
int ret;
if (!ops->get_rxnfc || !ops->set_rxfh)
@@ -1671,20 +1637,25 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.input_xfrm = rxfh.input_xfrm;
if (!rxfh.rss_context) {
+ ntf = ETHTOOL_MSG_RSS_NTF;
ret = ops->set_rxfh(dev, &rxfh_dev, extack);
} else if (create) {
+ ntf = ETHTOOL_MSG_RSS_CREATE_NTF;
ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, extack);
/* Make sure driver populates defaults */
WARN_ON_ONCE(!ret && !rxfh_dev.key && ops->rxfh_per_ctx_key &&
!memchr_inv(ethtool_rxfh_context_key(ctx), 0,
ctx->key_size));
} else if (rxfh_dev.rss_delete) {
+ ntf = ETHTOOL_MSG_RSS_DELETE_NTF;
ret = ops->remove_rxfh_context(dev, ctx, rxfh.rss_context,
extack);
} else {
+ ntf = ETHTOOL_MSG_RSS_NTF;
ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev, extack);
}
if (ret) {
+ ntf = 0;
if (create) {
/* failed to create, free our new tracking entry */
xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context);
@@ -1692,7 +1663,6 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
}
goto out_unlock;
}
- mod = !create && !rxfh_dev.rss_delete;
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
&rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context)))
@@ -1732,8 +1702,8 @@ out_unlock:
mutex_unlock(&dev->ethtool->rss_lock);
out_free:
kfree(rss_config);
- if (mod)
- ethtool_rss_notify(dev, rxfh.rss_context);
+ if (ntf)
+ ethtool_rss_notify(dev, ntf, rxfh.rss_context);
return ret;
}
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 0ae0d7a9667c..2f813f25f07e 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -81,6 +81,12 @@ static void ethnl_sock_priv_destroy(void *priv)
}
}
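+/* Get the next sequence number for broadcast (notification) messages;
+ * the caller must hold RTNL.
+ */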
+u32 ethnl_bcast_seq_next(void)
+{
+ ASSERT_RTNL();
+ return ++ethnl_bcast_seq;
+}
+
int ethnl_ops_begin(struct net_device *dev)
{
int ret;
@@ -954,6 +960,7 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_PLCA_NTF] = &ethnl_plca_cfg_request_ops,
[ETHTOOL_MSG_MM_NTF] = &ethnl_mm_request_ops,
[ETHTOOL_MSG_RSS_NTF] = &ethnl_rss_request_ops,
+ [ETHTOOL_MSG_RSS_CREATE_NTF] = &ethnl_rss_request_ops,
};
/* default notification handler */
@@ -1061,6 +1068,7 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_PLCA_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_MM_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_RSS_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_RSS_CREATE_NTF] = ethnl_default_notify,
};
void ethnl_notify(struct net_device *dev, unsigned int cmd,
@@ -1512,6 +1520,20 @@ static const struct genl_ops ethtool_genl_ops[] = {
.policy = ethnl_rss_set_policy,
.maxattr = ARRAY_SIZE(ethnl_rss_set_policy) - 1,
},
+ {
+ .cmd = ETHTOOL_MSG_RSS_CREATE_ACT,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_rss_create_doit,
+ .policy = ethnl_rss_create_policy,
+ .maxattr = ARRAY_SIZE(ethnl_rss_create_policy) - 1,
+ },
+ {
+ .cmd = ETHTOOL_MSG_RSS_DELETE_ACT,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .doit = ethnl_rss_delete_doit,
+ .policy = ethnl_rss_delete_policy,
+ .maxattr = ARRAY_SIZE(ethnl_rss_delete_policy) - 1,
+ },
};
static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index ddb2fb00f929..1d4f9ecb3d26 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -10,6 +10,7 @@
struct ethnl_req_info;
+u32 ethnl_bcast_seq_next(void);
int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
const struct nlattr *nest, struct net *net,
struct netlink_ext_ack *extack,
@@ -485,6 +486,8 @@ extern const struct nla_policy ethnl_pse_get_policy[ETHTOOL_A_PSE_HEADER + 1];
extern const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1];
extern const struct nla_policy ethnl_rss_get_policy[ETHTOOL_A_RSS_START_CONTEXT + 1];
extern const struct nla_policy ethnl_rss_set_policy[ETHTOOL_A_RSS_FLOW_HASH + 1];
+extern const struct nla_policy ethnl_rss_create_policy[ETHTOOL_A_RSS_INPUT_XFRM + 1];
+extern const struct nla_policy ethnl_rss_delete_policy[ETHTOOL_A_RSS_CONTEXT + 1];
extern const struct nla_policy ethnl_plca_get_cfg_policy[ETHTOOL_A_PLCA_HEADER + 1];
extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1];
extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1];
@@ -507,6 +510,8 @@ int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_tsinfo_start(struct netlink_callback *cb);
int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
int ethnl_tsinfo_done(struct netlink_callback *cb);
+int ethnl_rss_create_doit(struct sk_buff *skb, struct genl_info *info);
+int ethnl_rss_delete_doit(struct sk_buff *skb, struct genl_info *info);
extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN];
extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN];
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index bf45ebc22347..992e98abe9dd 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -113,21 +113,11 @@ rss_prepare_flow_hash(const struct rss_req_info *req, struct net_device *dev,
}
static int
-rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
- struct rss_reply_data *data, const struct genl_info *info)
+rss_get_data_alloc(struct net_device *dev, struct rss_reply_data *data)
{
- struct ethtool_rxfh_param rxfh = {};
- const struct ethtool_ops *ops;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
u32 total_size, indir_bytes;
u8 *rss_config;
- int ret;
-
- ops = dev->ethtool_ops;
-
- ret = ethnl_ops_begin(dev);
- if (ret < 0)
- return ret;
- mutex_lock(&dev->ethtool->rss_lock);
data->indir_size = 0;
data->hkey_size = 0;
@@ -139,16 +129,39 @@ rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
indir_bytes = data->indir_size * sizeof(u32);
total_size = indir_bytes + data->hkey_size;
rss_config = kzalloc(total_size, GFP_KERNEL);
- if (!rss_config) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (!rss_config)
+ return -ENOMEM;
if (data->indir_size)
data->indir_table = (u32 *)rss_config;
if (data->hkey_size)
data->hkey = rss_config + indir_bytes;
+ return 0;
+}
+
+static void rss_get_data_free(const struct rss_reply_data *data)
+{
+ kfree(data->indir_table);
+}
+
+static int
+rss_prepare_get(const struct rss_req_info *request, struct net_device *dev,
+ struct rss_reply_data *data, const struct genl_info *info)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxfh_param rxfh = {};
+ int ret;
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ return ret;
+ mutex_lock(&dev->ethtool->rss_lock);
+
+ ret = rss_get_data_alloc(dev, data);
+ if (ret)
+ goto out_unlock;
+
rxfh.indir_size = data->indir_size;
rxfh.indir = data->indir_table;
rxfh.key_size = data->hkey_size;
@@ -166,6 +179,25 @@ out_unlock:
return ret;
}
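+/* Fill the reply data from the RSS configuration stored in @ctx. */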
+static void
+__rss_prepare_ctx(struct net_device *dev, struct rss_reply_data *data,
+ struct ethtool_rxfh_context *ctx)
+{
+ if (WARN_ON_ONCE(data->indir_size != ctx->indir_size ||
+ data->hkey_size != ctx->key_size))
+ return;
+
+ data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
+
+ data->hfunc = ctx->hfunc;
+ data->input_xfrm = ctx->input_xfrm;
+ memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx),
+ data->indir_size * sizeof(u32));
+ if (data->hkey_size)
+ memcpy(data->hkey, ethtool_rxfh_context_key(ctx),
+ data->hkey_size);
+}
+
static int
rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
struct rss_reply_data *data, const struct genl_info *info)
@@ -175,8 +207,6 @@ rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
u8 *rss_config;
int ret;
- data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key;
-
mutex_lock(&dev->ethtool->rss_lock);
ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context);
if (!ctx) {
@@ -186,8 +216,6 @@ rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
data->indir_size = ctx->indir_size;
data->hkey_size = ctx->key_size;
- data->hfunc = ctx->hfunc;
- data->input_xfrm = ctx->input_xfrm;
indir_bytes = data->indir_size * sizeof(u32);
total_size = indir_bytes + data->hkey_size;
@@ -198,13 +226,10 @@ rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev,
}
data->indir_table = (u32 *)rss_config;
- memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx), indir_bytes);
-
- if (data->hkey_size) {
+ if (data->hkey_size)
data->hkey = rss_config + indir_bytes;
- memcpy(data->hkey, ethtool_rxfh_context_key(ctx),
- data->hkey_size);
- }
+
+ __rss_prepare_ctx(dev, data, ctx);
ret = 0;
out_unlock:
@@ -318,7 +343,7 @@ static void rss_cleanup_data(struct ethnl_reply_data *reply_base)
{
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
- kfree(data->indir_table);
+ rss_get_data_free(data);
}
struct rss_nl_dump_ctx {
@@ -461,13 +486,49 @@ int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
/* RSS_NTF */
-void ethtool_rss_notify(struct net_device *dev, u32 rss_context)
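+/* The deleted context can no longer be queried, so build a minimal
+ * notification carrying only the device header and the context ID.
+ */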
+static void ethnl_rss_delete_notify(struct net_device *dev, u32 rss_context)
+{
+ struct sk_buff *ntf;
+ size_t ntf_size;
+ void *hdr;
+
+ ntf_size = ethnl_reply_header_size() +
+ nla_total_size(sizeof(u32)); /* _RSS_CONTEXT */
+
+ ntf = genlmsg_new(ntf_size, GFP_KERNEL);
+ if (!ntf)
+ goto out_warn;
+
+ hdr = ethnl_bcastmsg_put(ntf, ETHTOOL_MSG_RSS_DELETE_NTF);
+ if (!hdr)
+ goto out_free_ntf;
+
+ if (ethnl_fill_reply_header(ntf, dev, ETHTOOL_A_RSS_HEADER) ||
+ nla_put_u32(ntf, ETHTOOL_A_RSS_CONTEXT, rss_context))
+ goto out_free_ntf;
+
+ genlmsg_end(ntf, hdr);
+ if (ethnl_multicast(ntf, dev))
+ goto out_warn;
+
+ return;
+
+out_free_ntf:
+ nlmsg_free(ntf);
+out_warn:
+ pr_warn_once("Failed to send a RSS delete notification");
+}
+
+void ethtool_rss_notify(struct net_device *dev, u32 type, u32 rss_context)
{
struct rss_req_info req_info = {
.rss_context = rss_context,
};
- ethnl_notify(dev, ETHTOOL_MSG_RSS_NTF, &req_info.base);
+ if (type == ETHTOOL_MSG_RSS_DELETE_NTF)
+ ethnl_rss_delete_notify(dev, rss_context);
+ else
+ ethnl_notify(dev, type, &req_info.base);
}
/* RSS_SET */
@@ -868,3 +929,277 @@ const struct ethnl_request_ops ethnl_rss_request_ops = {
.set = ethnl_rss_set,
.set_ntf_cmd = ETHTOOL_MSG_RSS_NTF,
};
+
+/* RSS_CREATE */
+
+const struct nla_policy ethnl_rss_create_policy[ETHTOOL_A_RSS_INPUT_XFRM + 1] = {
+ [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_RSS_CONTEXT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [ETHTOOL_A_RSS_HFUNC] = NLA_POLICY_MIN(NLA_U32, 1),
+ [ETHTOOL_A_RSS_INDIR] = NLA_POLICY_MIN(NLA_BINARY, 1),
+ [ETHTOOL_A_RSS_HKEY] = NLA_POLICY_MIN(NLA_BINARY, 1),
+ [ETHTOOL_A_RSS_INPUT_XFRM] =
+ NLA_POLICY_MAX(NLA_U32, RXH_XFRM_SYM_OR_XOR),
+};
+
+static int
+ethnl_rss_create_validate(struct net_device *dev, struct genl_info *info)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct nlattr **tb = info->attrs;
+ struct nlattr *bad_attr = NULL;
+ u32 rss_context, input_xfrm;
+
+ if (!ops->create_rxfh_context)
+ return -EOPNOTSUPP;
+
+ rss_context = nla_get_u32_default(tb[ETHTOOL_A_RSS_CONTEXT], 0);
+ if (ops->rxfh_max_num_contexts &&
+ ops->rxfh_max_num_contexts <= rss_context) {
+ NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT]);
+ return -ERANGE;
+ }
+
+ if (!ops->rxfh_per_ctx_key) {
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HFUNC];
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_HKEY];
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM];
+ }
+
+ input_xfrm = nla_get_u32_default(tb[ETHTOOL_A_RSS_INPUT_XFRM], 0);
+ if (input_xfrm & ~ops->supported_input_xfrm)
+ bad_attr = bad_attr ?: tb[ETHTOOL_A_RSS_INPUT_XFRM];
+
+ if (bad_attr) {
+ NL_SET_BAD_ATTR(info->extack, bad_attr);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void
+ethnl_rss_create_send_ntf(struct sk_buff *rsp, struct net_device *dev)
+{
+ struct nlmsghdr *nlh = (void *)rsp->data;
+ struct genlmsghdr *genl_hdr;
+
+ /* Convert the reply into a notification */
+ nlh->nlmsg_pid = 0;
+ nlh->nlmsg_seq = ethnl_bcast_seq_next();
+
+ genl_hdr = nlmsg_data(nlh);
+ genl_hdr->cmd = ETHTOOL_MSG_RSS_CREATE_NTF;
+
+ ethnl_multicast(rsp, dev);
+}
+
+int ethnl_rss_create_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ bool indir_dflt = false, mod = false, ntf_fail = false;
+ struct ethtool_rxfh_param rxfh = {};
+ struct ethtool_rxfh_context *ctx;
+ struct nlattr **tb = info->attrs;
+ struct rss_reply_data data = {};
+ const struct ethtool_ops *ops;
+ struct rss_req_info req = {};
+ struct net_device *dev;
+ struct sk_buff *rsp;
+ void *hdr;
+ u32 limit;
+ int ret;
+
+ rsp = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ ret = ethnl_parse_header_dev_get(&req.base, tb[ETHTOOL_A_RSS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ goto exit_free_rsp;
+
+ dev = req.base.dev;
+ ops = dev->ethtool_ops;
+
+ req.rss_context = nla_get_u32_default(tb[ETHTOOL_A_RSS_CONTEXT], 0);
+
+ ret = ethnl_rss_create_validate(dev, info);
+ if (ret)
+ goto exit_free_dev;
+
+ rtnl_lock();
+ netdev_lock_ops(dev);
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto exit_dev_unlock;
+
+ ret = rss_get_data_alloc(dev, &data);
+ if (ret)
+ goto exit_ops;
+
+ ret = rss_set_prep_indir(dev, info, &data, &rxfh, &indir_dflt, &mod);
+ if (ret)
+ goto exit_clean_data;
+
+ ethnl_update_u8(&rxfh.hfunc, tb[ETHTOOL_A_RSS_HFUNC], &mod);
+
+ ret = rss_set_prep_hkey(dev, info, &data, &rxfh, &mod);
+ if (ret)
+ goto exit_free_indir;
+
+ rxfh.input_xfrm = RXH_XFRM_NO_CHANGE;
+ ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod);
+
+ ctx = ethtool_rxfh_ctx_alloc(ops, data.indir_size, data.hkey_size);
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto exit_free_hkey;
+ }
+
+ mutex_lock(&dev->ethtool->rss_lock);
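+ /* ID 0 is the default context and cannot be created here; if no ID
+ * was requested, auto-allocate a free one.
+ */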
+ if (!req.rss_context) {
+ limit = ops->rxfh_max_num_contexts ?: U32_MAX;
+ ret = xa_alloc(&dev->ethtool->rss_ctx, &req.rss_context, ctx,
+ XA_LIMIT(1, limit - 1), GFP_KERNEL_ACCOUNT);
+ } else {
+ ret = xa_insert(&dev->ethtool->rss_ctx,
+ req.rss_context, ctx, GFP_KERNEL_ACCOUNT);
+ }
+ if (ret < 0) {
+ NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT],
+ "error allocating context ID");
+ goto err_unlock_free_ctx;
+ }
+ rxfh.rss_context = req.rss_context;
+
+ ret = ops->create_rxfh_context(dev, ctx, &rxfh, info->extack);
+ if (ret)
+ goto err_ctx_id_free;
+
+ /* Make sure driver populates defaults */
+ WARN_ON_ONCE(!rxfh.key && ops->rxfh_per_ctx_key &&
+ !memchr_inv(ethtool_rxfh_context_key(ctx), 0,
+ ctx->key_size));
+
+ /* Store the config from rxfh to Xarray.. */
+ rss_set_ctx_update(ctx, tb, &data, &rxfh);
+ /* .. copy from Xarray to data. */
+ __rss_prepare_ctx(dev, &data, ctx);
+
+ hdr = ethnl_unicast_put(rsp, info->snd_portid, info->snd_seq,
+ ETHTOOL_MSG_RSS_CREATE_ACT_REPLY);
+ ntf_fail = ethnl_fill_reply_header(rsp, dev, ETHTOOL_A_RSS_HEADER);
+ ntf_fail |= rss_fill_reply(rsp, &req.base, &data.base);
+ if (WARN_ON(!hdr || ntf_fail)) {
+ ret = -EMSGSIZE;
+ goto exit_unlock;
+ }
+
+ genlmsg_end(rsp, hdr);
+
+ /* Use the same skb for the response and the notification;
+ * genlmsg_reply() will copy the skb if it has an elevated user count.
+ */
+ skb_get(rsp);
+ ret = genlmsg_reply(rsp, info);
+ ethnl_rss_create_send_ntf(rsp, dev);
+ rsp = NULL;
+
+exit_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+exit_free_hkey:
+ kfree(rxfh.key);
+exit_free_indir:
+ kfree(rxfh.indir);
+exit_clean_data:
+ rss_get_data_free(&data);
+exit_ops:
+ ethnl_ops_complete(dev);
+exit_dev_unlock:
+ netdev_unlock_ops(dev);
+ rtnl_unlock();
+exit_free_dev:
+ ethnl_parse_header_dev_put(&req.base);
+exit_free_rsp:
+ nlmsg_free(rsp);
+ return ret;
+
+err_ctx_id_free:
+ xa_erase(&dev->ethtool->rss_ctx, req.rss_context);
+err_unlock_free_ctx:
+ kfree(ctx);
+ goto exit_unlock;
+}
+
+/* RSS_DELETE */
+
+const struct nla_policy ethnl_rss_delete_policy[ETHTOOL_A_RSS_CONTEXT + 1] = {
+ [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy),
+ [ETHTOOL_A_RSS_CONTEXT] = NLA_POLICY_MIN(NLA_U32, 1),
+};
+
+int ethnl_rss_delete_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct ethtool_rxfh_context *ctx;
+ struct nlattr **tb = info->attrs;
+ struct ethnl_req_info req = {};
+ const struct ethtool_ops *ops;
+ struct net_device *dev;
+ u32 rss_context;
+ int ret;
+
+ if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_RSS_CONTEXT))
+ return -EINVAL;
+ rss_context = nla_get_u32(tb[ETHTOOL_A_RSS_CONTEXT]);
+
+ ret = ethnl_parse_header_dev_get(&req, tb[ETHTOOL_A_RSS_HEADER],
+ genl_info_net(info), info->extack,
+ true);
+ if (ret < 0)
+ return ret;
+
+ dev = req.dev;
+ ops = dev->ethtool_ops;
+
+ if (!ops->create_rxfh_context)
+ goto exit_free_dev;
+
+ rtnl_lock();
+ netdev_lock_ops(dev);
+
+ ret = ethnl_ops_begin(dev);
+ if (ret < 0)
+ goto exit_dev_unlock;
+
+ mutex_lock(&dev->ethtool->rss_lock);
+ ret = ethtool_check_rss_ctx_busy(dev, rss_context);
+ if (ret)
+ goto exit_unlock;
+
+ ctx = xa_load(&dev->ethtool->rss_ctx, rss_context);
+ if (!ctx) {
+ ret = -ENOENT;
+ goto exit_unlock;
+ }
+
+ ret = ops->remove_rxfh_context(dev, ctx, rss_context, info->extack);
+ if (ret)
+ goto exit_unlock;
+
+ WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rss_context) != ctx);
+ kfree(ctx);
+
+ ethnl_rss_delete_notify(dev, rss_context);
+
+exit_unlock:
+ mutex_unlock(&dev->ethtool->rss_lock);
+ ethnl_ops_complete(dev);
+exit_dev_unlock:
+ netdev_unlock_ops(dev);
+ rtnl_unlock();
+exit_free_dev:
+ ethnl_parse_header_dev_put(&req);
+ return ret;
+}
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 5a4fb2539b08..9a45aed508d1 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -54,6 +54,7 @@ static int ipcomp4_err(struct sk_buff *skb, u32 info)
}
/* We always hold one tunnel user reference to indicate a tunnel */
+static struct lock_class_key xfrm_state_lock_key;
static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -62,6 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
t = xfrm_state_alloc(net);
if (!t)
goto out;
+ lockdep_set_class(&t->lock, &xfrm_state_lock_key);
t->id.proto = IPPROTO_IPIP;
t->id.spi = x->props.saddr.a4;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 31149a0ac849..71a956fbfc55 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3751,6 +3751,19 @@ int tcp_set_window_clamp(struct sock *sk, int val)
return 0;
}
+int tcp_sock_set_maxseg(struct sock *sk, int val)
+{
+ /* Values greater than interface MTU won't take effect. However
+ * at the point when this call is done we typically don't yet
+ * know which interface is going to be used
+ */
+ if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW))
+ return -EINVAL;
+
+ tcp_sk(sk)->rx_opt.user_mss = val;
+ return 0;
+}
+
/*
* Socket option code for TCP.
*/
@@ -3883,15 +3896,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case TCP_MAXSEG:
- /* Values greater than interface MTU won't take effect. However
- * at the point when this call is done we typically don't yet
- * know which interface is going to be used
- */
- if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
- err = -EINVAL;
- break;
- }
- tp->rx_opt.user_mss = val;
+ err = tcp_sock_set_maxseg(sk, val);
break;
case TCP_NODELAY:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 672cbfbdcec1..71b76e98371a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5549,6 +5549,10 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ /* Do nothing if our queues are empty. */
+ if (!atomic_read(&sk->sk_rmem_alloc))
+ return -1;
+
NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
if (!tcp_can_ingest(sk, in_skb))
@@ -5911,7 +5915,11 @@ step1:
if (!th->rst) {
if (th->syn)
goto syn_challenge;
- NET_INC_STATS(sock_net(sk), LINUX_MIB_BEYOND_WINDOW);
+
+ if (reason == SKB_DROP_REASON_TCP_INVALID_SEQUENCE ||
+ reason == SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE)
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_BEYOND_WINDOW);
if (!tcp_oow_rate_limited(sock_net(sk), skb,
LINUX_MIB_TCPACKSKIPPEDSEQ,
&tp->last_oow_ack_time))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b616776e3354..caf11920a878 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3330,8 +3330,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if (icsk->icsk_mtup.probe_size)
icsk->icsk_mtup.probe_size = 0;
- if (skb_still_in_host_queue(sk, skb))
- return -EBUSY;
+ if (skb_still_in_host_queue(sk, skb)) {
+ err = -EBUSY;
+ goto out;
+ }
start:
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
@@ -3342,14 +3344,19 @@ start:
}
if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
WARN_ON_ONCE(1);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
+ }
+ if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) {
+ err = -ENOMEM;
+ goto out;
}
- if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
- return -ENOMEM;
}
- if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
- return -EHOSTUNREACH; /* Routing failure or similar. */
+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
+ err = -EHOSTUNREACH; /* Routing failure or similar. */
+ goto out;
+ }
cur_mss = tcp_current_mss(sk);
avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -3360,8 +3367,10 @@ start:
* our retransmit of one segment serves as a zero window probe.
*/
if (avail_wnd <= 0) {
- if (TCP_SKB_CB(skb)->seq != tp->snd_una)
- return -EAGAIN;
+ if (TCP_SKB_CB(skb)->seq != tp->snd_una) {
+ err = -EAGAIN;
+ goto out;
+ }
avail_wnd = cur_mss;
}
@@ -3373,11 +3382,15 @@ start:
}
if (skb->len > len) {
if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
- cur_mss, GFP_ATOMIC))
- return -ENOMEM; /* We'll try again later. */
+ cur_mss, GFP_ATOMIC)) {
+ err = -ENOMEM; /* We'll try again later. */
+ goto out;
+ }
} else {
- if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
- return -ENOMEM;
+ if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) {
+ err = -ENOMEM;
+ goto out;
+ }
diff = tcp_skb_pcount(skb);
tcp_set_skb_tso_segs(skb, cur_mss);
@@ -3431,17 +3444,16 @@ start:
tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
TCP_SKB_CB(skb)->seq, segs, err);
- if (likely(!err)) {
- trace_tcp_retransmit_skb(sk, skb);
- } else if (err != -EBUSY) {
+ if (unlikely(err) && err != -EBUSY)
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
- }
/* To avoid taking spuriously low RTT samples based on a timestamp
* for a transmit that never happened, always mark EVER_RETRANS
*/
TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+out:
+ trace_tcp_retransmit_skb(sk, skb, err);
return err;
}
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 0d31a8c108d4..f28cfd88eaf5 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -202,6 +202,9 @@ struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
goto out;
+ /* set the transport header to ESP */
+ skb_set_transport_header(skb, offset);
+
NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index a1210fd6404e..74d49dd6124d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -111,8 +111,32 @@ static u32 HASH_ADDR(const struct in6_addr *addr)
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
-/* Given src, dst and key, find appropriate for input tunnel. */
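+/* Score @t against the lookup parameters: return true on an exact match,
+ * otherwise track the best-scoring candidate via @ret and @cand_score.
+ */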
+static bool ip6gre_tunnel_match(struct ip6_tnl *t, int dev_type, int link,
+ int *cand_score, struct ip6_tnl **ret)
+{
+ int score = 0;
+
+ if (t->dev->type != ARPHRD_IP6GRE &&
+ t->dev->type != dev_type)
+ return false;
+
+ if (t->parms.link != link)
+ score |= 1;
+ if (t->dev->type != dev_type)
+ score |= 2;
+ if (score == 0) {
+ *ret = t;
+ return true;
+ }
+
+ if (score < *cand_score) {
+ *ret = t;
+ *cand_score = score;
+ }
+ return false;
+}
+/* Given src, dst and key, find appropriate for input tunnel. */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
const struct in6_addr *remote, const struct in6_addr *local,
__be32 key, __be16 gre_proto)
@@ -127,8 +151,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
gre_proto == htons(ETH_P_ERSPAN) ||
gre_proto == htons(ETH_P_ERSPAN2)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
- int score, cand_score = 4;
struct net_device *ndev;
+ int cand_score = 4;
for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -137,22 +161,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
@@ -161,22 +171,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
@@ -187,22 +183,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
@@ -210,22 +192,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
!(t->dev->flags & IFF_UP))
continue;
- if (t->dev->type != ARPHRD_IP6GRE &&
- t->dev->type != dev_type)
- continue;
-
- score = 0;
- if (t->parms.link != link)
- score |= 1;
- if (t->dev->type != dev_type)
- score |= 2;
- if (score == 0)
- return t;
-
- if (score < cand_score) {
- cand = t;
- cand_score = score;
- }
+ if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
+ return cand;
}
if (cand)
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 72d4858dec18..8607569de34f 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -71,6 +71,7 @@ static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return 0;
}
+static struct lock_class_key xfrm_state_lock_key;
static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -79,6 +80,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
t = xfrm_state_alloc(net);
if (!t)
goto out;
+ lockdep_set_class(&t->lock, &xfrm_state_lock_key);
t->id.proto = IPPROTO_IPV6;
t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 841c81abaaf4..9005fc156a20 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -202,6 +202,9 @@ struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
if (len <= sizeof(struct ip_esp_hdr) || udpdata32[0] == 0)
goto out;
+ /* set the transport header to ESP */
+ skb_set_transport_header(skb, offset);
+
NAPI_GRO_CB(skb)->proto = IPPROTO_UDP;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index bf140ef781c1..5120a763da0d 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -334,8 +334,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
unsigned int i;
+ xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
xfrm_flush_gc();
- xfrm_state_flush(net, 0, false, true);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1f82f69acfde..2ebde0352245 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1766,7 +1766,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
if (proto == 0)
return -EINVAL;
- err = xfrm_state_flush(net, proto, true, false);
+ err = xfrm_state_flush(net, proto, true);
err2 = unicast_flush_resp(sk, hdr);
if (err || err2) {
if (err == -ESRCH) /* empty table - go quietly */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b99e39cb808b..2ed07fa121ab 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1071,6 +1071,47 @@ ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int
+ieee80211_set_s1g_short_beacon(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link,
+ struct cfg80211_s1g_short_beacon *params)
+{
+ struct s1g_short_beacon_data *new;
+ struct s1g_short_beacon_data *old =
+ sdata_dereference(link->u.ap.s1g_short_beacon, sdata);
+ size_t new_len =
+ sizeof(*new) + params->short_head_len + params->short_tail_len;
+
+ if (!params->update)
+ return 0;
+
+ if (!params->short_head)
+ return -EINVAL;
+
+ new = kzalloc(new_len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ /* Memory layout: | struct | head | tail | */
+ new->short_head = (u8 *)new + sizeof(*new);
+ new->short_head_len = params->short_head_len;
+ memcpy(new->short_head, params->short_head, params->short_head_len);
+
+ if (params->short_tail) {
+ new->short_tail = new->short_head + params->short_head_len;
+ new->short_tail_len = params->short_tail_len;
+ memcpy(new->short_tail, params->short_tail,
+ params->short_tail_len);
+ }
+
+ rcu_assign_pointer(link->u.ap.s1g_short_beacon, new);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+ return 0;
+}
+
static int ieee80211_set_ftm_responder_params(
struct ieee80211_sub_if_data *sdata,
const u8 *lci, size_t lci_len,
@@ -1135,13 +1176,13 @@ ieee80211_copy_rnr_beacon(u8 *pos, struct cfg80211_rnr_elems *dst,
{
int i, offset = 0;
+ dst->cnt = src->cnt;
for (i = 0; i < src->cnt; i++) {
memcpy(pos + offset, src->elem[i].data, src->elem[i].len);
dst->elem[i].len = src->elem[i].len;
dst->elem[i].data = pos + offset;
offset += dst->elem[i].len;
}
- dst->cnt = src->cnt;
return offset;
}
@@ -1341,6 +1382,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_link_data *link;
struct ieee80211_bss_conf *link_conf;
struct ieee80211_chan_req chanreq = { .oper = params->chandef };
+ u64 tsf;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1493,8 +1535,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->twt_responder = params->twt_responder;
link_conf->he_obss_pd = params->he_obss_pd;
link_conf->he_bss_color = params->beacon.he_bss_color;
- sdata->vif.cfg.s1g = params->chandef.chan->band ==
- NL80211_BAND_S1GHZ;
+ link_conf->s1g_long_beacon_period = params->s1g_long_beacon_period;
+ sdata->vif.cfg.s1g = params->chandef.chan->band == NL80211_BAND_S1GHZ;
sdata->vif.cfg.ssid_len = params->ssid_len;
if (params->ssid_len)
@@ -1541,6 +1583,13 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (err < 0)
goto error;
+ if (sdata->vif.cfg.s1g) {
+ err = ieee80211_set_s1g_short_beacon(sdata, link,
+ &params->s1g_short_beacon);
+ if (err < 0)
+ goto error;
+ }
+
err = drv_start_ap(sdata->local, sdata, link_conf);
if (err) {
old = sdata_dereference(link->u.ap.beacon, sdata);
@@ -1555,7 +1604,12 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
goto error;
}
- ieee80211_recalc_dtim(local, sdata);
+ tsf = drv_get_tsf(local, sdata);
+ ieee80211_recalc_dtim(sdata, tsf);
+
+ if (link->u.ap.s1g_short_beacon)
+ ieee80211_recalc_sb_count(sdata, tsf);
+
ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID);
ieee80211_link_info_change_notify(sdata, link, changed);
@@ -1619,6 +1673,13 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
if (err < 0)
return err;
+ if (link->u.ap.s1g_short_beacon) {
+ err = ieee80211_set_s1g_short_beacon(sdata, link,
+ &params->s1g_short_beacon);
+ if (err < 0)
+ return err;
+ }
+
if (beacon->he_bss_color_valid &&
beacon->he_bss_color.enabled != link_conf->he_bss_color.enabled) {
link_conf->he_bss_color.enabled = beacon->he_bss_color.enabled;
@@ -1650,6 +1711,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
struct probe_resp *old_probe_resp;
struct fils_discovery_data *old_fils_discovery;
struct unsol_bcast_probe_resp_data *old_unsol_bcast_probe_resp;
+ struct s1g_short_beacon_data *old_s1g_short_beacon;
struct cfg80211_chan_def chandef;
struct ieee80211_link_data *link =
sdata_dereference(sdata->link[link_id], sdata);
@@ -1668,6 +1730,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
old_unsol_bcast_probe_resp =
sdata_dereference(link->u.ap.unsol_bcast_probe_resp,
sdata);
+ old_s1g_short_beacon =
+ sdata_dereference(link->u.ap.s1g_short_beacon, sdata);
/* abort any running channel switch or color change */
link_conf->csa_active = false;
@@ -1690,6 +1754,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
RCU_INIT_POINTER(link->u.ap.probe_resp, NULL);
RCU_INIT_POINTER(link->u.ap.fils_discovery, NULL);
RCU_INIT_POINTER(link->u.ap.unsol_bcast_probe_resp, NULL);
+ RCU_INIT_POINTER(link->u.ap.s1g_short_beacon, NULL);
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
@@ -1697,6 +1762,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
kfree_rcu(old_fils_discovery, rcu_head);
if (old_unsol_bcast_probe_resp)
kfree_rcu(old_unsol_bcast_probe_resp, rcu_head);
+ if (old_s1g_short_beacon)
+ kfree_rcu(old_s1g_short_beacon, rcu_head);
kfree(link_conf->ftmr_params);
link_conf->ftmr_params = NULL;
@@ -1720,6 +1787,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
link_conf->enable_beacon = false;
sdata->beacon_rate_set = false;
sdata->vif.cfg.ssid_len = 0;
+ sdata->vif.cfg.s1g = false;
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_link_info_change_notify(sdata, link,
BSS_CHANGED_BEACON_ENABLED);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 54c479910d05..1dac78271045 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -704,7 +704,7 @@ static ssize_t ieee80211_if_parse_tsf(
}
}
- ieee80211_recalc_dtim(local, sdata);
+ ieee80211_recalc_dtim(sdata, drv_get_tsf(local, sdata));
return buflen;
}
IEEE80211_IF_FILE_RW(tsf);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 084e2673a27e..8afa2404eaa8 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -296,6 +296,14 @@ struct unsol_bcast_probe_resp_data {
u8 data[];
};
+struct s1g_short_beacon_data {
+ struct rcu_head rcu_head;
+ u8 *short_head;
+ u8 *short_tail;
+ int short_head_len;
+ int short_tail_len;
+};
+
struct ps_data {
/* yes, this looks ugly, but guarantees that we can later use
* bitmap_empty :)
@@ -306,6 +314,7 @@ struct ps_data {
atomic_t num_sta_ps; /* number of stations in PS mode */
int dtim_count;
bool dtim_bc_mc;
+ int sb_count; /* num short beacons until next long beacon */
};
struct ieee80211_if_ap {
@@ -1042,6 +1051,7 @@ struct ieee80211_link_data_ap {
struct probe_resp __rcu *probe_resp;
struct fils_discovery_data __rcu *fils_discovery;
struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp;
+ struct s1g_short_beacon_data __rcu *s1g_short_beacon;
/* to be used after channel switch. */
struct cfg80211_beacon_data *next_beacon;
@@ -1242,7 +1252,9 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
if ((_link = rcu_dereference((___sdata)->link[___link_id])))
#define for_each_link_data(sdata, __link) \
- struct ieee80211_sub_if_data *__sdata = sdata; \
+ /* outer loop just to define the variable ... */ \
+ for (struct ieee80211_sub_if_data *__sdata = (sdata); __sdata; \
+ __sdata = NULL /* always stop */) \
for (int __link_id = 0; \
__link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \
if ((!(__sdata)->vif.valid_links || \
@@ -1250,6 +1262,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
((__link) = sdata_dereference((__sdata)->link[__link_id], \
(__sdata))))
+/*
+ * for_each_link_data_rcu should be used under RCU read lock.
+ */
+#define for_each_link_data_rcu(sdata, __link) \
+ /* outer loop just to define the variable ... */ \
+ for (struct ieee80211_sub_if_data *__sdata = (sdata); __sdata; \
+ __sdata = NULL /* always stop */) \
+ for (int __link_id = 0; \
+ __link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \
+ if ((!(__sdata)->vif.valid_links || \
+ (__sdata)->vif.valid_links & BIT(__link_id)) && \
+ ((__link) = rcu_dereference((__sdata)->link[__link_id]))) \
+
static inline int
ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
struct cfg80211_rnr_elems *rnr_elems,
@@ -2750,9 +2775,8 @@ void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy,
struct wiphy_work *work);
int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
struct cfg80211_csa_settings *csa_settings);
-
-void ieee80211_recalc_dtim(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata);
+void ieee80211_recalc_sb_count(struct ieee80211_sub_if_data *sdata, u64 tsf);
+void ieee80211_recalc_dtim(struct ieee80211_sub_if_data *sdata, u64 tsf);
int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
enum ieee80211_chanctx_mode chanmode,
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 997892da8886..b14e9cd9713f 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -6,7 +6,7 @@
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2020, 2022-2024 Intel Corporation
+ * Copyright 2018-2020, 2022-2025 Intel Corporation
*/
#include <crypto/utils.h>
@@ -1354,38 +1354,14 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
}
EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq);
-void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
-{
- struct ieee80211_key *key;
-
- key = container_of(keyconf, struct ieee80211_key, conf);
-
- lockdep_assert_wiphy(key->local->hw.wiphy);
-
- /*
- * if key was uploaded, we assume the driver will/has remove(d)
- * it, so adjust bookkeeping accordingly
- */
- if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
- key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
- if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
- IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
- IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
- increment_tailroom_need_count(key->sdata);
- }
-
- ieee80211_key_free(key, false);
-}
-EXPORT_SYMBOL_GPL(ieee80211_remove_key);
-
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
+ u8 idx, u8 *key_data, u8 key_len,
int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_key *prev_key;
struct ieee80211_key *key;
int err;
struct ieee80211_link_data *link_data =
@@ -1401,8 +1377,37 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
return ERR_PTR(-EINVAL);
- key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx,
- keyconf->keylen, keyconf->key,
+ if (WARN_ON(idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
+ NUM_DEFAULT_BEACON_KEYS))
+ return ERR_PTR(-EINVAL);
+
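+ /* The new key inherits cipher and key length from an existing
+ * group key installed on this link.
+ */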
+ prev_key = wiphy_dereference(local->hw.wiphy,
+ link_data->gtk[idx]);
+ if (!prev_key) {
+ if (idx < NUM_DEFAULT_KEYS) {
+ for (int i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ if (i == idx)
+ continue;
+ prev_key = wiphy_dereference(local->hw.wiphy,
+ link_data->gtk[i]);
+ if (prev_key)
+ break;
+ }
+ } else {
+ /* For IGTK we have 4 and 5 and for BIGTK - 6 and 7 */
+ prev_key = wiphy_dereference(local->hw.wiphy,
+ link_data->gtk[idx ^ 1]);
+ }
+ }
+
+ if (WARN_ON(!prev_key))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(key_len < prev_key->conf.keylen))
+ return ERR_PTR(-EINVAL);
+
+ key = ieee80211_key_alloc(prev_key->conf.cipher, idx,
+ prev_key->conf.keylen, key_data,
0, NULL);
if (IS_ERR(key))
return ERR_CAST(key);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ec60b82af007..9c8f18b258a6 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -408,9 +408,20 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS);
- if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (!changed)
return;
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ return;
+ case NL80211_IFTYPE_MONITOR:
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+ break;
+ default:
+ break;
+ }
+
if (!check_sdata_in_driver(sdata))
return;
@@ -1334,7 +1345,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
GFP_KERNEL);
if (!local->int_scan_req)
return -ENOMEM;
- local->int_scan_req->n_channels = channels;
eth_broadcast_addr(local->int_scan_req->bssid);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d00d9d413c5c..a4a715f6f1c3 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1202,7 +1202,7 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
return -ENOMEM;
}
- ieee80211_recalc_dtim(local, sdata);
+ ieee80211_recalc_dtim(sdata, drv_get_tsf(local, sdata));
ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
netif_carrier_on(sdata->dev);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5298dbbb5341..1008eb8e9b13 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2505,6 +2505,21 @@ static void ieee80211_csa_switch_work(struct wiphy *wiphy,
}
}
+ /*
+ * There is no need to reset these timers as long as some link has
+ * no active CSA; such a link keeps receiving beacons while the
+ * other links switch channel.
+ */
+ for_each_link_data(sdata, link) {
+ if (!link->conf->csa_active)
+ return;
+ }
+
+ /*
+ * Reset the beacon monitor and connection monitor timers only when
+ * the channel switch is in progress on all links of the MLO
+ * connection.
+ */
ieee80211_sta_reset_beacon_monitor(sdata);
ieee80211_sta_reset_conn_monitor(sdata);
}
@@ -4352,9 +4367,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
- return;
-
/*
* Try sending broadcast probe requests for the last three
* probe requests after the first ones failed since some
@@ -4400,9 +4412,6 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
lockdep_assert_wiphy(sdata->local->hw.wiphy);
- if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)))
- return;
-
if (!ieee80211_sdata_running(sdata))
return;
@@ -8473,16 +8482,32 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
}
}
+static bool
+ieee80211_is_csa_in_progress(struct ieee80211_sub_if_data *sdata)
+{
+ /*
+ * In MLO, check the CSA flags 'active' and 'waiting_bcn' for all
+ * the links.
+ */
+ struct ieee80211_link_data *link;
+
+ guard(rcu)();
+
+ for_each_link_data_rcu(sdata, link) {
+ if (!(link->conf->csa_active &&
+ !link->u.mgd.csa.waiting_bcn))
+ return false;
+ }
+
+ return true;
+}
+
static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
timer_container_of(sdata, t, u.mgd.bcn_mon_timer);
- if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
- return;
-
- if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa.waiting_bcn)
+ if (ieee80211_is_csa_in_progress(sdata))
return;
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
@@ -8493,36 +8518,69 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
&sdata->u.mgd.beacon_connection_loss_work);
}
+static unsigned long
+ieee80211_latest_active_link_conn_timeout(struct ieee80211_sub_if_data *sdata)
+{
+ unsigned long latest_timeout = jiffies;
+ unsigned int link_id;
+ struct sta_info *sta;
+
+ guard(rcu)();
+
+ sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+ if (!sta)
+ return 0;
+
+ for (link_id = 0; link_id < ARRAY_SIZE(sta->link);
+ link_id++) {
+ struct link_sta_info *link_sta;
+ unsigned long timeout;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ continue;
+
+ timeout = link_sta->status_stats.last_ack;
+ if (time_before(timeout, link_sta->rx_stats.last_rx))
+ timeout = link_sta->rx_stats.last_rx;
+
+ timeout += IEEE80211_CONNECTION_IDLE_TIME;
+
+ /*
+ * latest_timeout holds the timeout of the link
+ * that will expire last among all links in a
+ * non-AP MLD STA. This ensures that the connection
+ * monitor timer is only reset if at least one link
+ * is still active, and it is scheduled to fire at
+ * the latest possible timeout.
+ */
+ if (time_after(timeout, latest_timeout))
+ latest_timeout = timeout;
+ }
+
+ return latest_timeout;
+}
+
static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
timer_container_of(sdata, t, u.mgd.conn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- unsigned long timeout;
+ unsigned long latest_timeout;
- if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
+ if (ieee80211_is_csa_in_progress(sdata))
return;
- if (sdata->vif.bss_conf.csa_active &&
- !sdata->deflink.u.mgd.csa.waiting_bcn)
- return;
+ latest_timeout = ieee80211_latest_active_link_conn_timeout(sdata);
- sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
- if (!sta)
- return;
-
- timeout = sta->deflink.status_stats.last_ack;
- if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx))
- timeout = sta->deflink.rx_stats.last_rx;
- timeout += IEEE80211_CONNECTION_IDLE_TIME;
-
- /* If timeout is after now, then update timer to fire at
+ /*
+ * If latest timeout is after now, then update timer to fire at
* the later date, but do not actually probe at this time.
*/
- if (time_is_after_jiffies(timeout)) {
- mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
+ if (time_is_after_jiffies(latest_timeout)) {
+ mod_timer(&ifmgd->conn_mon_timer,
+ round_jiffies_up(latest_timeout));
return;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index b414c9e6e61b..4d4ff4d4917a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1532,9 +1532,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
}
if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
- cfg80211_rx_spurious_frame(rx->sdata->dev,
- hdr->addr2,
- GFP_ATOMIC))
+ cfg80211_rx_spurious_frame(rx->sdata->dev, hdr->addr2,
+ rx->link_id, GFP_ATOMIC))
return RX_DROP_U_SPURIOUS;
return RX_DROP;
@@ -1872,7 +1871,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
cfg80211_rx_unexpected_4addr_frame(
rx->sdata->dev, sta->sta.addr,
- GFP_ATOMIC);
+ rx->link_id, GFP_ATOMIC);
return RX_DROP_U_UNEXPECTED_4ADDR_FRAME;
}
/*
@@ -3191,7 +3190,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
if (rx->sta &&
!test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
cfg80211_rx_unexpected_4addr_frame(
- rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
+ rx->sdata->dev, rx->sta->sta.addr, rx->link_id,
+ GFP_ATOMIC);
return RX_DROP;
}
@@ -5106,8 +5106,24 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
sta = sta_info_get_bss(rx->sdata, hdr->addr2);
- if (status->link_valid)
+ if (status->link_valid) {
link_id = status->link_id;
+ } else if (ieee80211_vif_is_mld(&rx->sdata->vif) &&
+ status->freq) {
+ struct ieee80211_link_data *link;
+ struct ieee80211_chanctx_conf *conf;
+
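+ /* The driver did not report a link ID; infer it by matching
+ * the RX frequency against each link's channel.
+ */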
+ for_each_link_data_rcu(rx->sdata, link) {
+ conf = rcu_dereference(link->conf->chanctx_conf);
+ if (!conf || !conf->def.chan)
+ continue;
+
+ if (status->freq == conf->def.chan->center_freq) {
+ link_id = link->link_id;
+ break;
+ }
+ }
+ }
}
if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 94714f8ffd22..ba5fbacbeeda 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -1422,7 +1422,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
return -EOPNOTSUPP;
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ if (sdata->vif.type != NL80211_IFTYPE_STATION || !sdata->vif.cfg.assoc)
return -EINVAL;
switch (oper) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 6fa883a9250d..00671ae45b2f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -612,6 +612,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else
tx->key = NULL;
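+ /* Encap-offloaded frames only need the hardware key pointer;
+ * skip the rest of the key handling.
+ */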
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ if (tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
+ info->control.hw_key = &tx->key->conf;
+ return TX_CONTINUE;
+ }
+
if (tx->key) {
bool skip_hw = false;
@@ -1429,7 +1435,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
{
struct fq *fq = &local->fq;
struct fq_tin *tin = &txqi->tin;
- u32 flow_idx = fq_flow_idx(fq, skb);
+ u32 flow_idx;
ieee80211_set_skb_enqueue_time(skb);
@@ -1445,6 +1451,7 @@ static void ieee80211_txq_enqueue(struct ieee80211_local *local,
IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
__skb_queue_tail(&txqi->frags, skb);
} else {
+ flow_idx = fq_flow_idx(fq, skb);
fq_tin_enqueue(fq, tin, flow_idx, skb,
fq_skb_free_func);
}
@@ -3877,6 +3884,7 @@ begin:
* The key can be removed while the packet was queued, so need to call
* this here to get the current key.
*/
+ info->control.hw_key = NULL;
r = ieee80211_tx_h_select_key(&tx);
if (r != TX_CONTINUE) {
ieee80211_free_txskb(&local->hw, skb);
@@ -4099,7 +4107,9 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
spin_lock_bh(&local->active_txq_lock[txq->ac]);
- has_queue = force || txq_has_queue(txq);
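+ /* Unless forced, do not schedule a txq that is stopped. */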
+ has_queue = force ||
+ (!test_bit(IEEE80211_TXQ_STOP, &txqi->flags) &&
+ txq_has_queue(txq));
if (list_empty(&txqi->schedule_order) &&
(has_queue || ieee80211_txq_keep_active(txqi))) {
/* If airtime accounting is active, always enqueue STAs at the
@@ -5290,14 +5300,14 @@ ieee80211_beacon_add_mbssid(struct sk_buff *skb, struct beacon_data *beacon,
}
static struct sk_buff *
-ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_data *link,
- struct ieee80211_mutable_offsets *offs,
- bool is_template,
- struct beacon_data *beacon,
- struct ieee80211_chanctx_conf *chanctx_conf,
- u8 ema_index)
+__ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_data *link,
+ struct ieee80211_mutable_offsets *offs,
+ bool is_template,
+ struct beacon_data *beacon,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ u8 ema_index)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -5358,6 +5368,71 @@ ieee80211_beacon_get_ap(struct ieee80211_hw *hw,
return skb;
}
+static bool ieee80211_s1g_need_long_beacon(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_data *link)
+{
+ struct ps_data *ps = &sdata->u.ap.ps;
+
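+ /* Count down the remaining short beacons; a long beacon is due
+ * when the counter reaches zero.
+ */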
+ if (ps->sb_count == 0)
+ ps->sb_count = link->conf->s1g_long_beacon_period - 1;
+ else
+ ps->sb_count--;
+
+ return ps->sb_count == 0;
+}
+
+static struct sk_buff *
+ieee80211_s1g_short_beacon_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_data *link,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ struct s1g_short_beacon_data *sb,
+ bool is_template)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_if_ap *ap = &sdata->u.ap;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(local->tx_headroom + sb->short_head_len +
+ sb->short_tail_len + 256 +
+ local->hw.extra_beacon_tailroom);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, local->tx_headroom);
+ skb_put_data(skb, sb->short_head, sb->short_head_len);
+
+ ieee80211_beacon_add_tim(sdata, link, &ap->ps, skb, is_template);
+
+ if (sb->short_tail)
+ skb_put_data(skb, sb->short_tail, sb->short_tail_len);
+
+ ieee80211_beacon_get_finish(hw, vif, link, NULL, NULL, skb,
+ chanctx_conf, 0);
+ return skb;
+}
+
+static struct sk_buff *
+ieee80211_beacon_get_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_link_data *link,
+ struct ieee80211_mutable_offsets *offs,
+ bool is_template, struct beacon_data *beacon,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ u8 ema_index, struct s1g_short_beacon_data *s1g_sb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (!sdata->vif.cfg.s1g || !s1g_sb ||
+ ieee80211_s1g_need_long_beacon(sdata, link))
+ return __ieee80211_beacon_get_ap(hw, vif, link, offs,
+ is_template, beacon,
+ chanctx_conf, ema_index);
+
+ return ieee80211_s1g_short_beacon_get(hw, vif, link, chanctx_conf,
+ s1g_sb, is_template);
+}
+
static struct ieee80211_ema_beacons *
ieee80211_beacon_get_ap_ema_list(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5381,7 +5456,7 @@ ieee80211_beacon_get_ap_ema_list(struct ieee80211_hw *hw,
ieee80211_beacon_get_ap(hw, vif, link,
&ema->bcn[ema->cnt].offs,
is_template, beacon,
- chanctx_conf, ema->cnt);
+ chanctx_conf, ema->cnt, NULL);
if (!ema->bcn[ema->cnt].skb)
break;
}
@@ -5410,6 +5485,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
struct ieee80211_sub_if_data *sdata = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_link_data *link;
+ struct s1g_short_beacon_data *s1g_short_bcn = NULL;
rcu_read_lock();
@@ -5431,6 +5507,13 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (!beacon)
goto out;
+ if (vif->cfg.s1g && link->u.ap.s1g_short_beacon) {
+ s1g_short_bcn =
+ rcu_dereference(link->u.ap.s1g_short_beacon);
+ if (!s1g_short_bcn)
+ goto out;
+ }
+
if (ema_beacons) {
*ema_beacons =
ieee80211_beacon_get_ap_ema_list(hw, vif, link,
@@ -5451,8 +5534,8 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
skb = ieee80211_beacon_get_ap(hw, vif, link, offs,
is_template, beacon,
- chanctx_conf,
- ema_index);
+ chanctx_conf, ema_index,
+ s1g_short_bcn);
}
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0d85a382746f..32f1bc5908c5 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3913,10 +3913,8 @@ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
}
EXPORT_SYMBOL(ieee80211_parse_p2p_noa);
-void ieee80211_recalc_dtim(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+void ieee80211_recalc_dtim(struct ieee80211_sub_if_data *sdata, u64 tsf)
{
- u64 tsf = drv_get_tsf(local, sdata);
u64 dtim_count = 0;
u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
u8 dtim_period = sdata->vif.bss_conf.dtim_period;
@@ -3954,6 +3952,33 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
ps->dtim_count = dtim_count;
}
+/*
+ * Given a long beacon period, calculate the current index into
+ * that period to determine the number of TSBTTs until the next TBTT.
+ * It is completely valid to have a short beacon period that differs
+ * from the DTIM period (i.e., a TBTT that is not a DTIM).
+ */
+void ieee80211_recalc_sb_count(struct ieee80211_sub_if_data *sdata, u64 tsf)
+{
+ u32 sb_idx;
+ struct ps_data *ps = &sdata->bss->ps;
+ u8 lb_period = sdata->vif.bss_conf.s1g_long_beacon_period;
+ u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
+
+ /* No mesh / IBSS support for short beaconing */
+ if (tsf == -1ULL || !lb_period ||
+ (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
+ return;
+
+ /* find the current TSBTT index in our lb_period */
+ do_div(tsf, beacon_int);
+ sb_idx = do_div(tsf, lb_period);
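+	/* e.g., 23 elapsed beacon intervals with lb_period == 10 gives
+	 * sb_idx == 3, i.e., 7 TSBTTs until the next TBTT (long beacon).
+	 */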
+
+ /* num TSBTTs until the next TBTT */
+ ps->sb_count = sb_idx ? lb_period - sb_idx : 0;
+}
+
static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx)
{
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 2ad1c41e963e..6c448a0be949 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1387,7 +1387,7 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
* - estimate the faster flow linger time
* - use the above to estimate the amount of byte transferred
* by the faster flow
- * - check that the amount of queued data is greter than the above,
+ * - check that the amount of queued data is greater than the above,
* otherwise do not use the picked, slower, subflow
* We select the subflow with the shorter estimated time to flush
* the queued mem, which basically ensure the above. We just need
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 6ec245fd2778..1a32edf6f343 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -326,6 +326,7 @@ struct mptcp_sock {
int keepalive_cnt;
int keepalive_idle;
int keepalive_intvl;
+ int maxseg;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 3caa0a9d3b38..2c267aff95be 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -798,6 +798,23 @@ unlock:
return ret;
}
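+/* Propagate a TCP-level socket option to every current subflow, stopping at
+ * the first subflow that rejects it.
+ */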
+static int mptcp_setsockopt_all_sf(struct mptcp_sock *msk, int level,
+ int optname, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ int ret = 0;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
sockptr_t optval, unsigned int optlen)
{
@@ -859,6 +876,11 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
&msk->keepalive_cnt,
val);
break;
+ case TCP_MAXSEG:
+ msk->maxseg = val;
+ ret = mptcp_setsockopt_all_sf(msk, SOL_TCP, optname, optval,
+ optlen);
+ break;
default:
ret = -ENOPROTOOPT;
}
@@ -914,10 +936,8 @@ static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int
lock_sock(sk);
ssk = msk->first;
- if (ssk) {
- ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
- goto out;
- }
+ if (ssk)
+ goto get;
ssk = __mptcp_nmpc_sk(msk);
if (IS_ERR(ssk)) {
@@ -925,6 +945,7 @@ static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int
goto out;
}
+get:
ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
out:
@@ -1407,6 +1428,9 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
case TCP_IS_MPTCP:
return mptcp_put_int_option(msk, optval, optlen, 1);
+ case TCP_MAXSEG:
+ return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname,
+ optval, optlen);
}
return -EOPNOTSUPP;
}
@@ -1553,6 +1577,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle);
tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl);
tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt);
+ tcp_sock_set_maxseg(ssk, msk->maxseg);
inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index ad914d2b2e22..6ddff028b81a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -415,6 +415,18 @@ config NET_SCH_BPF
If unsure, say N.
+config NET_SCH_DUALPI2
+ tristate "Dual Queue PI Square (DUALPI2) scheduler"
+ help
+ Say Y here if you want to use the Dual Queue Proportional Integral
+ Controller Improved with a Square scheduling algorithm.
+ For more information, please see https://tools.ietf.org/html/rfc9332
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_dualpi2.
+
+ If unsure, say N.
+
menuconfig NET_SCH_DEFAULT
bool "Allow override default queue discipline"
help
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 904d784902d1..5078ea84e6ad 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o
obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o
obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o
obj-$(CONFIG_NET_SCH_BPF) += bpf_qdisc.o
+obj-$(CONFIG_NET_SCH_DUALPI2) += sch_dualpi2.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_dualpi2.c b/net/sched/sch_dualpi2.c
new file mode 100644
index 000000000000..845375ebd4ea
--- /dev/null
+++ b/net/sched/sch_dualpi2.c
@@ -0,0 +1,1175 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+/* Copyright (C) 2024 Nokia
+ *
+ * Author: Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>
+ * Author: Olga Albisser <olga@albisser.org>
+ * Author: Henrik Steen <henrist@henrist.net>
+ * Author: Olivier Tilmans <olivier.tilmans@nokia.com>
+ * Author: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
+ *
+ * DualPI Improved with a Square (dualpi2):
+ * - Supports congestion controls that comply with the Prague requirements
+ * in RFC9331 (e.g. TCP-Prague)
+ * - Supports coupled dual-queue with PI2 as defined in RFC9332
+ * - Supports ECN L4S-identifier (IP.ECN==0b*1)
+ *
+ * note: Although DCTCP and BBRv3 can use shallow-threshold ECN marks,
+ * they do not meet the 'Prague L4S Requirements' listed in RFC 9331
+ * Section 4, so they can only be used with DualPI2 in a datacenter
+ * context.
+ *
+ * References:
+ * - RFC9332: https://datatracker.ietf.org/doc/html/rfc9332
+ * - De Schepper, Koen, et al. "PI 2: A linearized AQM for both classic and
+ * scalable TCP." in proc. ACM CoNEXT'16, 2016.
+ */
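+
+/*
+ * Typical usage (illustrative; assumes a dualpi2-aware iproute2):
+ *   tc qdisc replace dev eth0 root dualpi2
+ */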
+
+#include <linux/errno.h>
+#include <linux/hrtimer.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include <net/gso.h>
+#include <net/inet_ecn.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+/* A 32b probability enables supporting flows with windows of up to
+ * ~8.6 * 1e9 packets, i.e., twice the maximal snd_cwnd.
+ * MAX_PROB must be consistent with the RNG in dualpi2_roll().
+ */
+#define MAX_PROB U32_MAX
+
+/* alpha/beta values exchanged over netlink are in units of 256ns */
+#define ALPHA_BETA_SHIFT 8
+
+/* Scaled values of alpha/beta must fit in 32b to avoid overflow in later
+ * computations. Consequently (see dualpi2_scale_alpha_beta()), their
+ * netlink-provided values can use at most 31b, i.e. be at most (2^31)-1
+ * (~8MHz) as those are given in 1/256th. This enables tuning alpha/beta to
+ * control flows whose maximal RTTs range from a few usec up to a few secs.
+ */
+#define ALPHA_BETA_MAX ((1U << 31) - 1)
+
+/* Internal alpha/beta are in units of 64ns.
+ * This allows using all alpha/beta values in the allowed range without loss
+ * of precision due to rounding when scaling them internally, e.g.,
+ * scale_alpha_beta(1) will not round down to 0.
+ */
+#define ALPHA_BETA_GRANULARITY 6
+
+#define ALPHA_BETA_SCALING (ALPHA_BETA_SHIFT - ALPHA_BETA_GRANULARITY)
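+
+/* e.g., the default alpha of 41 (~0.16 Hz, as values are in 1/256th units)
+ * scales internally to roughly 41 * 2^30 / NSEC_PER_SEC ~= 44.
+ */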
+
+/* We express the weights (wc, wl) in %, i.e., wc + wl = 100 */
+#define MAX_WC 100
+
+struct dualpi2_sched_data {
+ struct Qdisc *l_queue; /* The L4S Low latency queue (L-queue) */
+ struct Qdisc *sch; /* The Classic queue (C-queue) */
+
+ /* Registered tc filters */
+ struct tcf_proto __rcu *tcf_filters;
+ struct tcf_block *tcf_block;
+
+ /* PI2 parameters */
+ u64 pi2_target; /* Target delay in nanoseconds */
+ u32 pi2_tupdate; /* Timer frequency in nanoseconds */
+ u32 pi2_prob; /* Base PI probability */
+ u32 pi2_alpha; /* Gain factor for the integral rate response */
+ u32 pi2_beta; /* Gain factor for the proportional response */
+ struct hrtimer pi2_timer; /* prob update timer */
+
+ /* Step AQM (L-queue only) parameters */
+ u32 step_thresh; /* Step threshold */
+ bool step_in_packets; /* Step thresh in packets (1) or time (0) */
+
+ /* C-queue starvation protection */
+ s32 c_protection_credit; /* Credit (sign indicates which queue) */
+ s32 c_protection_init; /* Reset value of the credit */
+ u8 c_protection_wc; /* C-queue weight (between 0 and MAX_WC) */
+ u8 c_protection_wl; /* L-queue weight (MAX_WC - wc) */
+
+ /* General dualQ parameters */
+ u32 memory_limit; /* Memory limit of both queues */
+ u8 coupling_factor;/* Coupling factor (k) between both queues */
+ u8 ecn_mask; /* Mask to match packets into L-queue */
+ u32 min_qlen_step; /* Minimum queue length to apply step thresh */
+ bool drop_early; /* Drop at enqueue (1) instead of dequeue (0) */
+ bool drop_overload; /* Drop (1) on overload, or overflow (0) */
+ bool split_gso; /* Split aggregated skb (1) or leave as is (0) */
+
+ /* Statistics */
+ u64 c_head_ts; /* Enqueue timestamp of the C-queue head */
+ u64 l_head_ts; /* Enqueue timestamp of the L-queue head */
+ u64 last_qdelay; /* Q delay val at the last probability update */
+ u32 packets_in_c; /* Enqueue packet counter of the C-queue */
+ u32 packets_in_l; /* Enqueue packet counter of the L-queue */
+ u32 maxq; /* Maximum queue size of the C-queue */
+ u32 ecn_mark; /* ECN mark pkt counter due to PI probability */
+ u32 step_marks; /* ECN mark pkt counter due to step AQM */
+ u32 memory_used; /* Memory used of both queues */
+ u32 max_memory_used;/* Maximum used memory */
+
+ /* Deferred drop statistics */
+ u32 deferred_drops_cnt; /* Packets dropped */
+ u32 deferred_drops_len; /* Bytes dropped */
+};
+
+struct dualpi2_skb_cb {
+ u64 ts; /* Timestamp at enqueue */
+ u8 apply_step:1, /* Can we apply the step threshold */
+ classified:2, /* Packet classification results */
+ ect:2; /* Packet ECT codepoint */
+};
+
+enum dualpi2_classification_results {
+ DUALPI2_C_CLASSIC = 0, /* C-queue */
+ DUALPI2_C_L4S = 1, /* L-queue (scale mark/classic drop) */
+ DUALPI2_C_LLLL = 2, /* L-queue (no drops/marks) */
+ __DUALPI2_C_MAX /* Keep last*/
+};
+
+static struct dualpi2_skb_cb *dualpi2_skb_cb(struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct dualpi2_skb_cb));
+ return (struct dualpi2_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static u64 dualpi2_sojourn_time(struct sk_buff *skb, u64 reference)
+{
+ return reference - dualpi2_skb_cb(skb)->ts;
+}
+
+static u64 head_enqueue_time(struct Qdisc *q)
+{
+ struct sk_buff *skb = qdisc_peek_head(q);
+
+ return skb ? dualpi2_skb_cb(skb)->ts : 0;
+}
+
+static u32 dualpi2_scale_alpha_beta(u32 param)
+{
+ u64 tmp = ((u64)param * MAX_PROB >> ALPHA_BETA_SCALING);
+
+ do_div(tmp, NSEC_PER_SEC);
+ return tmp;
+}
+
+static u32 dualpi2_unscale_alpha_beta(u32 param)
+{
+ u64 tmp = ((u64)param * NSEC_PER_SEC << ALPHA_BETA_SCALING);
+
+ do_div(tmp, MAX_PROB);
+ return tmp;
+}
+
+static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q)
+{
+ return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate);
+}
+
+static bool skb_is_l4s(struct sk_buff *skb)
+{
+ return dualpi2_skb_cb(skb)->classified == DUALPI2_C_L4S;
+}
+
+static bool skb_in_l_queue(struct sk_buff *skb)
+{
+ return dualpi2_skb_cb(skb)->classified != DUALPI2_C_CLASSIC;
+}
+
+static bool skb_apply_step(struct sk_buff *skb, struct dualpi2_sched_data *q)
+{
+ return skb_is_l4s(skb) && qdisc_qlen(q->l_queue) >= q->min_qlen_step;
+}
+
+static bool dualpi2_mark(struct dualpi2_sched_data *q, struct sk_buff *skb)
+{
+ if (INET_ECN_set_ce(skb)) {
+ q->ecn_mark++;
+ return true;
+ }
+ return false;
+}
+
+static void dualpi2_reset_c_protection(struct dualpi2_sched_data *q)
+{
+ q->c_protection_credit = q->c_protection_init;
+}
+
+/* This computes the initial credit value and WRR weight for the L queue (wl)
+ * from the weight of the C queue (wc).
+ * If wl > wc, the scheduler will start with the L queue when reset.
+ */
+static void dualpi2_calculate_c_protection(struct Qdisc *sch,
+ struct dualpi2_sched_data *q, u32 wc)
+{
+ q->c_protection_wc = wc;
+ q->c_protection_wl = MAX_WC - wc;
+ q->c_protection_init = (s32)psched_mtu(qdisc_dev(sch)) *
+ ((int)q->c_protection_wc - (int)q->c_protection_wl);
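+	/* e.g., wc = 10 with a ~1500B MTU gives init ~= 1500 * (10 - 90) < 0,
+	 * so the L-queue is served first after a reset.
+	 */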
+ dualpi2_reset_c_protection(q);
+}
+
+static bool dualpi2_roll(u32 prob)
+{
+ return get_random_u32() <= prob;
+}
+
+/* Packets in the C-queue are subject to a marking probability pC, which is the
+ * square of the internal PI probability (i.e., they have an overall lower
+ * mark/drop probability). If the qdisc is overloaded, ignore ECT values and
+ * only drop.
+ *
+ * Note that this marking scheme is also applied to L4S packets during overload.
+ * Return true if packet dropping is required in C queue
+ */
+static bool dualpi2_classic_marking(struct dualpi2_sched_data *q,
+ struct sk_buff *skb, u32 prob,
+ bool overload)
+{
+ if (dualpi2_roll(prob) && dualpi2_roll(prob)) {
+ if (overload || dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
+ return true;
+ dualpi2_mark(q, skb);
+ }
+ return false;
+}
+
+/* Packets in the L-queue are subject to a marking probability pL given by the
+ * internal PI probability scaled by the coupling factor.
+ *
+ * On overload (i.e., @local_l_prob is >= 100%):
+ * - if the qdisc is configured to trade losses to preserve latency (i.e.,
+ * @q->drop_overload), apply classic drops first before marking.
+ * - otherwise, preserve the "no loss" property of ECN at the cost of queueing
+ * delay, eventually resulting in taildrop behavior once sch->limit is
+ * reached.
+ * Return true if packet dropping is required in L queue
+ */
+static bool dualpi2_scalable_marking(struct dualpi2_sched_data *q,
+ struct sk_buff *skb,
+ u64 local_l_prob, u32 prob,
+ bool overload)
+{
+ if (overload) {
+ /* Apply classic drop */
+ if (!q->drop_overload ||
+ !(dualpi2_roll(prob) && dualpi2_roll(prob)))
+ goto mark;
+ return true;
+ }
+
+ /* We can safely cut the upper 32b as overload==false */
+ if (dualpi2_roll(local_l_prob)) {
+		/* Non-ECT packets may have been classified as L4S by filters. */
+ if (dualpi2_skb_cb(skb)->ect == INET_ECN_NOT_ECT)
+ return true;
+mark:
+ dualpi2_mark(q, skb);
+ }
+ return false;
+}
+
+/* Decide whether a given packet must be dropped (or marked if ECT), according
+ * to the PI2 probability.
+ *
+ * Never mark/drop if we have a standing queue of less than 2 MTUs.
+ */
+static bool must_drop(struct Qdisc *sch, struct dualpi2_sched_data *q,
+ struct sk_buff *skb)
+{
+ u64 local_l_prob;
+ bool overload;
+ u32 prob;
+
+ if (sch->qstats.backlog < 2 * psched_mtu(qdisc_dev(sch)))
+ return false;
+
+ prob = READ_ONCE(q->pi2_prob);
+ local_l_prob = (u64)prob * q->coupling_factor;
+ overload = local_l_prob > MAX_PROB;
+
+ switch (dualpi2_skb_cb(skb)->classified) {
+ case DUALPI2_C_CLASSIC:
+ return dualpi2_classic_marking(q, skb, prob, overload);
+ case DUALPI2_C_L4S:
+ return dualpi2_scalable_marking(q, skb, local_l_prob, prob,
+ overload);
+ default: /* DUALPI2_C_LLLL */
+ return false;
+ }
+}
+
+static void dualpi2_read_ect(struct sk_buff *skb)
+{
+ struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
+ int wlen = skb_network_offset(skb);
+
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ goto not_ecn;
+
+ cb->ect = ipv4_get_dsfield(ip_hdr(skb)) & INET_ECN_MASK;
+ break;
+ case htons(ETH_P_IPV6):
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ goto not_ecn;
+
+ cb->ect = ipv6_get_dsfield(ipv6_hdr(skb)) & INET_ECN_MASK;
+ break;
+ default:
+ goto not_ecn;
+ }
+ return;
+
+not_ecn:
+	/* Non-pullable/writable packets can only be dropped, hence they are
+	 * classified as not ECT.
+ */
+ cb->ect = INET_ECN_NOT_ECT;
+}
+
+static int dualpi2_skb_classify(struct dualpi2_sched_data *q,
+ struct sk_buff *skb)
+{
+ struct dualpi2_skb_cb *cb = dualpi2_skb_cb(skb);
+ struct tcf_result res;
+ struct tcf_proto *fl;
+ int result;
+
+ dualpi2_read_ect(skb);
+ if (cb->ect & q->ecn_mask) {
+ cb->classified = DUALPI2_C_L4S;
+ return NET_XMIT_SUCCESS;
+ }
+
+ if (TC_H_MAJ(skb->priority) == q->sch->handle &&
+ TC_H_MIN(skb->priority) < __DUALPI2_C_MAX) {
+ cb->classified = TC_H_MIN(skb->priority);
+ return NET_XMIT_SUCCESS;
+ }
+
+ fl = rcu_dereference_bh(q->tcf_filters);
+ if (!fl) {
+ cb->classified = DUALPI2_C_CLASSIC;
+ return NET_XMIT_SUCCESS;
+ }
+
+ result = tcf_classify(skb, NULL, fl, &res, false);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ case TC_ACT_TRAP:
+ return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ case TC_ACT_SHOT:
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ }
+#endif
+ cb->classified = TC_H_MIN(res.classid) < __DUALPI2_C_MAX ?
+ TC_H_MIN(res.classid) : DUALPI2_C_CLASSIC;
+ }
+ return NET_XMIT_SUCCESS;
+}
+
+static int dualpi2_enqueue_skb(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct dualpi2_skb_cb *cb;
+
+ if (unlikely(qdisc_qlen(sch) >= sch->limit) ||
+ unlikely((u64)q->memory_used + skb->truesize > q->memory_limit)) {
+ qdisc_qstats_overlimit(sch);
+ if (skb_in_l_queue(skb))
+ qdisc_qstats_overlimit(q->l_queue);
+ return qdisc_drop_reason(skb, sch, to_free,
+ SKB_DROP_REASON_QDISC_OVERLIMIT);
+ }
+
+ if (q->drop_early && must_drop(sch, q, skb)) {
+ qdisc_drop_reason(skb, sch, to_free,
+ SKB_DROP_REASON_QDISC_CONGESTED);
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ }
+
+ cb = dualpi2_skb_cb(skb);
+ cb->ts = ktime_get_ns();
+ q->memory_used += skb->truesize;
+ if (q->memory_used > q->max_memory_used)
+ q->max_memory_used = q->memory_used;
+
+ if (qdisc_qlen(sch) > q->maxq)
+ q->maxq = qdisc_qlen(sch);
+
+ if (skb_in_l_queue(skb)) {
+ /* Apply step thresh if skb is L4S && L-queue len >= min_qlen */
+ dualpi2_skb_cb(skb)->apply_step = skb_apply_step(skb, q);
+
+ /* Keep the overall qdisc stats consistent */
+ ++sch->q.qlen;
+ qdisc_qstats_backlog_inc(sch, skb);
+ ++q->packets_in_l;
+ if (!q->l_head_ts)
+ q->l_head_ts = cb->ts;
+ return qdisc_enqueue_tail(skb, q->l_queue);
+ }
+ ++q->packets_in_c;
+ if (!q->c_head_ts)
+ q->c_head_ts = cb->ts;
+ return qdisc_enqueue_tail(skb, sch);
+}
+
+/* By default, dualpi2 will split GSO skbs into independent skbs and enqueue
+ * each of those individually. This yields the following benefits, at the
+ * expense of CPU usage:
+ * - Finer-grained AQM actions as the sub-packets of a burst no longer share the
+ * same fate (e.g., the random mark/drop probability is applied individually)
+ * - Improved precision of the starvation protection/WRR scheduler at dequeue,
+ * as the size of the dequeued packets will be smaller.
+ */
+static int dualpi2_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ err = dualpi2_skb_classify(q, skb);
+ if (err != NET_XMIT_SUCCESS) {
+ if (err & __NET_XMIT_BYPASS)
+ qdisc_qstats_drop(sch);
+ __qdisc_drop(skb, to_free);
+ return err;
+ }
+
+ if (q->split_gso && skb_is_gso(skb)) {
+ netdev_features_t features;
+ struct sk_buff *nskb, *next;
+ int cnt, byte_len, orig_len;
+ int err;
+
+ features = netif_skb_features(skb);
+ nskb = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR_OR_NULL(nskb))
+ return qdisc_drop(skb, sch, to_free);
+
+ cnt = 1;
+ byte_len = 0;
+ orig_len = qdisc_pkt_len(skb);
+ skb_list_walk_safe(nskb, nskb, next) {
+ skb_mark_not_on_list(nskb);
+
+ /* Iterate through GSO fragments of an skb:
+ * (1) Set pkt_len from the single GSO fragments
+ * (2) Copy classified and ect values of an skb
+ * (3) Enqueue fragment & set ts in dualpi2_enqueue_skb
+ */
+ qdisc_skb_cb(nskb)->pkt_len = nskb->len;
+ dualpi2_skb_cb(nskb)->classified =
+ dualpi2_skb_cb(skb)->classified;
+ dualpi2_skb_cb(nskb)->ect = dualpi2_skb_cb(skb)->ect;
+ err = dualpi2_enqueue_skb(nskb, sch, to_free);
+
+ if (err == NET_XMIT_SUCCESS) {
+ /* Compute the backlog adjustment that needs
+ * to be propagated in the qdisc tree to reflect
+ * all new skbs successfully enqueued.
+ */
+ ++cnt;
+ byte_len += nskb->len;
+ }
+ }
+ if (cnt > 1) {
+ /* The caller will add the original skb stats to its
+ * backlog, compensate this if any nskb is enqueued.
+ */
+ --cnt;
+ byte_len -= orig_len;
+ }
+ qdisc_tree_reduce_backlog(sch, -cnt, -byte_len);
+ consume_skb(skb);
+ return err;
+ }
+ return dualpi2_enqueue_skb(skb, sch, to_free);
+}
+
+/* Select the queue from which the next packet can be dequeued, ensuring that
+ * neither queue can starve the other with a WRR scheduler.
+ *
+ * The sign of the WRR credit determines the next queue, while the size of
+ * the dequeued packet determines the magnitude of the WRR credit change. If
+ * either queue is empty, the WRR credit is kept unchanged.
+ *
+ * As the dequeued packet can be dropped later, the caller has to perform the
+ * qdisc_bstats_update() calls.
+ */
+static struct sk_buff *dequeue_packet(struct Qdisc *sch,
+ struct dualpi2_sched_data *q,
+ int *credit_change,
+ u64 now)
+{
+ struct sk_buff *skb = NULL;
+ int c_len;
+
+ *credit_change = 0;
+ c_len = qdisc_qlen(sch) - qdisc_qlen(q->l_queue);
+ if (qdisc_qlen(q->l_queue) && (!c_len || q->c_protection_credit <= 0)) {
+ skb = __qdisc_dequeue_head(&q->l_queue->q);
+ WRITE_ONCE(q->l_head_ts, head_enqueue_time(q->l_queue));
+ if (c_len)
+ *credit_change = q->c_protection_wc;
+ qdisc_qstats_backlog_dec(q->l_queue, skb);
+
+ /* Keep the global queue size consistent */
+ --sch->q.qlen;
+ q->memory_used -= skb->truesize;
+ } else if (c_len) {
+ skb = __qdisc_dequeue_head(&sch->q);
+ WRITE_ONCE(q->c_head_ts, head_enqueue_time(sch));
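+		/* If the L-queue also holds packets, debit the credit by
+		 * wl * pkt_len (the negation below yields -wl) so that serving
+		 * the C-queue pushes the WRR back toward the L-queue.
+		 */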
+ if (qdisc_qlen(q->l_queue))
+ *credit_change = ~((s32)q->c_protection_wl) + 1;
+ q->memory_used -= skb->truesize;
+ } else {
+ dualpi2_reset_c_protection(q);
+ return NULL;
+ }
+ *credit_change *= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
+ return skb;
+}
+
+static int do_step_aqm(struct dualpi2_sched_data *q, struct sk_buff *skb,
+ u64 now)
+{
+ u64 qdelay = 0;
+
+ if (q->step_in_packets)
+ qdelay = qdisc_qlen(q->l_queue);
+ else
+ qdelay = dualpi2_sojourn_time(skb, now);
+
+ if (dualpi2_skb_cb(skb)->apply_step && qdelay > q->step_thresh) {
+ if (!dualpi2_skb_cb(skb)->ect) {
+ /* Drop this non-ECT packet */
+ return 1;
+ }
+
+ if (dualpi2_mark(q, skb))
+ ++q->step_marks;
+ }
+ qdisc_bstats_update(q->l_queue, skb);
+ return 0;
+}
+
+static void drop_and_retry(struct dualpi2_sched_data *q, struct sk_buff *skb,
+ struct Qdisc *sch, enum skb_drop_reason reason)
+{
+ ++q->deferred_drops_cnt;
+ q->deferred_drops_len += qdisc_pkt_len(skb);
+ kfree_skb_reason(skb, reason);
+ qdisc_qstats_drop(sch);
+}
+
+static struct sk_buff *dualpi2_qdisc_dequeue(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ int credit_change;
+ u64 now;
+
+ now = ktime_get_ns();
+
+ while ((skb = dequeue_packet(sch, q, &credit_change, now))) {
+ if (!q->drop_early && must_drop(sch, q, skb)) {
+ drop_and_retry(q, skb, sch,
+ SKB_DROP_REASON_QDISC_CONGESTED);
+ continue;
+ }
+
+ if (skb_in_l_queue(skb) && do_step_aqm(q, skb, now)) {
+ qdisc_qstats_drop(q->l_queue);
+ drop_and_retry(q, skb, sch,
+ SKB_DROP_REASON_DUALPI2_STEP_DROP);
+ continue;
+ }
+
+ q->c_protection_credit += credit_change;
+ qdisc_bstats_update(sch, skb);
+ break;
+ }
+
+ if (q->deferred_drops_cnt) {
+ qdisc_tree_reduce_backlog(sch, q->deferred_drops_cnt,
+ q->deferred_drops_len);
+ q->deferred_drops_cnt = 0;
+ q->deferred_drops_len = 0;
+ }
+ return skb;
+}
+
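+/* Drop the extra ALPHA_BETA_GRANULARITY bits of precision carried by the
+ * internal alpha/beta values, turning the delta into a plain probability
+ * step.
+ */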
+static s64 __scale_delta(u64 diff)
+{
+ do_div(diff, 1 << ALPHA_BETA_GRANULARITY);
+ return diff;
+}
+
+static void get_queue_delays(struct dualpi2_sched_data *q, u64 *qdelay_c,
+ u64 *qdelay_l)
+{
+ u64 now, qc, ql;
+
+ now = ktime_get_ns();
+ qc = READ_ONCE(q->c_head_ts);
+ ql = READ_ONCE(q->l_head_ts);
+
+ *qdelay_c = qc ? now - qc : 0;
+ *qdelay_l = ql ? now - ql : 0;
+}
+
+static u32 calculate_probability(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ u32 new_prob;
+ u64 qdelay_c;
+ u64 qdelay_l;
+ u64 qdelay;
+ s64 delta;
+
+ get_queue_delays(q, &qdelay_c, &qdelay_l);
+ qdelay = max(qdelay_l, qdelay_c);
+
+	/* Alpha and beta take at most 32b, i.e., the delay difference would
+ * overflow for queuing delay differences > ~4.2sec.
+ */
+ delta = ((s64)qdelay - (s64)q->pi2_target) * q->pi2_alpha;
+ delta += ((s64)qdelay - (s64)q->last_qdelay) * q->pi2_beta;
+ q->last_qdelay = qdelay;
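+
+	/* i.e., the usual PI update: an integral gain (alpha) on the error
+	 * versus the target plus a proportional gain (beta) on the change in
+	 * queuing delay.
+	 */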
+
+ /* Bound new_prob between 0 and MAX_PROB */
+ if (delta > 0) {
+ new_prob = __scale_delta(delta) + q->pi2_prob;
+ if (new_prob < q->pi2_prob)
+ new_prob = MAX_PROB;
+ } else {
+ new_prob = q->pi2_prob - __scale_delta(~delta + 1);
+ if (new_prob > q->pi2_prob)
+ new_prob = 0;
+ }
+
+ /* If we do not drop on overload, ensure we cap the L4S probability to
+ * 100% to keep window fairness when overflowing.
+ */
+ if (!q->drop_overload)
+ return min_t(u32, new_prob, MAX_PROB / q->coupling_factor);
+ return new_prob;
+}
+
+static u32 get_memory_limit(struct Qdisc *sch, u32 limit)
+{
+	/* Apply a rule of thumb, i.e., double the packet length, to account
+	 * for per-packet overhead in memory_limit.
+ */
+ u64 memlim = mul_u32_u32(limit, 2 * psched_mtu(qdisc_dev(sch)));
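+	/* e.g., the default limit of 10000 packets with a ~1500B MTU caps
+	 * memory_limit at roughly 30MB.
+	 */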
+
+ if (upper_32_bits(memlim))
+ return U32_MAX;
+ else
+ return lower_32_bits(memlim);
+}
+
+static u32 convert_us_to_nsec(u32 us)
+{
+ u64 ns = mul_u32_u32(us, NSEC_PER_USEC);
+
+ if (upper_32_bits(ns))
+ return U32_MAX;
+
+ return lower_32_bits(ns);
+}
+
+static u32 convert_ns_to_usec(u64 ns)
+{
+ do_div(ns, NSEC_PER_USEC);
+ if (upper_32_bits(ns))
+ return U32_MAX;
+
+ return lower_32_bits(ns);
+}
+
+static enum hrtimer_restart dualpi2_timer(struct hrtimer *timer)
+{
+ struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer);
+ struct Qdisc *sch = q->sch;
+ spinlock_t *root_lock; /* to lock qdisc for probability calculations */
+
+ rcu_read_lock();
+ root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ spin_lock(root_lock);
+
+ WRITE_ONCE(q->pi2_prob, calculate_probability(sch));
+ hrtimer_set_expires(&q->pi2_timer, next_pi2_timeout(q));
+
+ spin_unlock(root_lock);
+ rcu_read_unlock();
+ return HRTIMER_RESTART;
+}
+
+static struct netlink_range_validation dualpi2_alpha_beta_range = {
+ .min = 1,
+ .max = ALPHA_BETA_MAX,
+};
+
+static const struct nla_policy dualpi2_policy[TCA_DUALPI2_MAX + 1] = {
+ [TCA_DUALPI2_LIMIT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [TCA_DUALPI2_MEMORY_LIMIT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [TCA_DUALPI2_TARGET] = { .type = NLA_U32 },
+ [TCA_DUALPI2_TUPDATE] = NLA_POLICY_MIN(NLA_U32, 1),
+ [TCA_DUALPI2_ALPHA] =
+ NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
+ [TCA_DUALPI2_BETA] =
+ NLA_POLICY_FULL_RANGE(NLA_U32, &dualpi2_alpha_beta_range),
+ [TCA_DUALPI2_STEP_THRESH_PKTS] = { .type = NLA_U32 },
+ [TCA_DUALPI2_STEP_THRESH_US] = { .type = NLA_U32 },
+ [TCA_DUALPI2_MIN_QLEN_STEP] = { .type = NLA_U32 },
+ [TCA_DUALPI2_COUPLING] = NLA_POLICY_MIN(NLA_U8, 1),
+ [TCA_DUALPI2_DROP_OVERLOAD] =
+ NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_OVERLOAD_MAX),
+ [TCA_DUALPI2_DROP_EARLY] =
+ NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_DROP_EARLY_MAX),
+ [TCA_DUALPI2_C_PROTECTION] =
+ NLA_POLICY_RANGE(NLA_U8, 0, MAX_WC),
+ [TCA_DUALPI2_ECN_MASK] =
+ NLA_POLICY_RANGE(NLA_U8, TC_DUALPI2_ECN_MASK_L4S_ECT,
+ TCA_DUALPI2_ECN_MASK_MAX),
+ [TCA_DUALPI2_SPLIT_GSO] =
+ NLA_POLICY_MAX(NLA_U8, TCA_DUALPI2_SPLIT_GSO_MAX),
+};
+
+static int dualpi2_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_DUALPI2_MAX + 1];
+ struct dualpi2_sched_data *q;
+ int old_backlog;
+ int old_qlen;
+ int err;
+
+ if (!opt || !nla_len(opt)) {
+ NL_SET_ERR_MSG_MOD(extack, "Dualpi2 options are required");
+ return -EINVAL;
+ }
+ err = nla_parse_nested(tb, TCA_DUALPI2_MAX, opt, dualpi2_policy,
+ extack);
+ if (err < 0)
+ return err;
+ if (tb[TCA_DUALPI2_STEP_THRESH_PKTS] && tb[TCA_DUALPI2_STEP_THRESH_US]) {
+ NL_SET_ERR_MSG_MOD(extack, "multiple step thresh attributes");
+ return -EINVAL;
+ }
+
+ q = qdisc_priv(sch);
+ sch_tree_lock(sch);
+
+ if (tb[TCA_DUALPI2_LIMIT]) {
+ u32 limit = nla_get_u32(tb[TCA_DUALPI2_LIMIT]);
+
+ WRITE_ONCE(sch->limit, limit);
+ WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit));
+ }
+
+ if (tb[TCA_DUALPI2_MEMORY_LIMIT])
+ WRITE_ONCE(q->memory_limit,
+ nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT]));
+
+ if (tb[TCA_DUALPI2_TARGET]) {
+ u64 target = nla_get_u32(tb[TCA_DUALPI2_TARGET]);
+
+ WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC);
+ }
+
+ if (tb[TCA_DUALPI2_TUPDATE]) {
+ u64 tupdate = nla_get_u32(tb[TCA_DUALPI2_TUPDATE]);
+
+ WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate));
+ }
+
+ if (tb[TCA_DUALPI2_ALPHA]) {
+ u32 alpha = nla_get_u32(tb[TCA_DUALPI2_ALPHA]);
+
+ WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha));
+ }
+
+ if (tb[TCA_DUALPI2_BETA]) {
+ u32 beta = nla_get_u32(tb[TCA_DUALPI2_BETA]);
+
+ WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta));
+ }
+
+ if (tb[TCA_DUALPI2_STEP_THRESH_PKTS]) {
+ u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_PKTS]);
+
+ WRITE_ONCE(q->step_in_packets, true);
+ WRITE_ONCE(q->step_thresh, step_th);
+ } else if (tb[TCA_DUALPI2_STEP_THRESH_US]) {
+ u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_US]);
+
+ WRITE_ONCE(q->step_in_packets, false);
+ WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th));
+ }
+
+ if (tb[TCA_DUALPI2_MIN_QLEN_STEP])
+ WRITE_ONCE(q->min_qlen_step,
+ nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP]));
+
+ if (tb[TCA_DUALPI2_COUPLING]) {
+ u8 coupling = nla_get_u8(tb[TCA_DUALPI2_COUPLING]);
+
+ WRITE_ONCE(q->coupling_factor, coupling);
+ }
+
+ if (tb[TCA_DUALPI2_DROP_OVERLOAD]) {
+ u8 drop_overload = nla_get_u8(tb[TCA_DUALPI2_DROP_OVERLOAD]);
+
+ WRITE_ONCE(q->drop_overload, (bool)drop_overload);
+ }
+
+ if (tb[TCA_DUALPI2_DROP_EARLY]) {
+ u8 drop_early = nla_get_u8(tb[TCA_DUALPI2_DROP_EARLY]);
+
+ WRITE_ONCE(q->drop_early, (bool)drop_early);
+ }
+
+ if (tb[TCA_DUALPI2_C_PROTECTION]) {
+ u8 wc = nla_get_u8(tb[TCA_DUALPI2_C_PROTECTION]);
+
+ dualpi2_calculate_c_protection(sch, q, wc);
+ }
+
+ if (tb[TCA_DUALPI2_ECN_MASK]) {
+ u8 ecn_mask = nla_get_u8(tb[TCA_DUALPI2_ECN_MASK]);
+
+ WRITE_ONCE(q->ecn_mask, ecn_mask);
+ }
+
+ if (tb[TCA_DUALPI2_SPLIT_GSO]) {
+ u8 split_gso = nla_get_u8(tb[TCA_DUALPI2_SPLIT_GSO]);
+
+ WRITE_ONCE(q->split_gso, (bool)split_gso);
+ }
+
+ old_qlen = qdisc_qlen(sch);
+ old_backlog = sch->qstats.backlog;
+ while (qdisc_qlen(sch) > sch->limit ||
+ q->memory_used > q->memory_limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
+
+ q->memory_used -= skb->truesize;
+ qdisc_qstats_backlog_dec(sch, skb);
+ rtnl_qdisc_drop(skb, sch);
+ }
+ qdisc_tree_reduce_backlog(sch, old_qlen - qdisc_qlen(sch),
+ old_backlog - sch->qstats.backlog);
+
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+/* Default alpha/beta values give a 10dB stability margin with max_rtt=100ms. */
+static void dualpi2_reset_default(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ q->sch->limit = 10000; /* Max 125ms at 1Gbps */
+ q->memory_limit = get_memory_limit(sch, q->sch->limit);
+
+ q->pi2_target = 15 * NSEC_PER_MSEC;
+ q->pi2_tupdate = 16 * NSEC_PER_MSEC;
+ q->pi2_alpha = dualpi2_scale_alpha_beta(41); /* ~0.16 Hz * 256 */
+ q->pi2_beta = dualpi2_scale_alpha_beta(819); /* ~3.20 Hz * 256 */
+
+ q->step_thresh = 1 * NSEC_PER_MSEC;
+ q->step_in_packets = false;
+
+ dualpi2_calculate_c_protection(q->sch, q, 10); /* wc=10%, wl=90% */
+
+ q->ecn_mask = TC_DUALPI2_ECN_MASK_L4S_ECT; /* INET_ECN_ECT_1 */
+ q->min_qlen_step = 0; /* Always apply step mark in L-queue */
+ q->coupling_factor = 2; /* window fairness for equal RTTs */
+ q->drop_overload = TC_DUALPI2_DROP_OVERLOAD_DROP; /* Drop overload */
+ q->drop_early = TC_DUALPI2_DROP_EARLY_DROP_DEQUEUE; /* Drop dequeue */
+ q->split_gso = TC_DUALPI2_SPLIT_GSO_SPLIT_GSO; /* Split GSO */
+}
+
+static int dualpi2_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ q->l_queue = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+ TC_H_MAKE(sch->handle, 1), extack);
+ if (!q->l_queue)
+ return -ENOMEM;
+
+ err = tcf_block_get(&q->tcf_block, &q->tcf_filters, sch, extack);
+ if (err)
+ return err;
+
+ q->sch = sch;
+ dualpi2_reset_default(sch);
+ hrtimer_setup(&q->pi2_timer, dualpi2_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+
+ if (opt && nla_len(opt)) {
+ err = dualpi2_change(sch, opt, extack);
+
+ if (err)
+ return err;
+ }
+
+ hrtimer_start(&q->pi2_timer, next_pi2_timeout(q),
+ HRTIMER_MODE_ABS_PINNED);
+ return 0;
+}
+
+static int dualpi2_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+ bool step_in_pkts;
+ u32 step_th;
+
+ step_in_pkts = READ_ONCE(q->step_in_packets);
+ step_th = READ_ONCE(q->step_thresh);
+
+ opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
+ if (!opts)
+ goto nla_put_failure;
+
+ if (step_in_pkts &&
+ (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
+ READ_ONCE(q->memory_limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_TARGET,
+ convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
+ nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
+ convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
+ nla_put_u32(skb, TCA_DUALPI2_ALPHA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
+ nla_put_u32(skb, TCA_DUALPI2_BETA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
+ nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_PKTS, step_th) ||
+ nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
+ READ_ONCE(q->min_qlen_step)) ||
+ nla_put_u8(skb, TCA_DUALPI2_COUPLING,
+ READ_ONCE(q->coupling_factor)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
+ READ_ONCE(q->drop_overload)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
+ READ_ONCE(q->drop_early)) ||
+ nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
+ READ_ONCE(q->c_protection_wc)) ||
+ nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
+ nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
+ goto nla_put_failure;
+
+ if (!step_in_pkts &&
+ (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT,
+ READ_ONCE(q->memory_limit)) ||
+ nla_put_u32(skb, TCA_DUALPI2_TARGET,
+ convert_ns_to_usec(READ_ONCE(q->pi2_target))) ||
+ nla_put_u32(skb, TCA_DUALPI2_TUPDATE,
+ convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) ||
+ nla_put_u32(skb, TCA_DUALPI2_ALPHA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) ||
+ nla_put_u32(skb, TCA_DUALPI2_BETA,
+ dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) ||
+ nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_US,
+ convert_ns_to_usec(step_th)) ||
+ nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP,
+ READ_ONCE(q->min_qlen_step)) ||
+ nla_put_u8(skb, TCA_DUALPI2_COUPLING,
+ READ_ONCE(q->coupling_factor)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD,
+ READ_ONCE(q->drop_overload)) ||
+ nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY,
+ READ_ONCE(q->drop_early)) ||
+ nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION,
+ READ_ONCE(q->c_protection_wc)) ||
+ nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) ||
+ nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso))))
+ goto nla_put_failure;
+
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -1;
+}
+
+static int dualpi2_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+ struct tc_dualpi2_xstats st = {
+ .prob = READ_ONCE(q->pi2_prob),
+ .packets_in_c = q->packets_in_c,
+ .packets_in_l = q->packets_in_l,
+ .maxq = q->maxq,
+ .ecn_mark = q->ecn_mark,
+ .credit = q->c_protection_credit,
+ .step_marks = q->step_marks,
+ .memory_used = q->memory_used,
+ .max_memory_used = q->max_memory_used,
+ .memory_limit = q->memory_limit,
+ };
+ u64 qc, ql;
+
+ get_queue_delays(q, &qc, &ql);
+ st.delay_l = convert_ns_to_usec(ql);
+ st.delay_c = convert_ns_to_usec(qc);
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+/* Reset both L-queue and C-queue, internal packet counters, PI probability,
+ * C-queue protection credit, and timestamps, while preserving current
+ * configuration of DUALPI2.
+ */
+static void dualpi2_reset(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset_queue(sch);
+ qdisc_reset_queue(q->l_queue);
+ q->c_head_ts = 0;
+ q->l_head_ts = 0;
+ q->pi2_prob = 0;
+ q->packets_in_c = 0;
+ q->packets_in_l = 0;
+ q->maxq = 0;
+ q->ecn_mark = 0;
+ q->step_marks = 0;
+ q->memory_used = 0;
+ q->max_memory_used = 0;
+ dualpi2_reset_c_protection(q);
+}
+
+static void dualpi2_destroy(struct Qdisc *sch)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ q->pi2_tupdate = 0;
+ hrtimer_cancel(&q->pi2_timer);
+ if (q->l_queue)
+ qdisc_put(q->l_queue);
+ tcf_block_put(q->tcf_block);
+}
+
+static struct Qdisc *dualpi2_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long dualpi2_find(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static unsigned long dualpi2_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ return 0;
+}
+
+static void dualpi2_unbind(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_block *dualpi2_tcf_block(struct Qdisc *sch, unsigned long cl,
+ struct netlink_ext_ack *extack)
+{
+ struct dualpi2_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return q->tcf_block;
+}
+
+static void dualpi2_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ unsigned int i;
+
+ if (arg->stop)
+ return;
+
+ /* We statically define only 2 queues */
+ for (i = 0; i < 2; i++) {
+ if (arg->count < arg->skip) {
+ arg->count++;
+ continue;
+ }
+ if (arg->fn(sch, i + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+/* Minimal class support to handle tc filters */
+static const struct Qdisc_class_ops dualpi2_class_ops = {
+ .leaf = dualpi2_leaf,
+ .find = dualpi2_find,
+ .tcf_block = dualpi2_tcf_block,
+ .bind_tcf = dualpi2_bind,
+ .unbind_tcf = dualpi2_unbind,
+ .walk = dualpi2_walk,
+};
+
+static struct Qdisc_ops dualpi2_qdisc_ops __read_mostly = {
+ .id = "dualpi2",
+ .cl_ops = &dualpi2_class_ops,
+ .priv_size = sizeof(struct dualpi2_sched_data),
+ .enqueue = dualpi2_qdisc_enqueue,
+ .dequeue = dualpi2_qdisc_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = dualpi2_init,
+ .destroy = dualpi2_destroy,
+ .reset = dualpi2_reset,
+ .change = dualpi2_change,
+ .dump = dualpi2_dump,
+ .dump_stats = dualpi2_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init dualpi2_module_init(void)
+{
+ return register_qdisc(&dualpi2_qdisc_ops);
+}
+
+static void __exit dualpi2_module_exit(void)
+{
+ unregister_qdisc(&dualpi2_qdisc_ops);
+}
+
+module_init(dualpi2_module_init);
+module_exit(dualpi2_module_exit);
+
+MODULE_DESCRIPTION("Dual Queue with Proportional Integral controller Improved with a Square (dualpi2) scheduler");
+MODULE_AUTHOR("Koen De Schepper <koen.de_schepper@nokia-bell-labs.com>");
+MODULE_AUTHOR("Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>");
+MODULE_AUTHOR("Olga Albisser <olga@albisser.org>");
+MODULE_AUTHOR("Henrik Steen <henrist@henrist.net>");
+MODULE_AUTHOR("Olivier Tilmans <olivier.tilmans@nokia.com>");
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("1.0");
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f0eb70353744..2255355e51d3 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -536,9 +536,6 @@ destroy_class:
static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
- struct qfq_sched *q = qdisc_priv(sch);
-
- qfq_rm_from_agg(q, cl);
gen_kill_estimator(&cl->rate_est);
qdisc_put(cl->qdisc);
kfree(cl);
@@ -559,10 +556,11 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
qdisc_purge_queue(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->common);
- qfq_destroy_class(sch, cl);
+ qfq_rm_from_agg(q, cl);
sch_tree_unlock(sch);
+ qfq_destroy_class(sch, cl);
return 0;
}
@@ -1503,6 +1501,7 @@ static void qfq_destroy_qdisc(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
common.hnode) {
+ qfq_rm_from_agg(q, cl);
qfq_destroy_class(sch, cl);
}
}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index bb5bc6ff09d4..46394eb2086f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -867,7 +867,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
mgmt = (const struct ieee80211_mgmt *)params->buf;
- if (!ieee80211_is_mgmt(mgmt->frame_control))
+ if (!ieee80211_is_mgmt(mgmt->frame_control) ||
+ ieee80211_has_order(mgmt->frame_control))
return -EINVAL;
stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 63f015ce9ad4..89519aa52893 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -482,6 +482,16 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
};
+static const struct nla_policy
+nl80211_s1g_short_beacon[NL80211_S1G_SHORT_BEACON_ATTR_MAX + 1] = {
+ [NL80211_S1G_SHORT_BEACON_ATTR_HEAD] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head,
+ IEEE80211_MAX_DATA_LEN),
+ [NL80211_S1G_SHORT_BEACON_ATTR_TAIL] =
+ NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
+ IEEE80211_MAX_DATA_LEN),
+};
+
static const struct netlink_range_validation nl80211_punct_bitmap_range = {
.min = 0,
.max = 0xffff,
@@ -858,6 +868,9 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_EPCS] = { .type = NLA_FLAG },
[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS] = { .type = NLA_U16 },
[NL80211_ATTR_WIPHY_RADIO_INDEX] = { .type = NLA_U8 },
+ [NL80211_ATTR_S1G_LONG_BEACON_PERIOD] = NLA_POLICY_MIN(NLA_U8, 2),
+ [NL80211_ATTR_S1G_SHORT_BEACON] =
+ NLA_POLICY_NESTED(nl80211_s1g_short_beacon),
};
/* policy for the key attributes */
@@ -6202,6 +6215,41 @@ static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params
return 0;
}
+static int
+nl80211_parse_s1g_short_beacon(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs,
+ struct cfg80211_s1g_short_beacon *sb)
+{
+ struct nlattr *tb[NL80211_S1G_SHORT_BEACON_ATTR_MAX + 1];
+ int ret;
+
+ if (!rdev->wiphy.bands[NL80211_BAND_S1GHZ])
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, NL80211_S1G_SHORT_BEACON_ATTR_MAX, attrs,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+	/* Short beacon tail is optional (i.e., it might only include the TIM) */
+ if (!tb[NL80211_S1G_SHORT_BEACON_ATTR_HEAD])
+ return -EINVAL;
+
+ sb->short_head = nla_data(tb[NL80211_S1G_SHORT_BEACON_ATTR_HEAD]);
+ sb->short_head_len = nla_len(tb[NL80211_S1G_SHORT_BEACON_ATTR_HEAD]);
+ sb->short_tail_len = 0;
+
+ if (tb[NL80211_S1G_SHORT_BEACON_ATTR_TAIL]) {
+ sb->short_tail =
+ nla_data(tb[NL80211_S1G_SHORT_BEACON_ATTR_TAIL]);
+ sb->short_tail_len =
+ nla_len(tb[NL80211_S1G_SHORT_BEACON_ATTR_TAIL]);
+ }
+
+ sb->update = true;
+ return 0;
+}
+
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6442,6 +6490,22 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ if (info->attrs[NL80211_ATTR_S1G_SHORT_BEACON]) {
+ if (!info->attrs[NL80211_ATTR_S1G_LONG_BEACON_PERIOD]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ params->s1g_long_beacon_period = nla_get_u8(
+ info->attrs[NL80211_ATTR_S1G_LONG_BEACON_PERIOD]);
+
+ err = nl80211_parse_s1g_short_beacon(
+ rdev, info->attrs[NL80211_ATTR_S1G_SHORT_BEACON],
+ &params->s1g_short_beacon);
+ if (err)
+ goto out;
+ }
+
err = nl80211_calculate_ap_params(params);
if (err)
goto out;
@@ -6550,6 +6614,14 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ attr = info->attrs[NL80211_ATTR_S1G_SHORT_BEACON];
+ if (attr) {
+ err = nl80211_parse_s1g_short_beacon(rdev, attr,
+ &params->s1g_short_beacon);
+ if (err)
+ goto out;
+ }
+
err = rdev_change_beacon(rdev, dev, params);
out:
@@ -9975,7 +10047,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request = kzalloc(size, GFP_KERNEL);
if (!request)
return -ENOMEM;
- request->req.n_channels = n_channels;
if (n_ssids)
request->req.ssids = (void *)request + ssids_offset;
@@ -17488,6 +17559,7 @@ static int nl80211_set_sar_specs(struct sk_buff *skb, struct genl_info *info)
if (!sar_spec)
return -ENOMEM;
+ sar_spec->num_sub_specs = specs;
sar_spec->type = type;
specs = 0;
nla_for_each_nested(spec_list, tb[NL80211_SAR_ATTR_SPECS], rem) {
@@ -19684,7 +19756,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
EXPORT_SYMBOL(cfg80211_conn_failed);
static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
- const u8 *addr, gfp_t gfp)
+ const u8 *addr, int link_id, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -19707,7 +19779,9 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
+ (link_id >= 0 &&
+ nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -19719,13 +19793,13 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
return true;
}
-bool cfg80211_rx_spurious_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp)
+bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
bool ret;
- trace_cfg80211_rx_spurious_frame(dev, addr);
+ trace_cfg80211_rx_spurious_frame(dev, addr, link_id);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
@@ -19733,19 +19807,19 @@ bool cfg80211_rx_spurious_frame(struct net_device *dev,
return false;
}
ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
- addr, gfp);
+ addr, link_id, gfp);
trace_cfg80211_return_bool(ret);
return ret;
}
EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
-bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp)
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
bool ret;
- trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
+ trace_cfg80211_rx_unexpected_4addr_frame(dev, addr, link_id);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
wdev->iftype != NL80211_IFTYPE_P2P_GO &&
@@ -19755,7 +19829,7 @@ bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
}
ret = __nl80211_unexpected_frame(dev,
NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
- addr, gfp);
+ addr, link_id, gfp);
trace_cfg80211_return_bool(ret);
return ret;
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2524bc187a19..3b0ac3437f81 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -4229,6 +4229,8 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
struct wireless_dev *wdev;
unsigned int link_id;
+ guard(wiphy)(&rdev->wiphy);
+
/* If we finished CAC or received radar, we should end any
* CAC running on the same channels.
* the check !cfg80211_chandef_dfs_usable contain 2 options:
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 6d7a7e7f0fc2..826ec0a6355f 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -83,7 +83,6 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
if (!request)
return -ENOMEM;
- request->req.n_channels = n_channels;
if (wdev->conn->params.channel) {
enum nl80211_band band = wdev->conn->params.channel->band;
struct ieee80211_supported_band *sband =
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a07d88d61bec..34c584a215e5 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -3570,27 +3570,30 @@ TRACE_EVENT(cfg80211_cac_event,
);
DECLARE_EVENT_CLASS(cfg80211_rx_evt,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr),
+ TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id),
+ TP_ARGS(netdev, addr, link_id),
TP_STRUCT__entry(
NETDEV_ENTRY
MAC_ENTRY(addr)
+ __field(int, link_id)
),
TP_fast_assign(
NETDEV_ASSIGN;
MAC_ASSIGN(addr, addr);
+ __entry->link_id = link_id;
),
- TP_printk(NETDEV_PR_FMT ", %pM", NETDEV_PR_ARG, __entry->addr)
+ TP_printk(NETDEV_PR_FMT ", %pM, link_id:%d", NETDEV_PR_ARG,
+ __entry->addr, __entry->link_id)
);
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr)
+ TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id),
+ TP_ARGS(netdev, addr, link_id)
);
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr)
+ TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id),
+ TP_ARGS(netdev, addr, link_id)
);
TRACE_EVENT(cfg80211_ibss_joined,
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 81fd486b5e56..d2819baea414 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -305,7 +305,6 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
return -EINVAL;
}
- xfrm_set_type_offload(x);
if (!x->type_offload) {
NL_SET_ERR_MSG(extack, "Type doesn't support offload");
dev_put(dev);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 7e6a71b9d6a3..c9ddef869aa5 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -503,6 +503,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
/* An encap_type of -1 indicates async resumption. */
if (encap_type == -1) {
async = 1;
+ dev_put(skb->dev);
seq = XFRM_SKB_CB(skb)->seq.input.low;
goto resume;
}
@@ -649,18 +650,18 @@ lock:
XFRM_SKB_CB(skb)->seq.input.low = seq;
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
- dev_hold(skb->dev);
-
- if (crypto_done)
+ if (crypto_done) {
nexthdr = x->type_offload->input_tail(x, skb);
- else
+ } else {
+ dev_hold(skb->dev);
+
nexthdr = x->type->input(x, skb);
+ if (nexthdr == -EINPROGRESS)
+ return 0;
- if (nexthdr == -EINPROGRESS)
- return 0;
+ dev_put(skb->dev);
+ }
resume:
- dev_put(skb->dev);
-
spin_lock(&x->lock);
if (nexthdr < 0) {
if (nexthdr == -EBADMSG) {
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index cb1e12740c87..330a05286a56 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -875,7 +875,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
- if (p.collect_md) {
+ if (p.collect_md || xi->p.collect_md) {
NL_SET_ERR_MSG(extack, "collect_md can't be changed");
return -EINVAL;
}
@@ -886,11 +886,6 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
} else {
if (xi->dev != dev)
return -EEXIST;
- if (xi->p.collect_md) {
- NL_SET_ERR_MSG(extack,
- "device can't be changed to collect_md");
- return -EINVAL;
- }
}
return xfrmi_update(xi, &p);
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 907c3ccb440d..43fdc6ed8dd1 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -97,7 +97,7 @@ static int ipcomp_input_done2(struct sk_buff *skb, int err)
struct ip_comp_hdr *ipch = ip_comp_hdr(skb);
const int plen = skb->len;
- skb_reset_transport_header(skb);
+ skb->transport_header = skb->network_header + sizeof(*ipch);
return ipcomp_post_acomp(skb, err, 0) ?:
skb->len < (plen + sizeof(ip_comp_hdr)) ? -EINVAL :
@@ -313,7 +313,6 @@ void ipcomp_destroy(struct xfrm_state *x)
struct ipcomp_data *ipcd = x->data;
if (!ipcd)
return;
- xfrm_state_delete_tunnel(x);
ipcomp_free_data(ipcd);
kfree(ipcd);
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 77cc418ad69e..77db3b5fe4ac 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -424,11 +424,10 @@ void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);
-void xfrm_set_type_offload(struct xfrm_state *x)
+void xfrm_set_type_offload(struct xfrm_state *x, bool try_load)
{
const struct xfrm_type_offload *type = NULL;
struct xfrm_state_afinfo *afinfo;
- bool try_load = true;
retry:
afinfo = xfrm_state_get_afinfo(x->props.family);
@@ -593,7 +592,7 @@ void xfrm_state_free(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_free);
-static void ___xfrm_state_destroy(struct xfrm_state *x)
+static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
if (x->mode_cbs && x->mode_cbs->destroy_state)
x->mode_cbs->destroy_state(x);
@@ -607,6 +606,7 @@ static void ___xfrm_state_destroy(struct xfrm_state *x)
kfree(x->coaddr);
kfree(x->replay_esn);
kfree(x->preplay_esn);
+ xfrm_unset_type_offload(x);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
@@ -631,7 +631,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
synchronize_rcu();
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
- ___xfrm_state_destroy(x);
+ xfrm_state_gc_destroy(x);
}
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -780,8 +780,6 @@ void xfrm_dev_state_free(struct xfrm_state *x)
struct xfrm_dev_offload *xso = &x->xso;
struct net_device *dev = READ_ONCE(xso->dev);
- xfrm_unset_type_offload(x);
-
if (dev && dev->xfrmdev_ops) {
spin_lock_bh(&xfrm_state_dev_gc_lock);
if (!hlist_unhashed(&x->dev_gclist))
@@ -797,22 +795,18 @@ void xfrm_dev_state_free(struct xfrm_state *x)
}
#endif
-void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
+void __xfrm_state_destroy(struct xfrm_state *x)
{
WARN_ON(x->km.state != XFRM_STATE_DEAD);
- if (sync) {
- synchronize_rcu();
- ___xfrm_state_destroy(x);
- } else {
- spin_lock_bh(&xfrm_state_gc_lock);
- hlist_add_head(&x->gclist, &xfrm_state_gc_list);
- spin_unlock_bh(&xfrm_state_gc_lock);
- schedule_work(&xfrm_state_gc_work);
- }
+ spin_lock_bh(&xfrm_state_gc_lock);
+ hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+ spin_unlock_bh(&xfrm_state_gc_lock);
+ schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
+static void xfrm_state_delete_tunnel(struct xfrm_state *x);
int __xfrm_state_delete(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -840,6 +834,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
xfrm_dev_state_delete(x);
+ xfrm_state_delete_tunnel(x);
+
/* All xfrm_state objects are created by xfrm_state_alloc.
* The xfrm_state_alloc call gives a reference, and that
* is what we are dropping here.
@@ -921,7 +917,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
}
#endif
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
{
int i, err = 0, cnt = 0;
@@ -943,10 +939,7 @@ restart:
err = xfrm_state_delete(x);
xfrm_audit_state_delete(x, err ? 0 : 1,
task_valid);
- if (sync)
- xfrm_state_put_sync(x);
- else
- xfrm_state_put(x);
+ xfrm_state_put(x);
if (!err)
cnt++;
@@ -1307,14 +1300,8 @@ static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
const struct flowi *fl, unsigned short family,
struct xfrm_state **best, int *acq_in_progress,
- int *error)
+ int *error, unsigned int pcpu_id)
{
- /* We need the cpu id just as a lookup key,
- * we don't require it to be stable.
- */
- unsigned int pcpu_id = get_cpu();
- put_cpu();
-
/* Resolution logic:
* 1. There is a valid state with matching selector. Done.
* 2. Valid state with inappropriate selector. Skip.
@@ -1381,14 +1368,15 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
/* We need the cpu id just as a lookup key,
* we don't require it to be stable.
*/
- pcpu_id = get_cpu();
- put_cpu();
+ pcpu_id = raw_smp_processor_id();
to_put = NULL;
sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
rcu_read_lock();
+ xfrm_hash_ptrs_get(net, &state_ptrs);
+
hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
@@ -1400,7 +1388,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, encap_family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
if (best)
@@ -1417,7 +1405,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
cached:
@@ -1429,8 +1417,6 @@ cached:
else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
WARN_ON(1);
- xfrm_hash_ptrs_get(net, &state_ptrs);
-
h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
@@ -1460,7 +1446,7 @@ cached:
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
if (best || acquire_in_progress)
goto found;
@@ -1495,7 +1481,7 @@ cached:
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
xfrm_state_look_at(pol, x, fl, family,
- &best, &acquire_in_progress, &error);
+ &best, &acquire_in_progress, &error, pcpu_id);
}
found:
@@ -1711,6 +1697,26 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);
+static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto)
+{
+ struct xfrm_state *x;
+ unsigned int i;
+
+ rcu_read_lock();
+ for (i = 0; i <= net->xfrm.state_hmask; i++) {
+ hlist_for_each_entry_rcu(x, &net->xfrm.state_byspi[i], byspi) {
+ if (x->id.spi == spi && x->id.proto == proto) {
+ if (!xfrm_state_hold_rcu(x))
+ continue;
+ rcu_read_unlock();
+ return x;
+ }
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
static void __xfrm_state_insert(struct xfrm_state *x)
{
struct net *net = xs_net(x);
@@ -2262,7 +2268,12 @@ EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
- xfrm_dev_state_update_stats(x);
+ /* All counters which are needed to decide if state is expired
+ * are handled by SW for non-packet offload modes. Simply skip
+ * the following update and save extra boilerplate in drivers.
+ */
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+ xfrm_dev_state_update_stats(x);
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
@@ -2555,10 +2566,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
unsigned int h;
struct xfrm_state *x0;
int err = -ENOENT;
- __be32 minspi = htonl(low);
- __be32 maxspi = htonl(high);
+ u32 range = high - low + 1;
__be32 newspi = 0;
- u32 mark = x->mark.v & x->mark.m;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_DEAD) {
@@ -2572,38 +2581,34 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
err = -ENOENT;
- if (minspi == maxspi) {
- x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
- if (x0) {
- NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
- xfrm_state_put(x0);
+ for (h = 0; h < range; h++) {
+ u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high);
+ newspi = htonl(spi);
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto);
+ if (!x0) {
+ x->id.spi = newspi;
+ h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family);
+ XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, x->xso.type);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ err = 0;
goto unlock;
}
- newspi = minspi;
- } else {
- u32 spi = 0;
- for (h = 0; h < high-low+1; h++) {
- spi = get_random_u32_inclusive(low, high);
- x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
- if (x0 == NULL) {
- newspi = htonl(spi);
- break;
- }
- xfrm_state_put(x0);
+ xfrm_state_put(x0);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto unlock;
}
+
+ if (low == high)
+ break;
}
- if (newspi) {
- spin_lock_bh(&net->xfrm.xfrm_state_lock);
- x->id.spi = newspi;
- h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
- XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
- x->xso.type);
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
- err = 0;
- } else {
+ if (err)
NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
- }
unlock:
spin_unlock_bh(&x->lock);
@@ -3077,20 +3082,17 @@ void xfrm_flush_gc(void)
}
EXPORT_SYMBOL(xfrm_flush_gc);
-/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
-void xfrm_state_delete_tunnel(struct xfrm_state *x)
+static void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
if (x->tunnel) {
struct xfrm_state *t = x->tunnel;
- if (atomic_read(&t->tunnel_users) == 2)
+ if (atomic_dec_return(&t->tunnel_users) == 1)
xfrm_state_delete(t);
- atomic_dec(&t->tunnel_users);
- xfrm_state_put_sync(t);
+ xfrm_state_put(t);
x->tunnel = NULL;
}
}
-EXPORT_SYMBOL(xfrm_state_delete_tunnel);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
@@ -3295,8 +3297,8 @@ void xfrm_state_fini(struct net *net)
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
+ xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
flush_work(&xfrm_state_gc_work);
- xfrm_state_flush(net, 0, false, true);
WARN_ON(!list_empty(&net->xfrm.state_all));
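
The rewritten xfrm_alloc_spi() above draws random candidates from [low, high] and retries on collision, rather than special-casing the fixed-SPI request up front. A standalone sketch of that allocation loop, assuming a flat array in place of the byspi hash table and rand() in place of get_random_u32_inclusive() (so it has modulo bias the kernel helper avoids):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* toy "already in use" check over a small table of allocated SPIs */
    static bool spi_in_use(const uint32_t *used, size_t n, uint32_t spi)
    {
        for (size_t i = 0; i < n; i++)
            if (used[i] == spi)
                return true;
        return false;
    }

    /* returns 0 and stores the SPI on success, -1 if the range is exhausted */
    static int alloc_spi(const uint32_t *used, size_t n,
                         uint32_t low, uint32_t high, uint32_t *out)
    {
        uint32_t range = high - low + 1;

        for (uint32_t tries = 0; tries < range; tries++) {
            uint32_t spi = (low == high) ? low
                                         : low + (uint32_t)(rand() % range);

            if (!spi_in_use(used, n, spi)) {
                *out = spi;
                return 0;
            }
            if (low == high)
                break;      /* the single requested value is already taken */
        }
        return -1;
    }

    int main(void)
    {
        uint32_t used[] = { 0x100, 0x101 };
        uint32_t spi;

        if (!alloc_spi(used, 2, 0x100, 0x1ff, &spi))
            printf("allocated SPI 0x%x\n", spi);
        return 0;
    }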
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 59f258daf830..684239018bec 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -977,6 +977,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
/* override default values from above */
xfrm_update_ae_params(x, attrs, 0);
+ xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]);
/* configure the hardware if offload is requested */
if (attrs[XFRMA_OFFLOAD_DEV]) {
err = xfrm_dev_state_add(net, x,
@@ -2634,7 +2635,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_usersa_flush *p = nlmsg_data(nlh);
int err;
- err = xfrm_state_flush(net, p->proto, true, false);
+ err = xfrm_state_flush(net, p->proto, true);
if (err) {
if (err == -ESRCH) /* empty table */
return 0;
diff --git a/rust/Makefile b/rust/Makefile
index 27dec7904c3a..115b63b7d1e3 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -194,6 +194,7 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
RUST_MODFILE=test.rs \
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) --test $(rust_common_flags) \
+ -Zcrate-attr='feature(used_with_arg)' \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) $(rustdoc_test_target_flags) \
$(rustdoc_test_quiet) \
diff --git a/rust/kernel/firmware.rs b/rust/kernel/firmware.rs
index 2494c96e105f..4fe621f35716 100644
--- a/rust/kernel/firmware.rs
+++ b/rust/kernel/firmware.rs
@@ -202,7 +202,7 @@ macro_rules! module_firmware {
};
#[link_section = ".modinfo"]
- #[used]
+ #[used(compiler)]
static __MODULE_FIRMWARE: [u8; $($builder)*::create(__MODULE_FIRMWARE_PREFIX)
.build_length()] = $($builder)*::create(__MODULE_FIRMWARE_PREFIX).build();
};
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index 8d228c237954..21ef202ab0db 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -231,14 +231,14 @@ macro_rules! try_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),*>)? {
$($fields)*
}? $crate::error::Error)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ ::pin_init::try_init!($(&$this in)? $t $(::<$($generics),*>)? {
$($fields)*
}? $err)
};
@@ -291,14 +291,14 @@ macro_rules! try_pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),*>)? {
$($fields)*
}? $crate::error::Error)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),* $(,)?>)? {
+ ::pin_init::try_pin_init!($(&$this in)? $t $(::<$($generics),*>)? {
$($fields)*
}? $err)
};
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
index 4b8cdcb21e77..b9e65905e121 100644
--- a/rust/kernel/kunit.rs
+++ b/rust/kernel/kunit.rs
@@ -302,7 +302,7 @@ macro_rules! kunit_unsafe_test_suite {
is_init: false,
};
- #[used]
+ #[used(compiler)]
#[allow(unused_unsafe)]
#[cfg_attr(not(target_os = "macos"), link_section = ".kunit_test_suites")]
static mut KUNIT_TEST_SUITE_ENTRY: *const ::kernel::bindings::kunit_suite =
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 6b4774b2b1c3..e13d6ed88fa6 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -34,6 +34,9 @@
// Expected to become stable.
#![feature(arbitrary_self_types)]
//
+// To be determined.
+#![feature(used_with_arg)]
+//
// `feature(derive_coerce_pointee)` is expected to become stable. Before Rust
// 1.84.0, it did not exist, so enable the predecessor features.
#![cfg_attr(CONFIG_RUSTC_HAS_COERCE_POINTEE, feature(derive_coerce_pointee))]
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index 2ddd2eeb2852..75efc6eeeafc 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -57,7 +57,7 @@ impl<'a> ModInfoBuilder<'a> {
{cfg}
#[doc(hidden)]
#[cfg_attr(not(target_os = \"macos\"), link_section = \".modinfo\")]
- #[used]
+ #[used(compiler)]
pub static __{module}_{counter}: [u8; {length}] = *{string};
",
cfg = if builtin {
@@ -249,7 +249,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
// key or a new section. For the moment, keep it simple.
#[cfg(MODULE)]
#[doc(hidden)]
- #[used]
+ #[used(compiler)]
static __IS_RUST_MODULE: () = ();
static mut __MOD: ::core::mem::MaybeUninit<{type_}> =
@@ -273,7 +273,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(MODULE)]
#[doc(hidden)]
- #[used]
+ #[used(compiler)]
#[link_section = \".init.data\"]
static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module;
@@ -293,7 +293,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(MODULE)]
#[doc(hidden)]
- #[used]
+ #[used(compiler)]
#[link_section = \".exit.data\"]
static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module;
@@ -303,7 +303,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
#[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
#[doc(hidden)]
#[link_section = \"{initcall_section}\"]
- #[used]
+ #[used(compiler)]
pub static __{ident}_initcall: extern \"C\" fn() ->
::kernel::ffi::c_int = __{ident}_init;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a6461ea411f7..ba71b27aa363 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -312,10 +312,11 @@ $(obj)/%.lst: $(obj)/%.c FORCE
# - Stable since Rust 1.82.0: `feature(asm_const)`, `feature(raw_ref_op)`.
# - Stable since Rust 1.87.0: `feature(asm_goto)`.
# - Expected to become stable: `feature(arbitrary_self_types)`.
+# - To be determined: `feature(used_with_arg)`.
#
# Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on
# the unstable features in use.
-rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op
+rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,raw_ref_op,used_with_arg
# `--out-dir` is required to avoid temporaries being created by `rustc` in the
# current working directory, which may be not accessible in the out-of-tree
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 840bb9cfe789..a66f258cafaa 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -1269,62 +1269,62 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
stream = &data->stream;
guard(mutex)(&stream->device->lock);
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
+ switch (cmd) {
+ case SNDRV_COMPRESS_IOCTL_VERSION:
return put_user(SNDRV_COMPRESS_VERSION,
(int __user *)arg) ? -EFAULT : 0;
- case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
+ case SNDRV_COMPRESS_GET_CAPS:
return snd_compr_get_caps(stream, arg);
#ifndef COMPR_CODEC_CAPS_OVERFLOW
- case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
+ case SNDRV_COMPRESS_GET_CODEC_CAPS:
return snd_compr_get_codec_caps(stream, arg);
#endif
- case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
+ case SNDRV_COMPRESS_SET_PARAMS:
return snd_compr_set_params(stream, arg);
- case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
+ case SNDRV_COMPRESS_GET_PARAMS:
return snd_compr_get_params(stream, arg);
- case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
+ case SNDRV_COMPRESS_SET_METADATA:
return snd_compr_set_metadata(stream, arg);
- case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
+ case SNDRV_COMPRESS_GET_METADATA:
return snd_compr_get_metadata(stream, arg);
}
if (stream->direction == SND_COMPRESS_ACCEL) {
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_COMPRESS_TASK_CREATE):
+ switch (cmd) {
+ case SNDRV_COMPRESS_TASK_CREATE:
return snd_compr_task_create(stream, arg);
- case _IOC_NR(SNDRV_COMPRESS_TASK_FREE):
+ case SNDRV_COMPRESS_TASK_FREE:
return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
- case _IOC_NR(SNDRV_COMPRESS_TASK_START):
+ case SNDRV_COMPRESS_TASK_START:
return snd_compr_task_start_ioctl(stream, arg);
- case _IOC_NR(SNDRV_COMPRESS_TASK_STOP):
+ case SNDRV_COMPRESS_TASK_STOP:
return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
- case _IOC_NR(SNDRV_COMPRESS_TASK_STATUS):
+ case SNDRV_COMPRESS_TASK_STATUS:
return snd_compr_task_status_ioctl(stream, arg);
}
#endif
return -ENOTTY;
}
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
+ switch (cmd) {
+ case SNDRV_COMPRESS_TSTAMP:
return snd_compr_tstamp(stream, arg);
- case _IOC_NR(SNDRV_COMPRESS_AVAIL):
+ case SNDRV_COMPRESS_AVAIL:
return snd_compr_ioctl_avail(stream, arg);
- case _IOC_NR(SNDRV_COMPRESS_PAUSE):
+ case SNDRV_COMPRESS_PAUSE:
return snd_compr_pause(stream);
- case _IOC_NR(SNDRV_COMPRESS_RESUME):
+ case SNDRV_COMPRESS_RESUME:
return snd_compr_resume(stream);
- case _IOC_NR(SNDRV_COMPRESS_START):
+ case SNDRV_COMPRESS_START:
return snd_compr_start(stream);
- case _IOC_NR(SNDRV_COMPRESS_STOP):
+ case SNDRV_COMPRESS_STOP:
return snd_compr_stop(stream);
- case _IOC_NR(SNDRV_COMPRESS_DRAIN):
+ case SNDRV_COMPRESS_DRAIN:
return snd_compr_drain(stream);
- case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
+ case SNDRV_COMPRESS_PARTIAL_DRAIN:
return snd_compr_partial_drain(stream);
- case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
+ case SNDRV_COMPRESS_NEXT_TRACK:
return snd_compr_next_track(stream);
}
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index 3f2fd32f4ad9..886c53184fec 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -873,6 +873,52 @@ static int cs35l56_hda_system_resume(struct device *dev)
return 0;
}
+static int cs35l56_hda_fixup_yoga9(struct cs35l56_hda *cs35l56, int *bus_addr)
+{
+ /* The cirrus,dev-index property has the wrong values */
+ switch (*bus_addr) {
+ case 0x30:
+ cs35l56->index = 1;
+ return 0;
+ case 0x31:
+ cs35l56->index = 0;
+ return 0;
+ default:
+ /* There is a pseudo-address for broadcast to both amps - ignore it */
+ dev_dbg(cs35l56->base.dev, "Ignoring I2C address %#x\n", *bus_addr);
+ return 0;
+ }
+}
+
+static const struct {
+ const char *sub;
+ int (*fixup_fn)(struct cs35l56_hda *cs35l56, int *bus_addr);
+} cs35l56_hda_fixups[] = {
+ {
+ .sub = "17AA390B", /* Lenovo Yoga Book 9i GenX */
+ .fixup_fn = cs35l56_hda_fixup_yoga9,
+ },
+};
+
+static int cs35l56_hda_apply_platform_fixups(struct cs35l56_hda *cs35l56, const char *sub,
+ int *bus_addr)
+{
+ int i;
+
+ if (IS_ERR(sub))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(cs35l56_hda_fixups); i++) {
+ if (strcasecmp(cs35l56_hda_fixups[i].sub, sub) == 0) {
+ dev_dbg(cs35l56->base.dev, "Applying fixup for %s\n",
+ cs35l56_hda_fixups[i].sub);
+ return (cs35l56_hda_fixups[i].fixup_fn)(cs35l56, bus_addr);
+ }
+ }
+
+ return 0;
+}
+
static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id)
{
u32 values[HDA_MAX_COMPONENTS];
@@ -897,39 +943,47 @@ static int cs35l56_hda_read_acpi(struct cs35l56_hda *cs35l56, int hid, int id)
ACPI_COMPANION_SET(cs35l56->base.dev, adev);
}
- property = "cirrus,dev-index";
- ret = device_property_count_u32(cs35l56->base.dev, property);
- if (ret <= 0)
- goto err;
-
- if (ret > ARRAY_SIZE(values)) {
- ret = -EINVAL;
- goto err;
- }
- nval = ret;
+ /* Initialize things that could be overwritten by a fixup */
+ cs35l56->index = -1;
- ret = device_property_read_u32_array(cs35l56->base.dev, property, values, nval);
+ sub = acpi_get_subsystem_id(ACPI_HANDLE(cs35l56->base.dev));
+ ret = cs35l56_hda_apply_platform_fixups(cs35l56, sub, &id);
if (ret)
- goto err;
+ return ret;
- cs35l56->index = -1;
- for (i = 0; i < nval; i++) {
- if (values[i] == id) {
- cs35l56->index = i;
- break;
- }
- }
- /*
- * It's not an error for the ID to be missing: for I2C there can be
- * an alias address that is not a real device. So reject silently.
- */
if (cs35l56->index == -1) {
- dev_dbg(cs35l56->base.dev, "No index found in %s\n", property);
- ret = -ENODEV;
- goto err;
- }
+ property = "cirrus,dev-index";
+ ret = device_property_count_u32(cs35l56->base.dev, property);
+ if (ret <= 0)
+ goto err;
- sub = acpi_get_subsystem_id(ACPI_HANDLE(cs35l56->base.dev));
+ if (ret > ARRAY_SIZE(values)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ nval = ret;
+
+ ret = device_property_read_u32_array(cs35l56->base.dev, property, values, nval);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < nval; i++) {
+ if (values[i] == id) {
+ cs35l56->index = i;
+ break;
+ }
+ }
+
+ /*
+ * It's not an error for the ID to be missing: for I2C there can be
+ * an alias address that is not a real device. So reject silently.
+ */
+ if (cs35l56->index == -1) {
+ dev_dbg(cs35l56->base.dev, "No index found in %s\n", property);
+ ret = -ENODEV;
+ goto err;
+ }
+ }
if (IS_ERR(sub)) {
dev_info(cs35l56->base.dev,
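
The cs35l56 change above routes amp indexing through a small table of per-platform fixups keyed by the ACPI subsystem ID, and only falls back to the cirrus,dev-index property when no fixup assigned an index. A generic sketch of that table-driven lookup; all names and the address mapping below are purely illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    struct dev { int index; };

    /* hypothetical fixup: override a wrong firmware-provided index */
    static int fixup_example(struct dev *d, int bus_addr)
    {
        d->index = (bus_addr == 0x30) ? 1 : 0;
        return 0;
    }

    static const struct {
        const char *sub;
        int (*fixup)(struct dev *d, int bus_addr);
    } fixups[] = {
        { "17AA390B", fixup_example },
    };

    static int apply_fixups(struct dev *d, const char *sub, int bus_addr)
    {
        for (size_t i = 0; i < sizeof(fixups) / sizeof(fixups[0]); i++)
            if (!strcasecmp(fixups[i].sub, sub))
                return fixups[i].fixup(d, bus_addr);
        return 0;               /* no fixup for this platform */
    }

    int main(void)
    {
        struct dev d = { .index = -1 };

        apply_fixups(&d, "17aa390b", 0x31);     /* case-insensitive match */
        printf("index after fixup: %d\n", d.index);
        return 0;
    }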
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 060db37eab83..d91aac06adde 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7497,6 +7497,9 @@ static void alc287_fixup_yoga9_14iap7_bass_spk_pin(struct hda_codec *codec,
};
struct alc_spec *spec = codec->spec;
+ /* Support Audio mute LED and Mic mute LED on keyboard */
+ hda_fixup_ideapad_acpi(codec, fix, action);
+
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
snd_hda_apply_pincfgs(codec, pincfgs);
@@ -10814,6 +10817,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8bbe, "HP Victus 16-r0xxx (MB 8BBE)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
@@ -11006,6 +11010,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1a8e, "ASUS G712LWS", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
SND_PCI_QUIRK(0x1043, 0x1b13, "ASUS U41SV/GA403U", ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC),
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 97e340140d0c..f210a253da9f 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -420,6 +420,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6501RM"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "E1404FA"),
}
},
@@ -539,6 +546,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb1xxx"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb2xxx"),
}
},
@@ -588,6 +602,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8A81"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
DMI_MATCH(DMI_BOARD_NAME, "8B27"),
}
},
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index 82b92e83be4c..44c3a3b92f98 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -1315,14 +1315,17 @@ static int rt5660_i2c_probe(struct i2c_client *i2c)
regmap_update_bits(rt5660->regmap, RT5660_GPIO_CTRL1,
RT5660_GP1_PIN_MASK, RT5660_GP1_PIN_DMIC1_SCL);
- if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_GPIO2)
+ if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_GPIO2) {
regmap_update_bits(rt5660->regmap, RT5660_DMIC_CTRL1,
RT5660_SEL_DMIC_DATA_MASK,
RT5660_SEL_DMIC_DATA_GPIO2);
- else if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_IN1P)
+ regmap_update_bits(rt5660->regmap, RT5660_GPIO_CTRL1,
+ RT5660_GP2_PIN_MASK, RT5660_GP2_PIN_DMIC1_SDA);
+ } else if (rt5660->pdata.dmic1_data_pin == RT5660_DMIC1_DATA_IN1P) {
regmap_update_bits(rt5660->regmap, RT5660_DMIC_CTRL1,
RT5660_SEL_DMIC_DATA_MASK,
RT5660_SEL_DMIC_DATA_IN1P);
+ }
}
return devm_snd_soc_register_component(&i2c->dev,
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index ccf90428126d..0efe490024b0 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -1570,11 +1570,13 @@ static void avs_component_hda_unregister_dais(struct snd_soc_component *componen
{
struct snd_soc_acpi_mach *mach;
struct snd_soc_dai *dai, *save;
+ struct avs_mach_pdata *pdata;
struct hda_codec *codec;
char name[32];
mach = dev_get_platdata(component->card->dev);
- codec = mach->pdata;
+ pdata = mach->pdata;
+ codec = pdata->codec;
snprintf(name, sizeof(name), "%s-cpu", dev_name(&codec->core.dev));
for_each_component_dais_safe(component, dai, save) {
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 128b6876af83..c23fdb6aad4c 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -11,7 +11,7 @@ menuconfig SND_SOC_INTEL_MACH
kernel: saying N will just cause the configurator to skip all
the questions about Intel ASoC machine drivers.
-if SND_SOC_INTEL_MACH
+if SND_SOC_INTEL_MACH && (SND_SOC_SOF_INTEL_COMMON || !SND_SOC_SOF_INTEL_COMMON)
config SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES
bool "Use more user friendly long card names"
diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
index 1ad704ca2c5f..6bf7a6250ddc 100644
--- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
@@ -238,6 +238,15 @@ static const struct snd_soc_acpi_adr_device rt722_0_single_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1316_3_single_adr[] = {
+ {
+ .adr = 0x000330025D131601ull,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ .name_prefix = "rt1316-1"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt1320_2_single_adr[] = {
{
.adr = 0x000230025D132001ull,
@@ -368,6 +377,20 @@ static const struct snd_soc_acpi_link_adr arl_sdca_rvp[] = {
{}
};
+static const struct snd_soc_acpi_link_adr arl_rt711_l0_rt1316_l3[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+ .adr_d = rt711_sdca_0_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt1316_3_single_adr),
+ .adr_d = rt1316_3_single_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_link_adr arl_rt722_l0_rt1320_l2[] = {
{
.mask = BIT(0),
@@ -482,6 +505,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
.get_function_tplg_files = sof_sdw_get_tplg_files,
},
{
+ .link_mask = BIT(0) | BIT(3),
+ .links = arl_rt711_l0_rt1316_l3,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-arl-rt711-l0-rt1316-l3.tplg",
+ },
+ {
.link_mask = 0x1, /* link0 required */
.links = arl_rvp,
.drv_name = "sof_sdw",
diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
index 0198321d14a2..92e8307b2a46 100644
--- a/tools/hv/hv_fcopy_uio_daemon.c
+++ b/tools/hv/hv_fcopy_uio_daemon.c
@@ -35,7 +35,10 @@
#define WIN8_SRV_MINOR 1
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
-#define FCOPY_UIO "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/uio"
+#define FCOPY_DEVICE_PATH(subdir) \
+ "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/" #subdir
+#define FCOPY_UIO_PATH FCOPY_DEVICE_PATH(uio)
+#define FCOPY_CHANNELS_PATH FCOPY_DEVICE_PATH(channels)
#define FCOPY_VER_COUNT 1
static const int fcopy_versions[] = {
@@ -47,9 +50,62 @@ static const int fw_versions[] = {
UTIL_FW_VERSION
};
-#define HV_RING_SIZE 0x4000 /* 16KB ring buffer size */
+static uint32_t get_ring_buffer_size(void)
+{
+ char ring_path[PATH_MAX];
+ DIR *dir;
+ struct dirent *entry;
+ struct stat st;
+ uint32_t ring_size = 0;
+ int retry_count = 0;
+
+ /* Find the channel directory */
+ dir = opendir(FCOPY_CHANNELS_PATH);
+ if (!dir) {
+ usleep(100 * 1000); /* Avoid race with kernel, wait 100ms and retry once */
+ dir = opendir(FCOPY_CHANNELS_PATH);
+ if (!dir) {
+ syslog(LOG_ERR, "Failed to open channels directory: %s", strerror(errno));
+ return 0;
+ }
+ }
+
+retry_once:
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_type == DT_DIR && strcmp(entry->d_name, ".") != 0 &&
+ strcmp(entry->d_name, "..") != 0) {
+ snprintf(ring_path, sizeof(ring_path), "%s/%s/ring",
+ FCOPY_CHANNELS_PATH, entry->d_name);
+
+ if (stat(ring_path, &st) == 0) {
+ /*
+ * stat returns size of Tx, Rx rings combined,
+ * so take half of it for individual ring size.
+ */
+ ring_size = (uint32_t)st.st_size / 2;
+ syslog(LOG_INFO, "Ring buffer size from %s: %u bytes",
+ ring_path, ring_size);
+ break;
+ }
+ }
+ }
+
+ if (!ring_size && retry_count == 0) {
+ retry_count = 1;
+ rewinddir(dir);
+ usleep(100 * 1000); /* Wait 100ms and retry once */
+ goto retry_once;
+ }
+
+ closedir(dir);
-static unsigned char desc[HV_RING_SIZE];
+ if (!ring_size)
+ syslog(LOG_ERR, "Could not determine ring size");
+
+ return ring_size;
+}
+
+static unsigned char *desc;
static int target_fd;
static char target_fname[PATH_MAX];
@@ -62,8 +118,11 @@ static int hv_fcopy_create_file(char *file_name, char *path_name, __u32 flags)
filesize = 0;
p = path_name;
- snprintf(target_fname, sizeof(target_fname), "%s/%s",
- path_name, file_name);
+ if (snprintf(target_fname, sizeof(target_fname), "%s/%s",
+ path_name, file_name) >= sizeof(target_fname)) {
+ syslog(LOG_ERR, "target file name is too long: %s/%s", path_name, file_name);
+ goto done;
+ }
/*
* Check to see if the path is already in place; if not,
@@ -270,7 +329,7 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
{
size_t len = 0;
- while (len < dest_size) {
+ while (len < dest_size && *src) {
if (src[len] < 0x80)
dest[len++] = (char)(*src++);
else
@@ -282,27 +341,15 @@ static void wcstoutf8(char *dest, const __u16 *src, size_t dest_size)
static int hv_fcopy_start(struct hv_start_fcopy *smsg_in)
{
- setlocale(LC_ALL, "en_US.utf8");
- size_t file_size, path_size;
- char *file_name, *path_name;
- char *in_file_name = (char *)smsg_in->file_name;
- char *in_path_name = (char *)smsg_in->path_name;
-
- file_size = wcstombs(NULL, (const wchar_t *restrict)in_file_name, 0) + 1;
- path_size = wcstombs(NULL, (const wchar_t *restrict)in_path_name, 0) + 1;
-
- file_name = (char *)malloc(file_size * sizeof(char));
- path_name = (char *)malloc(path_size * sizeof(char));
-
- if (!file_name || !path_name) {
- free(file_name);
- free(path_name);
- syslog(LOG_ERR, "Can't allocate memory for file name and/or path name");
- return HV_E_FAIL;
- }
+ /*
+ * file_name and path_name should be the same length as the
+ * corresponding members of struct hv_start_fcopy.
+ */
+ char file_name[W_MAX_PATH], path_name[W_MAX_PATH];
- wcstoutf8(file_name, (__u16 *)in_file_name, file_size);
- wcstoutf8(path_name, (__u16 *)in_path_name, path_size);
+ setlocale(LC_ALL, "en_US.utf8");
+ wcstoutf8(file_name, smsg_in->file_name, W_MAX_PATH - 1);
+ wcstoutf8(path_name, smsg_in->path_name, W_MAX_PATH - 1);
return hv_fcopy_create_file(file_name, path_name, smsg_in->copy_flags);
}
@@ -406,7 +453,7 @@ int main(int argc, char *argv[])
int daemonize = 1, long_index = 0, opt, ret = -EINVAL;
struct vmbus_br txbr, rxbr;
void *ring;
- uint32_t len = HV_RING_SIZE;
+ uint32_t ring_size, len;
char uio_name[NAME_MAX] = {0};
char uio_dev_path[PATH_MAX] = {0};
@@ -437,7 +484,20 @@ int main(int argc, char *argv[])
openlog("HV_UIO_FCOPY", 0, LOG_USER);
syslog(LOG_INFO, "starting; pid is:%d", getpid());
- fcopy_get_first_folder(FCOPY_UIO, uio_name);
+ ring_size = get_ring_buffer_size();
+ if (!ring_size) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ desc = malloc(ring_size * sizeof(unsigned char));
+ if (!desc) {
+ syslog(LOG_ERR, "malloc failed for desc buffer");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ fcopy_get_first_folder(FCOPY_UIO_PATH, uio_name);
snprintf(uio_dev_path, sizeof(uio_dev_path), "/dev/%s", uio_name);
fcopy_fd = open(uio_dev_path, O_RDWR);
@@ -445,17 +505,17 @@ int main(int argc, char *argv[])
syslog(LOG_ERR, "open %s failed; error: %d %s",
uio_dev_path, errno, strerror(errno));
ret = fcopy_fd;
- goto exit;
+ goto free_desc;
}
- ring = vmbus_uio_map(&fcopy_fd, HV_RING_SIZE);
+ ring = vmbus_uio_map(&fcopy_fd, ring_size);
if (!ring) {
ret = errno;
syslog(LOG_ERR, "mmap ringbuffer failed; error: %d %s", ret, strerror(ret));
goto close;
}
- vmbus_br_setup(&txbr, ring, HV_RING_SIZE);
- vmbus_br_setup(&rxbr, (char *)ring + HV_RING_SIZE, HV_RING_SIZE);
+ vmbus_br_setup(&txbr, ring, ring_size);
+ vmbus_br_setup(&rxbr, (char *)ring + ring_size, ring_size);
rxbr.vbr->imask = 0;
@@ -472,7 +532,7 @@ int main(int argc, char *argv[])
goto close;
}
- len = HV_RING_SIZE;
+ len = ring_size;
ret = rte_vmbus_chan_recv_raw(&rxbr, desc, &len);
if (unlikely(ret <= 0)) {
/* This indicates a failure to communicate (or worse) */
@@ -492,6 +552,8 @@ int main(int argc, char *argv[])
}
close:
close(fcopy_fd);
+free_desc:
+ free(desc);
exit:
return ret;
}
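
get_ring_buffer_size() above derives the per-direction ring size from stat() on the channel's sysfs "ring" file, which maps the Tx and Rx rings back to back, so one ring is half of the file size. A minimal sketch of that derivation; the path below is a placeholder, not a real channel:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/stat.h>

    /* half of the mapped file: the file covers the Tx and Rx rings combined */
    static uint32_t ring_size_from_sysfs(const char *ring_path)
    {
        struct stat st;

        if (stat(ring_path, &st) != 0)
            return 0;
        return (uint32_t)(st.st_size / 2);
    }

    int main(void)
    {
        uint32_t sz = ring_size_from_sysfs(
            "/sys/bus/vmbus/devices/<guid>/channels/<relid>/ring");

        if (sz)
            printf("per-direction ring size: %u bytes\n", sz);
        else
            printf("could not stat ring file\n");
        return 0;
    }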
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 52e353368f58..d41ee26b9443 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -735,7 +735,7 @@ struct bpf_object {
struct usdt_manager *usdt_man;
- struct bpf_map *arena_map;
+ int arena_map_idx;
void *arena_data;
size_t arena_data_sz;
@@ -1517,6 +1517,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
obj->kconfig_map_idx = -1;
+ obj->arena_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->state = OBJ_OPEN;
@@ -2964,7 +2965,7 @@ static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t mmap_sz;
- mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+ mmap_sz = bpf_map_mmap_sz(map);
if (roundup(data_sz, page_sz) > mmap_sz) {
pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
sec_name, mmap_sz, data_sz);
@@ -3038,12 +3039,12 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
if (map->def.type != BPF_MAP_TYPE_ARENA)
continue;
- if (obj->arena_map) {
+ if (obj->arena_map_idx >= 0) {
pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
- map->name, obj->arena_map->name);
+ map->name, obj->maps[obj->arena_map_idx].name);
return -EINVAL;
}
- obj->arena_map = map;
+ obj->arena_map_idx = i;
if (obj->efile.arena_data) {
err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
@@ -3053,7 +3054,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return err;
}
}
- if (obj->efile.arena_data && !obj->arena_map) {
+ if (obj->efile.arena_data && obj->arena_map_idx < 0) {
pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
ARENA_SEC);
return -ENOENT;
@@ -4583,8 +4584,13 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
if (shdr_idx == obj->efile.arena_data_shndx) {
reloc_desc->type = RELO_DATA;
reloc_desc->insn_idx = insn_idx;
- reloc_desc->map_idx = obj->arena_map - obj->maps;
+ reloc_desc->map_idx = obj->arena_map_idx;
reloc_desc->sym_off = sym->st_value;
+
+ map = &obj->maps[obj->arena_map_idx];
+ pr_debug("prog '%s': found arena map %d (%s, sec %d, off %zu) for insn %u\n",
+ prog->name, obj->arena_map_idx, map->name, map->sec_idx,
+ map->sec_offset, insn_idx);
return 0;
}
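
The libbpf change stores an index into obj->maps rather than a struct bpf_map pointer: the maps array grows as maps are parsed and can be reallocated, which would leave a cached pointer dangling while an index stays valid. A tiny illustration of that difference, assuming nothing about libbpf itself:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct map { char name[16]; };

    int main(void)
    {
        size_t cap = 1;
        struct map *maps = calloc(cap, sizeof(*maps));

        strcpy(maps[0].name, "arena");

        size_t arena_idx = 0;              /* index: survives relocation        */
        struct map *arena_ptr = &maps[0];  /* pointer: dangles if the block moves */
        (void)arena_ptr;                   /* must not be dereferenced below     */

        cap = 1024;                        /* grow the array; it may relocate    */
        maps = realloc(maps, cap * sizeof(*maps));

        printf("lookup by index still works: %s\n", maps[arena_idx].name);

        free(maps);
        return 0;
    }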
diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py
index 76032e01c2e7..ef032e17fec4 100755
--- a/tools/net/ynl/pyynl/ynl_gen_c.py
+++ b/tools/net/ynl/pyynl/ynl_gen_c.py
@@ -275,9 +275,8 @@ class Type(SpecAttr):
def _setter_lines(self, ri, member, presence):
raise Exception(f"Setter not implemented for class type {self.type}")
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var="req"):
ref = (ref if ref else []) + [self.c_name]
- var = "req"
member = f"{var}->{'.'.join(ref)}"
local_vars = []
@@ -332,7 +331,7 @@ class TypeUnused(Type):
def attr_get(self, ri, var, first):
pass
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var=None):
pass
@@ -355,7 +354,7 @@ class TypePad(Type):
def attr_policy(self, cw):
pass
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var=None):
pass
@@ -695,13 +694,14 @@ class TypeNest(Type):
f"parg.data = &{var}->{self.c_name};"]
return get_lines, init_lines, None
- def setter(self, ri, space, direction, deref=False, ref=None):
+ def setter(self, ri, space, direction, deref=False, ref=None, var="req"):
ref = (ref if ref else []) + [self.c_name]
for _, attr in ri.family.pure_nested_structs[self.nested_attrs].member_list():
if attr.is_recursive():
continue
- attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref)
+ attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref,
+ var=var)
class TypeMultiAttr(Type):
@@ -1879,7 +1879,9 @@ def rdir(direction):
def op_prefix(ri, direction, deref=False):
suffix = f"_{ri.type_name}"
- if not ri.op_mode or ri.op_mode == 'do':
+ if not ri.op_mode:
+ pass
+ elif ri.op_mode == 'do':
suffix += f"{direction_to_suffix[direction]}"
else:
if direction == 'request':
@@ -2470,11 +2472,22 @@ def free_arg_name(direction):
return 'obj'
-def print_alloc_wrapper(ri, direction):
+def print_alloc_wrapper(ri, direction, struct=None):
name = op_prefix(ri, direction)
- ri.cw.write_func_prot(f'static inline struct {name} *', f"{name}_alloc", [f"void"])
+ struct_name = name
+ if ri.type_name_conflict:
+ struct_name += '_'
+
+ args = ["void"]
+ cnt = "1"
+ if struct and struct.in_multi_val:
+ args = ["unsigned int n"]
+ cnt = "n"
+
+ ri.cw.write_func_prot(f'static inline struct {struct_name} *',
+ f"{name}_alloc", args)
ri.cw.block_start()
- ri.cw.p(f'return calloc(1, sizeof(struct {name}));')
+ ri.cw.p(f'return calloc({cnt}, sizeof(struct {struct_name}));')
ri.cw.block_end()
@@ -2544,6 +2557,19 @@ def print_type(ri, direction):
def print_type_full(ri, struct):
_print_type(ri, "", struct)
+ if struct.request and struct.in_multi_val:
+ print_alloc_wrapper(ri, "", struct)
+ ri.cw.nl()
+ free_rsp_nested_prototype(ri)
+ ri.cw.nl()
+
+ # Name conflicts are too hard to deal with with the current code base,
+ # they are very rare so don't bother printing setters in that case.
+ if ri.ku_space == 'user' and not ri.type_name_conflict:
+ for _, attr in struct.member_list():
+ attr.setter(ri, ri.attr_set, "", var="obj")
+ ri.cw.nl()
+
def print_type_helpers(ri, direction, deref=False):
print_free_prototype(ri, direction)
@@ -3515,9 +3541,6 @@ def main():
for attr_set, struct in parsed.pure_nested_structs.items():
ri = RenderInfo(cw, parsed, args.mode, "", "", attr_set)
print_type_full(ri, struct)
- if struct.request and struct.in_multi_val:
- free_rsp_nested_prototype(ri)
- cw.nl()
for op_name, op in parsed.ops.items():
cw.p(f"/* ============== {op.enum_name} ============== */")
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index d967ac001498..67d76f3a1dce 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -224,6 +224,7 @@ static bool is_rust_noreturn(const struct symbol *func)
str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
+ str_ends_with(func->name, "_4core9panicking18panic_nounwind_fmt") ||
str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") ||
str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
diff --git a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
index 8100509e561b..0ffa01d54ce2 100644
--- a/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/recursive_attach.c
@@ -149,3 +149,70 @@ close_prog:
fentry_recursive_target__destroy(target_skel);
fentry_recursive__destroy(tracing_skel);
}
+
+static void *fentry_target_test_run(void *arg)
+{
+ for (;;) {
+ int prog_fd = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+ int err;
+
+ if (prog_fd == -1)
+ break;
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ if (!ASSERT_OK(err, "fentry_target test_run"))
+ break;
+ }
+
+ return NULL;
+}
+
+void test_fentry_attach_stress(void)
+{
+ struct fentry_recursive_target *target_skel = NULL;
+ struct fentry_recursive *tracing_skel = NULL;
+ struct bpf_program *prog;
+ int err, i, tgt_prog_fd;
+ pthread_t thread;
+
+ target_skel = fentry_recursive_target__open_and_load();
+ if (!ASSERT_OK_PTR(target_skel,
+ "fentry_recursive_target__open_and_load"))
+ goto close_prog;
+ tgt_prog_fd = bpf_program__fd(target_skel->progs.fentry_target);
+ err = pthread_create(&thread, NULL,
+ fentry_target_test_run, &tgt_prog_fd);
+ if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+ goto close_prog;
+
+ for (i = 0; i < 1000; i++) {
+ tracing_skel = fentry_recursive__open();
+ if (!ASSERT_OK_PTR(tracing_skel, "fentry_recursive__open"))
+ goto stop_thread;
+
+ prog = tracing_skel->progs.recursive_attach;
+ err = bpf_program__set_attach_target(prog, tgt_prog_fd,
+ "fentry_target");
+ if (!ASSERT_OK(err, "bpf_program__set_attach_target"))
+ goto stop_thread;
+
+ err = fentry_recursive__load(tracing_skel);
+ if (!ASSERT_OK(err, "fentry_recursive__load"))
+ goto stop_thread;
+
+ err = fentry_recursive__attach(tracing_skel);
+ if (!ASSERT_OK(err, "fentry_recursive__attach"))
+ goto stop_thread;
+
+ fentry_recursive__destroy(tracing_skel);
+ tracing_skel = NULL;
+ }
+
+stop_thread:
+ __atomic_store_n(&tgt_prog_fd, -1, __ATOMIC_SEQ_CST);
+ err = pthread_join(thread, NULL);
+ ASSERT_OK(err, "pthread_join");
+close_prog:
+ fentry_recursive__destroy(tracing_skel);
+ fentry_recursive_target__destroy(target_skel);
+}
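
The stress test above stops its helper thread by atomically overwriting the shared prog-fd with -1 and then joining it. The same shutdown pattern in a self-contained form, sketched with the GCC/Clang __atomic builtins (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* worker spins until the shared value it polls is set to -1 */
    static void *worker(void *arg)
    {
        for (;;) {
            int v = __atomic_load_n((int *)arg, __ATOMIC_SEQ_CST);

            if (v == -1)
                break;
            /* ... do one unit of work against v ... */
        }
        return NULL;
    }

    int main(void)
    {
        int shared = 42;
        pthread_t t;

        pthread_create(&t, NULL, worker, &shared);
        usleep(1000);                                     /* let it run briefly */
        __atomic_store_n(&shared, -1, __ATOMIC_SEQ_CST);  /* signal shutdown    */
        pthread_join(t, NULL);
        printf("worker stopped\n");
        return 0;
    }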
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 4be6fdb78c6a..594441acb707 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -116,6 +116,8 @@ static void test_snprintf_negative(void)
ASSERT_ERR(load_single_snprintf("%llc"), "invalid specifier 7");
ASSERT_ERR(load_single_snprintf("\x80"), "non ascii character");
ASSERT_ERR(load_single_snprintf("\x1"), "non printable character");
+ ASSERT_ERR(load_single_snprintf("%p%"), "invalid specifier 8");
+ ASSERT_ERR(load_single_snprintf("%s%"), "invalid specifier 9");
}
void test_snprintf(void)
diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile
index 9bd84d6b542e..3556f3563e08 100644
--- a/tools/testing/selftests/drivers/net/Makefile
+++ b/tools/testing/selftests/drivers/net/Makefile
@@ -22,6 +22,7 @@ TEST_PROGS := \
stats.py \
shaper.py \
hds.py \
+ xdp.py \
# end of TEST_PROGS
include ../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py b/tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py
index 820d8a03becc..835c357919a8 100755
--- a/tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py
+++ b/tools/testing/selftests/drivers/net/hw/devlink_rate_tc_bw.py
@@ -208,14 +208,14 @@ def setup_devlink_rate(cfg):
"port-index": port_index,
"rate-tx-max": 125000000,
"rate-tc-bws": [
- {"rate-tc-index": 0, "rate-tc-bw": 0},
- {"rate-tc-index": 1, "rate-tc-bw": 0},
- {"rate-tc-index": 2, "rate-tc-bw": 0},
- {"rate-tc-index": 3, "rate-tc-bw": 20},
- {"rate-tc-index": 4, "rate-tc-bw": 80},
- {"rate-tc-index": 5, "rate-tc-bw": 0},
- {"rate-tc-index": 6, "rate-tc-bw": 0},
- {"rate-tc-index": 7, "rate-tc-bw": 0},
+ {"index": 0, "bw": 0},
+ {"index": 1, "bw": 0},
+ {"index": 2, "bw": 0},
+ {"index": 3, "bw": 20},
+ {"index": 4, "bw": 80},
+ {"index": 5, "bw": 0},
+ {"index": 6, "bw": 0},
+ {"index": 7, "bw": 0},
]
})
except NlError as exc:
diff --git a/tools/testing/selftests/drivers/net/hw/ncdevmem.c b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
index cc9b40d9c5d5..72f828021f83 100644
--- a/tools/testing/selftests/drivers/net/hw/ncdevmem.c
+++ b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
@@ -526,12 +526,10 @@ static struct netdev_queue_id *create_queues(void)
struct netdev_queue_id *queues;
size_t i = 0;
- queues = calloc(num_queues, sizeof(*queues));
+ queues = netdev_queue_id_alloc(num_queues);
for (i = 0; i < num_queues; i++) {
- queues[i]._present.type = 1;
- queues[i]._present.id = 1;
- queues[i].type = NETDEV_QUEUE_TYPE_RX;
- queues[i].id = start_queue + i;
+ netdev_queue_id_set_type(&queues[i], NETDEV_QUEUE_TYPE_RX);
+ netdev_queue_id_set_id(&queues[i], start_queue + i);
}
return queues;
diff --git a/tools/testing/selftests/drivers/net/hw/rss_api.py b/tools/testing/selftests/drivers/net/hw/rss_api.py
index 424743bb583b..19847f3d4a00 100755
--- a/tools/testing/selftests/drivers/net/hw/rss_api.py
+++ b/tools/testing/selftests/drivers/net/hw/rss_api.py
@@ -5,6 +5,7 @@
API level tests for RSS (mostly Netlink vs IOCTL).
"""
+import errno
import glob
import random
from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_is, ksft_ne, ksft_raises
@@ -390,6 +391,78 @@ def test_rxfh_fields_ntf(cfg):
ksft_eq(next(ethnl.poll_ntf(duration=0.01), None), None)
+def test_rss_ctx_add(cfg):
+ """ Test creating an additional RSS context via Netlink """
+
+ _require_2qs(cfg)
+
+ # Test basic creation
+ ctx = cfg.ethnl.rss_create_act({"header": {"dev-index": cfg.ifindex}})
+ d = defer(ethtool, f"-X {cfg.ifname} context {ctx.get('context')} delete")
+ ksft_ne(ctx.get("context", 0), 0)
+ ksft_ne(set(ctx.get("indir", [0])), {0},
+ comment="Driver should init the indirection table")
+
+ # Try requesting the ID we just got allocated
+ with ksft_raises(NlError) as cm:
+ ctx = cfg.ethnl.rss_create_act({
+ "header": {"dev-index": cfg.ifindex},
+ "context": ctx.get("context"),
+ })
+ ethtool(f"-X {cfg.ifname} context {ctx.get('context')} delete")
+ d.exec()
+ ksft_eq(cm.exception.nl_msg.error, -errno.EBUSY)
+
+ # Test creating with a specified RSS table, and context ID
+ ctx_id = ctx.get("context")
+ ctx = cfg.ethnl.rss_create_act({
+ "header": {"dev-index": cfg.ifindex},
+ "context": ctx_id,
+ "indir": [1],
+ })
+ ethtool(f"-X {cfg.ifname} context {ctx.get('context')} delete")
+ ksft_eq(ctx.get("context"), ctx_id)
+ ksft_eq(set(ctx.get("indir", [0])), {1})
+
+
+def test_rss_ctx_ntf(cfg):
+ """ Test notifications for creating additional RSS contexts """
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ # Create / delete via Netlink
+ ctx = cfg.ethnl.rss_create_act({"header": {"dev-index": cfg.ifindex}})
+ cfg.ethnl.rss_delete_act({
+ "header": {"dev-index": cfg.ifindex},
+ "context": ctx["context"],
+ })
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[NL] No notification after context creation")
+ ksft_eq(ntf["name"], "rss-create-ntf")
+ ksft_eq(ctx, ntf["msg"])
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[NL] No notification after context deletion")
+ ksft_eq(ntf["name"], "rss-delete-ntf")
+
+ # Create / delete via IOCTL
+ ctx_id = _ethtool_create(cfg, "--disable-netlink -X", "context new")
+ ethtool(f"--disable-netlink -X {cfg.ifname} context {ctx_id} delete")
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[IOCTL] No notification after context creation")
+ ksft_eq(ntf["name"], "rss-create-ntf")
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("[IOCTL] No notification after context deletion")
+ ksft_eq(ntf["name"], "rss-delete-ntf")
+
+
def main() -> None:
""" Ksft boiler plate main """
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index d9c10613ae67..44151b7b1a24 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+import re
import time
from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen
@@ -10,12 +11,11 @@ class GenerateTraffic:
self.env = env
- if port is None:
- port = rand_port()
- self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
- wait_port_listen(port)
+ self.port = rand_port() if port is None else port
+ self._iperf_server = cmd(f"iperf3 -s -1 -p {self.port}", background=True)
+ wait_port_listen(self.port)
time.sleep(0.1)
- self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400",
+ self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {self.port} -t 86400",
background=True, host=env.remote)
# Wait for traffic to ramp up
@@ -56,3 +56,16 @@ class GenerateTraffic:
ksft_pr(">> Server:")
ksft_pr(self._iperf_server.stdout)
ksft_pr(self._iperf_server.stderr)
+ self._wait_client_stopped()
+
+ def _wait_client_stopped(self, sleep=0.005, timeout=5):
+ end = time.monotonic() + timeout
+
+ live_port_pattern = re.compile(fr":{self.port:04X} 0[^6] ")
+
+ while time.monotonic() < end:
+ data = cmd("cat /proc/net/tcp*", host=self.env.remote).stdout
+ if not live_port_pattern.search(data):
+ return
+ time.sleep(sleep)
+ raise Exception(f"Waiting for client to stop timed out after {timeout}s")
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index a102803ff74f..030762b203d7 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -40,6 +40,8 @@ fw_flash_test()
return
fi
+ echo "10"> $DEBUGFS_DIR/fw_update_flash_chunk_time_ms
+
devlink dev flash $DL_HANDLE file $DUMMYFILE
check_err $? "Failed to flash with status updates on"
diff --git a/tools/testing/selftests/drivers/net/xdp.py b/tools/testing/selftests/drivers/net/xdp.py
new file mode 100755
index 000000000000..887d662ad128
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/xdp.py
@@ -0,0 +1,656 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+This file contains tests to verify native XDP support in network drivers.
+The tests utilize the BPF program `xdp_native.bpf.o` from the `selftests.net.lib`
+directory, with each test focusing on a specific aspect of XDP functionality.
+"""
+import random
+import string
+from dataclasses import dataclass
+from enum import Enum
+
+from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_ne, ksft_pr
+from lib.py import KsftFailEx, NetDrvEpEnv, EthtoolFamily, NlError
+from lib.py import bkg, cmd, rand_port
+from lib.py import ip, bpftool, defer
+
+
+class TestConfig(Enum):
+ """Enum for XDP configuration options."""
+ MODE = 0 # Configures the BPF program for a specific test
+ PORT = 1 # Port configuration to communicate with the remote host
+ ADJST_OFFSET = 2 # Tail/Head adjustment offset for extension/shrinking
+ ADJST_TAG = 3 # Adjustment tag to annotate the start and end of extension
+
+
+class XDPAction(Enum):
+ """Enum for XDP actions."""
+ PASS = 0 # Pass the packet up to the stack
+ DROP = 1 # Drop the packet
+ TX = 2 # Route the packet to the remote host
+ TAIL_ADJST = 3 # Adjust the tail of the packet
+ HEAD_ADJST = 4 # Adjust the head of the packet
+
+
+class XDPStats(Enum):
+ """Enum for XDP statistics."""
+ RX = 0 # Count of valid packets received for testing
+ PASS = 1 # Count of packets passed up to the stack
+ DROP = 2 # Count of packets dropped
+ TX = 3 # Count of incoming packets routed to the remote host
+ ABORT = 4 # Count of packets that were aborted
+
+
+@dataclass
+class BPFProgInfo:
+ """Data class to store information about a BPF program."""
+ name: str # Name of the BPF program
+ file: str # BPF program object file
+ xdp_sec: str = "xdp" # XDP section name (e.g., "xdp" or "xdp.frags")
+ mtu: int = 1500 # Maximum Transmission Unit, default is 1500
+
+
+def _exchg_udp(cfg, port, test_string):
+ """
+ Exchanges UDP packets between a local and remote host using the socat tool.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ port: Port number to use for the UDP communication.
+ test_string: String that the remote host will send.
+
+ Returns:
+ The string received by the test host.
+ """
+ cfg.require_cmd("socat", remote=True)
+
+ rx_udp_cmd = f"socat -{cfg.addr_ipver} -T 2 -u UDP-RECV:{port},reuseport STDOUT"
+ tx_udp_cmd = f"echo -n {test_string} | socat -t 2 -u STDIN UDP:{cfg.baddr}:{port}"
+
+ with bkg(rx_udp_cmd, exit_wait=True) as nc:
+ cmd(tx_udp_cmd, host=cfg.remote, shell=True)
+
+ return nc.stdout.strip()
+
+
+def _test_udp(cfg, port, size=256):
+ """
+ Tests UDP packet exchange between a local and remote host.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ port: Port number to use for the UDP communication.
+ size: The length of the test string to be exchanged, default is 256 characters.
+
+ Returns:
+ bool: True if the received string matches the sent string, False otherwise.
+ """
+ test_str = "".join(random.choice(string.ascii_lowercase) for _ in range(size))
+ recvd_str = _exchg_udp(cfg, port, test_str)
+
+ return recvd_str == test_str
+
+
+def _load_xdp_prog(cfg, bpf_info):
+ """
+ Loads an XDP program onto a network interface.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ bpf_info: BPFProgInfo object containing information about the BPF program.
+
+ Returns:
+ dict: A dictionary containing the XDP program ID, name, and associated map IDs.
+ """
+ abs_path = cfg.net_lib_dir / bpf_info.file
+ prog_info = {}
+
+ cmd(f"ip link set dev {cfg.remote_ifname} mtu {bpf_info.mtu}", shell=True, host=cfg.remote)
+ defer(ip, f"link set dev {cfg.remote_ifname} mtu 1500", host=cfg.remote)
+
+ cmd(
+ f"ip link set dev {cfg.ifname} mtu {bpf_info.mtu} xdp obj {abs_path} sec {bpf_info.xdp_sec}",
+ shell=True
+ )
+ defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off")
+
+ xdp_info = ip(f"-d link show dev {cfg.ifname}", json=True)[0]
+ prog_info["id"] = xdp_info["xdp"]["prog"]["id"]
+ prog_info["name"] = xdp_info["xdp"]["prog"]["name"]
+ prog_id = prog_info["id"]
+
+ map_ids = bpftool(f"prog show id {prog_id}", json=True)["map_ids"]
+ prog_info["maps"] = {}
+ for map_id in map_ids:
+ name = bpftool(f"map show id {map_id}", json=True)["name"]
+ prog_info["maps"][name] = map_id
+
+ return prog_info
+
+
+def format_hex_bytes(value):
+ """
+ Helper function that converts an integer into a formatted hexadecimal byte string.
+
+ Args:
+ value: An integer representing the number to be converted.
+
+ Returns:
+ A string representing hexadecimal equivalent of value, with bytes separated by spaces.
+ """
+ hex_str = value.to_bytes(4, byteorder='little', signed=True)
+ return ' '.join(f'{byte:02x}' for byte in hex_str)
+
+
+def _set_xdp_map(map_name, key, value):
+ """
+ Updates an XDP map with a given key-value pair using bpftool.
+
+ Args:
+ map_name: The name of the XDP map to update.
+ key: The key to update in the map, formatted as a hexadecimal string.
+ value: The value to associate with the key, formatted as a hexadecimal string.
+ """
+ key_formatted = format_hex_bytes(key)
+ value_formatted = format_hex_bytes(value)
+ bpftool(
+ f"map update name {map_name} key hex {key_formatted} value hex {value_formatted}"
+ )
+
+
+def _get_stats(xdp_map_id):
+ """
+ Retrieves and formats statistics from an XDP map.
+
+ Args:
+ xdp_map_id: The ID of the XDP map from which to retrieve statistics.
+
+ Returns:
+ A dictionary containing formatted packet statistics for various XDP actions.
+ The keys are based on the XDPStats Enum values.
+
+ Raises:
+ KsftFailEx: If the stats retrieval fails.
+ """
+ stats_dump = bpftool(f"map dump id {xdp_map_id}", json=True)
+ if not stats_dump:
+ raise KsftFailEx(f"Failed to get stats for map {xdp_map_id}")
+
+ stats_formatted = {}
+ for key in range(0, 5):
+ val = stats_dump[key]["formatted"]["value"]
+ if stats_dump[key]["formatted"]["key"] == XDPStats.RX.value:
+ stats_formatted[XDPStats.RX.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.PASS.value:
+ stats_formatted[XDPStats.PASS.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.DROP.value:
+ stats_formatted[XDPStats.DROP.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.TX.value:
+ stats_formatted[XDPStats.TX.value] = val
+ elif stats_dump[key]["formatted"]["key"] == XDPStats.ABORT.value:
+ stats_formatted[XDPStats.ABORT.value] = val
+
+ return stats_formatted
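The parsing above assumes each entry of the bpftool map dump carries a "formatted" view with integer key and value (available when the map has BTF information); a hedged sketch of the expected shape:

    # Illustrative dump entries only; real output depends on the bpftool version.
    stats_dump_example = [
        {"formatted": {"key": 0, "value": 128}},  # STATS_RX
        {"formatted": {"key": 1, "value": 128}},  # STATS_PASS
    ]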
+
+
+def _test_pass(cfg, bpf_info, msg_sz):
+ """
+ Tests the XDP_PASS action by exchanging UDP packets.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ bpf_info: BPFProgInfo object containing information about the BPF program.
+ msg_sz: Size of the test message to send.
+ """
+
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.PASS.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ ksft_eq(_test_udp(cfg, port, msg_sz), True, "UDP packet exchange failed")
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+
+ ksft_ne(stats[XDPStats.RX.value], 0, "RX stats should not be zero")
+ ksft_eq(stats[XDPStats.RX.value], stats[XDPStats.PASS.value], "RX and PASS stats mismatch")
+
+
+def test_xdp_native_pass_sb(cfg):
+ """
+ Tests the XDP_PASS action for the single-buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog", "xdp_native.bpf.o", "xdp", 1500)
+
+ _test_pass(cfg, bpf_info, 256)
+
+
+def test_xdp_native_pass_mb(cfg):
+ """
+ Tests the XDP_PASS action for a multi-buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o", "xdp.frags", 9000)
+
+ _test_pass(cfg, bpf_info, 8000)
+
+
+def _test_drop(cfg, bpf_info, msg_sz):
+ """
+ Tests the XDP_DROP action by exchanging UDP packets.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ bpf_info: BPFProgInfo object containing information about the BPF program.
+ msg_sz: Size of the test message to send.
+ """
+
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.DROP.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ ksft_eq(_test_udp(cfg, port, msg_sz), False, "UDP packet exchange should fail")
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+
+ ksft_ne(stats[XDPStats.RX.value], 0, "RX stats should not be zero")
+ ksft_eq(stats[XDPStats.RX.value], stats[XDPStats.DROP.value], "RX and DROP stats mismatch")
+
+
+def test_xdp_native_drop_sb(cfg):
+ """
+ Tests the XDP_DROP action for a single-buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog", "xdp_native.bpf.o", "xdp", 1500)
+
+ _test_drop(cfg, bpf_info, 256)
+
+
+def test_xdp_native_drop_mb(cfg):
+ """
+ Tests the XDP_DROP action for a multi-buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o", "xdp.frags", 9000)
+
+ _test_drop(cfg, bpf_info, 8000)
+
+
+def test_xdp_native_tx_mb(cfg):
+ """
+ Tests the XDP_TX action for a multi-buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ cfg.require_cmd("socat", remote=True)
+
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o", "xdp.frags", 9000)
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.TX.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ test_string = ''.join(random.choice(string.ascii_lowercase) for _ in range(8000))
+ rx_udp = f"socat -{cfg.addr_ipver} -T 2 -u UDP-RECV:{port},reuseport STDOUT"
+ tx_udp = f"echo {test_string} | socat -t 2 -u STDIN UDP:{cfg.baddr}:{port}"
+
+ with bkg(rx_udp, host=cfg.remote, exit_wait=True) as rnc:
+ cmd(tx_udp, host=cfg.remote, shell=True)
+
+ stats = _get_stats(prog_info['maps']['map_xdp_stats'])
+
+ ksft_eq(rnc.stdout.strip(), test_string, "UDP packet exchange failed")
+ ksft_eq(stats[XDPStats.TX.value], 1, "TX stats mismatch")
+
+
+def _validate_res(res, offset_lst, pkt_sz_lst):
+ """
+ Validates the result of a test.
+
+ Args:
+ res: The result of the test, which should be a dictionary with a "status" key.
+ offset_lst: List of offsets exercised by the test.
+ pkt_sz_lst: List of packet sizes exercised by the test.
+
+ Raises:
+ KsftFailEx: If the test fails to pass any combination of offset and packet size.
+ """
+ if "status" not in res:
+ raise KsftFailEx("Missing 'status' key in result dictionary")
+
+ # If the very first offset/packet-size combination failed, no case succeeded
+ if res["status"] == "fail":
+ if res["offset"] == offset_lst[0] and res["pkt_sz"] == pkt_sz_lst[0]:
+ raise KsftFailEx(f"{res['reason']}")
+
+ # Get the previous offset and packet size to report the successful run
+ tmp_idx = offset_lst.index(res["offset"])
+ prev_offset = offset_lst[tmp_idx - 1]
+ if tmp_idx == 0:
+ tmp_idx = pkt_sz_lst.index(res["pkt_sz"])
+ prev_pkt_sz = pkt_sz_lst[tmp_idx - 1]
+ else:
+ prev_pkt_sz = res["pkt_sz"]
+
+ # Use these values for error reporting
+ ksft_pr(
+ f"Failed run: pkt_sz {res['pkt_sz']}, offset {res['offset']}. "
+ f"Last successful run: pkt_sz {prev_pkt_sz}, offset {prev_offset}. "
+ f"Reason: {res['reason']}"
+ )
+
+
+def _check_for_failures(recvd_str, stats):
+ """
+ Checks for common failures while adjusting headroom or tailroom.
+
+ Args:
+ recvd_str: The string received from the remote host after sending a test string.
+ stats: A dictionary containing formatted packet statistics for various XDP actions.
+
+ Returns:
+ str: A string describing the failure reason if a failure is detected, otherwise None.
+ """
+
+ # Any adjustment failure results in an abort, hence we track this counter
+ if stats[XDPStats.ABORT.value] != 0:
+ return "Adjustment failed"
+
+ # Since we are using aggregate stats for a single test across all offsets and packet sizes,
+ # we can't use RX stats alone to track data exchange failure without taking a previous
+ # snapshot. An easier way is to simply check that the received string is non-empty.
+ if len(recvd_str) == 0:
+ return "Data exchange failed"
+
+ # Check for RX and PASS stats mismatch. Ideally, they should be equal for a successful run
+ if stats[XDPStats.RX.value] != stats[XDPStats.PASS.value]:
+ return "RX stats mismatch"
+
+ return None
+
+
+def _test_xdp_native_tail_adjst(cfg, pkt_sz_lst, offset_lst):
+ """
+ Tests the XDP tail adjustment functionality.
+
+ This function loads the multi-buffer XDP program and configures the
+ XDP map for tail adjustment. It then validates the tail adjustment by
+ sending and receiving UDP packets with the specified packet sizes and
+ offsets.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ pkt_sz_lst: List of packet sizes to test.
+ offset_lst: List of offsets to validate support for tail adjustment.
+
+ Returns:
+ dict: A dictionary with test status and failure details if applicable.
+ """
+ port = rand_port()
+ bpf_info = BPFProgInfo("xdp_prog_frags", "xdp_native.bpf.o", "xdp.frags", 9000)
+
+ prog_info = _load_xdp_prog(cfg, bpf_info)
+
+ # Configure the XDP map for tail adjustment
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.TAIL_ADJST.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ for offset in offset_lst:
+ tag = format(random.randint(65, 90), "02x")
+
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_OFFSET.value, offset)
+ if offset > 0:
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_TAG.value, int(tag, 16))
+
+ for pkt_sz in pkt_sz_lst:
+ test_str = "".join(random.choice(string.ascii_lowercase) for _ in range(pkt_sz))
+ recvd_str = _exchg_udp(cfg, port, test_str)
+ stats = _get_stats(prog_info["maps"]["map_xdp_stats"])
+
+ failure = _check_for_failures(recvd_str, stats)
+ if failure is not None:
+ return {
+ "status": "fail",
+ "reason": failure,
+ "offset": offset,
+ "pkt_sz": pkt_sz,
+ }
+
+ # Validate data content based on offset direction
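+ # e.g. a +4 offset with tag 0x41 expects test_str + "AAAA"; a -4
+ # offset expects the last 4 characters of test_str to be trimmed.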
+ expected_data = None
+ if offset > 0:
+ expected_data = test_str + (offset * chr(int(tag, 16)))
+ else:
+ expected_data = test_str[0:pkt_sz + offset]
+
+ if recvd_str != expected_data:
+ return {
+ "status": "fail",
+ "reason": "Data mismatch",
+ "offset": offset,
+ "pkt_sz": pkt_sz,
+ }
+
+ return {"status": "pass"}
+
+
+def test_xdp_native_adjst_tail_grow_data(cfg):
+ """
+ Tests the XDP tail adjustment by growing packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+ offset_lst = [1, 16, 32, 64, 128, 256]
+ res = _test_xdp_native_tail_adjst(
+ cfg,
+ pkt_sz_lst,
+ offset_lst,
+ )
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+def test_xdp_native_adjst_tail_shrnk_data(cfg):
+ """
+ Tests the XDP tail adjustment by shrinking packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+ offset_lst = [-16, -32, -64, -128, -256]
+ res = _test_xdp_native_tail_adjst(
+ cfg,
+ pkt_sz_lst,
+ offset_lst,
+ )
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+def get_hds_thresh(cfg):
+ """
+ Retrieves the header data split (HDS) threshold for a network interface.
+
+ Args:
+ cfg: Configuration object containing network settings.
+
+ Returns:
+ The HDS threshold value. If the threshold is not supported or an error occurs,
+ a default value of 1500 is returned.
+ """
+ netnl = cfg.netnl
+ hds_thresh = 1500
+
+ try:
+ rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}})
+ if 'hds-thresh' not in rings:
+ ksft_pr(f'hds-thresh not supported. Using default: {hds_thresh}')
+ return hds_thresh
+ hds_thresh = rings['hds-thresh']
+ except NlError as e:
+ ksft_pr(f"Failed to get rings: {e}. Using default: {hds_thresh}")
+
+ return hds_thresh
+
+
+def _test_xdp_native_head_adjst(cfg, prog, pkt_sz_lst, offset_lst):
+ """
+ Tests the XDP head adjustment action for a multi-buffer case.
+
+ Args:
+ cfg: Configuration object containing network settings.
+ netnl: Network namespace or link object (not used in this function).
+
+ This function sets up the packet size and offset lists, then performs
+ the head adjustment test by sending and receiving UDP packets.
+ """
+ cfg.require_cmd("socat", remote=True)
+
+ prog_info = _load_xdp_prog(cfg, BPFProgInfo(prog, "xdp_native.bpf.o", "xdp.frags", 9000))
+ port = rand_port()
+
+ _set_xdp_map("map_xdp_setup", TestConfig.MODE.value, XDPAction.HEAD_ADJST.value)
+ _set_xdp_map("map_xdp_setup", TestConfig.PORT.value, port)
+
+ hds_thresh = get_hds_thresh(cfg)
+ for offset in offset_lst:
+ for pkt_sz in pkt_sz_lst:
+ # The "head" buffer must contain at least the Ethernet header
+ # after we eat into it. We send large-enough packets, but if HDS
+ # is enabled head will only contain headers. Don't try to eat
+ # more than 28 bytes (UDPv4 + eth hdr left: (14 + 20 + 8) - 14)
+ l2_cut_off = 28 if cfg.addr_ipver == 4 else 48
+ if pkt_sz > hds_thresh and offset > l2_cut_off:
+ ksft_pr(
+ f"Failed run: pkt_sz ({pkt_sz}) > HDS threshold ({hds_thresh}) and "
+ f"offset {offset} > {l2_cut_off}"
+ )
+ return {"status": "pass"}
+
+ test_str = ''.join(random.choice(string.ascii_lowercase) for _ in range(pkt_sz))
+ tag = format(random.randint(65, 90), '02x')
+
+ _set_xdp_map("map_xdp_setup",
+ TestConfig.ADJST_OFFSET.value,
+ offset)
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_TAG.value, int(tag, 16))
+ _set_xdp_map("map_xdp_setup", TestConfig.ADJST_OFFSET.value, offset)
+
+ recvd_str = _exchg_udp(cfg, port, test_str)
+
+ # Check for failures around adjustment and data exchange
+ failure = _check_for_failures(recvd_str, _get_stats(prog_info['maps']['map_xdp_stats']))
+ if failure is not None:
+ return {
+ "status": "fail",
+ "reason": failure,
+ "offset": offset,
+ "pkt_sz": pkt_sz
+ }
+
+ # Validate data content based on offset direction
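+ # e.g. a -4 offset with tag 0x41 expects "AAAA" + test_str; a +4
+ # offset expects the first 4 characters of test_str to be removed.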
+ expected_data = None
+ if offset < 0:
+ expected_data = chr(int(tag, 16)) * (0 - offset) + test_str
+ else:
+ expected_data = test_str[offset:]
+
+ if recvd_str != expected_data:
+ return {
+ "status": "fail",
+ "reason": "Data mismatch",
+ "offset": offset,
+ "pkt_sz": pkt_sz
+ }
+
+ return {"status": "pass"}
+
+
+def test_xdp_native_adjst_head_grow_data(cfg):
+ """
+ Tests the XDP head adjustment by growing packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+
+ This function sets up the packet size and offset lists, then calls
+ _test_xdp_native_head_adjst to perform the actual test. The test
+ passes if the packet data is successfully grown for the given packet
+ sizes and offsets.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+
+ # Negative values result in headroom shrinking, resulting in growing of payload
+ offset_lst = [-16, -32, -64, -128, -256]
+ res = _test_xdp_native_head_adjst(cfg, "xdp_prog_frags", pkt_sz_lst, offset_lst)
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+def test_xdp_native_adjst_head_shrnk_data(cfg):
+ """
+ Tests the XDP head adjustment by shrinking packet data.
+
+ Args:
+ cfg: Configuration object containing network settings.
+
+ This function sets up the packet size and offset lists, then calls
+ _test_xdp_native_head_adjst to perform the actual test. The test
+ passes if the packet data is successfully shrunk for the given packet
+ sizes and offsets.
+ """
+ pkt_sz_lst = [512, 1024, 2048]
+
+ # Positive values result in headroom growing, resulting in shrinking of payload
+ offset_lst = [16, 32, 64, 128, 256]
+ res = _test_xdp_native_head_adjst(cfg, "xdp_prog_frags", pkt_sz_lst, offset_lst)
+
+ _validate_res(res, offset_lst, pkt_sz_lst)
+
+
+def main():
+ """
+ Main function to execute the XDP tests.
+
+ This function runs a series of tests to validate XDP support for both
+ the single-buffer and multi-buffer cases. It uses the NetDrvEpEnv context
+ manager to manage the network driver environment and the ksft_run
+ function to execute the tests.
+ """
+ with NetDrvEpEnv(__file__) as cfg:
+ cfg.netnl = EthtoolFamily()
+ ksft_run(
+ [
+ test_xdp_native_pass_sb,
+ test_xdp_native_pass_mb,
+ test_xdp_native_drop_sb,
+ test_xdp_native_drop_mb,
+ test_xdp_native_tx_mb,
+ test_xdp_native_adjst_tail_grow_data,
+ test_xdp_native_adjst_tail_shrnk_data,
+ test_xdp_native_adjst_head_grow_data,
+ test_xdp_native_adjst_head_shrnk_data,
+ ],
+ args=(cfg,))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h
index ea79662405bc..1f625b39948a 100644
--- a/tools/testing/selftests/futex/include/futex2test.h
+++ b/tools/testing/selftests/futex/include/futex2test.h
@@ -4,6 +4,7 @@
*
* Copyright 2021 Collabora Ltd.
*/
+#include <linux/time_types.h>
#include <stdint.h>
#define u64_to_ptr(x) ((void *)(uintptr_t)(x))
@@ -65,7 +66,12 @@ struct futex32_numa {
static inline int futex_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,
unsigned long flags, struct timespec *timo, clockid_t clockid)
{
- return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo, clockid);
+ struct __kernel_timespec ts = {
+ .tv_sec = timo->tv_sec,
+ .tv_nsec = timo->tv_nsec,
+ };
+
+ return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, &ts, clockid);
}
/*
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 9308b2f77fed..890b3374dacd 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -142,6 +142,20 @@ check_tc_version()
fi
}
+check_tc_erspan_support()
+{
+ local dev=$1; shift
+
+ tc filter add dev $dev ingress pref 1 handle 1 flower \
+ erspan_opts 1:0:0:0 &> /dev/null
+ if [[ $? -ne 0 ]]; then
+ echo "SKIP: iproute2 too old; tc is missing erspan support"
+ return $ksft_skip
+ fi
+ tc filter del dev $dev ingress pref 1 handle 1 flower \
+ erspan_opts 1:0:0:0 &> /dev/null
+}
+
# Old versions of tc don't understand "mpls_uc"
check_tc_mpls_support()
{
diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
index b1daad19b01e..b58909a93112 100755
--- a/tools/testing/selftests/net/forwarding/tc_flower.sh
+++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
@@ -6,7 +6,7 @@ ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
match_ip_tos_test match_indev_test match_ip_ttl_test
match_mpls_label_test \
match_mpls_tc_test match_mpls_bos_test match_mpls_ttl_test \
- match_mpls_lse_test"
+ match_mpls_lse_test match_erspan_opts_test"
NUM_NETIFS=2
source tc_common.sh
source lib.sh
@@ -676,6 +676,56 @@ match_mpls_lse_test()
log_test "mpls lse match ($tcflags)"
}
+match_erspan_opts_test()
+{
+ RET=0
+
+ check_tc_erspan_support $h2 || return 0
+
+ # h1 erspan setup
+ tunnel_create erspan1 erspan 192.0.2.1 192.0.2.2 dev $h1 seq key 1001 \
+ tos C ttl 64 erspan_ver 1 erspan 6789 # ERSPAN Type II
+ tunnel_create erspan2 erspan 192.0.2.1 192.0.2.2 dev $h1 seq key 1002 \
+ tos C ttl 64 erspan_ver 2 erspan_dir egress erspan_hwid 63 \
+ # ERSPAN Type III
+ ip link set dev erspan1 master v$h1
+ ip link set dev erspan2 master v$h1
+ # h2 erspan setup
+ ip link add ep-ex type erspan ttl 64 external # To collect tunnel info
+ ip link set ep-ex up
+ ip link set dev ep-ex master v$h2
+ tc qdisc add dev ep-ex clsact
+
+ # ERSPAN Type II [decap direction]
+ tc filter add dev ep-ex ingress protocol ip handle 101 flower \
+ $tcflags enc_src_ip 192.0.2.1 enc_dst_ip 192.0.2.2 \
+ enc_key_id 1001 erspan_opts 1:6789:0:0 \
+ action drop
+ # ERSPAN Type III [decap direction]
+ tc filter add dev ep-ex ingress protocol ip handle 102 flower \
+ $tcflags enc_src_ip 192.0.2.1 enc_dst_ip 192.0.2.2 \
+ enc_key_id 1002 erspan_opts 2:0:1:63 action drop
+
+ ep1mac=$(mac_get erspan1)
+ $MZ erspan1 -c 1 -p 64 -a $ep1mac -b $h2mac -t ip -q
+ tc_check_packets "dev ep-ex ingress" 101 1
+ check_err $? "ERSPAN Type II"
+
+ ep2mac=$(mac_get erspan2)
+ $MZ erspan2 -c 1 -p 64 -a $ep2mac -b $h2mac -t ip -q
+ tc_check_packets "dev ep-ex ingress" 102 1
+ check_err $? "ERSPAN Type III"
+
+ # h2 erspan cleanup
+ tc qdisc del dev ep-ex clsact
+ tunnel_destroy ep-ex
+ # h1 erspan cleanup
+ tunnel_destroy erspan2 # ERSPAN Type III
+ tunnel_destroy erspan1 # ERSPAN Type II
+
+ log_test "erspan_opts match ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/net/lib/xdp_native.bpf.c b/tools/testing/selftests/net/lib/xdp_native.bpf.c
new file mode 100644
index 000000000000..521ba38f2ddd
--- /dev/null
+++ b/tools/testing/selftests/net/lib/xdp_native.bpf.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+#define MAX_ADJST_OFFSET 256
+#define MAX_PAYLOAD_LEN 5000
+#define MAX_HDR_LEN 64
+
+enum {
+ XDP_MODE = 0,
+ XDP_PORT = 1,
+ XDP_ADJST_OFFSET = 2,
+ XDP_ADJST_TAG = 3,
+} xdp_map_setup_keys;
+
+enum {
+ XDP_MODE_PASS = 0,
+ XDP_MODE_DROP = 1,
+ XDP_MODE_TX = 2,
+ XDP_MODE_TAIL_ADJST = 3,
+ XDP_MODE_HEAD_ADJST = 4,
+} xdp_map_modes;
+
+enum {
+ STATS_RX = 0,
+ STATS_PASS = 1,
+ STATS_DROP = 2,
+ STATS_TX = 3,
+ STATS_ABORT = 4,
+} xdp_stats;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 5);
+ __type(key, __u32);
+ __type(value, __s32);
+} map_xdp_setup SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 5);
+ __type(key, __u32);
+ __type(value, __u64);
+} map_xdp_stats SEC(".maps");
+
+static __u32 min(__u32 a, __u32 b)
+{
+ return a < b ? a : b;
+}
+
+static void record_stats(struct xdp_md *ctx, __u32 stat_type)
+{
+ __u64 *count;
+
+ count = bpf_map_lookup_elem(&map_xdp_stats, &stat_type);
+
+ if (count)
+ __sync_fetch_and_add(count, 1);
+}
+
+static struct udphdr *filter_udphdr(struct xdp_md *ctx, __u16 port)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct udphdr *udph = NULL;
+ struct ethhdr *eth = data;
+
+ if (data + sizeof(*eth) > data_end)
+ return NULL;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = data + sizeof(*eth);
+
+ if (iph + 1 > (struct iphdr *)data_end ||
+ iph->protocol != IPPROTO_UDP)
+ return NULL;
+
+ udph = (void *)eth + sizeof(*iph) + sizeof(*eth);
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h = data + sizeof(*eth);
+
+ if (ipv6h + 1 > (struct ipv6hdr *)data_end ||
+ ipv6h->nexthdr != IPPROTO_UDP)
+ return NULL;
+
+ udph = (void *)eth + sizeof(*ipv6h) + sizeof(*eth);
+ } else {
+ return NULL;
+ }
+
+ if (udph + 1 > (struct udphdr *)data_end)
+ return NULL;
+
+ if (udph->dest != bpf_htons(port))
+ return NULL;
+
+ record_stats(ctx, STATS_RX);
+
+ return udph;
+}
+
+static int xdp_mode_pass(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph = NULL;
+
+ udph = filter_udphdr(ctx, port);
+ if (!udph)
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_PASS);
+
+ return XDP_PASS;
+}
+
+static int xdp_mode_drop_handler(struct xdp_md *ctx, __u16 port)
+{
+ struct udphdr *udph = NULL;
+
+ udph = filter_udphdr(ctx, port);
+ if (!udph)
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_DROP);
+
+ return XDP_DROP;
+}
+
+static void swap_machdr(void *data)
+{
+ struct ethhdr *eth = data;
+ __u8 tmp_mac[ETH_ALEN];
+
+ __builtin_memcpy(tmp_mac, eth->h_source, ETH_ALEN);
+ __builtin_memcpy(eth->h_source, eth->h_dest, ETH_ALEN);
+ __builtin_memcpy(eth->h_dest, tmp_mac, ETH_ALEN);
+}
+
+static int xdp_mode_tx_handler(struct xdp_md *ctx, __u16 port)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct udphdr *udph = NULL;
+ struct ethhdr *eth = data;
+
+ if (data + sizeof(*eth) > data_end)
+ return XDP_PASS;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = data + sizeof(*eth);
+ __be32 tmp_ip = iph->saddr;
+
+ if (iph + 1 > (struct iphdr *)data_end ||
+ iph->protocol != IPPROTO_UDP)
+ return XDP_PASS;
+
+ udph = data + sizeof(*iph) + sizeof(*eth);
+
+ if (udph + 1 > (struct udphdr *)data_end)
+ return XDP_PASS;
+ if (udph->dest != bpf_htons(port))
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_RX);
+ swap_machdr((void *)eth);
+
+ iph->saddr = iph->daddr;
+ iph->daddr = tmp_ip;
+
+ record_stats(ctx, STATS_TX);
+
+ return XDP_TX;
+
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h = data + sizeof(*eth);
+ struct in6_addr tmp_ipv6;
+
+ if (ipv6h + 1 > (struct ipv6hdr *)data_end ||
+ ipv6h->nexthdr != IPPROTO_UDP)
+ return XDP_PASS;
+
+ udph = data + sizeof(*ipv6h) + sizeof(*eth);
+
+ if (udph + 1 > (struct udphdr *)data_end)
+ return XDP_PASS;
+ if (udph->dest != bpf_htons(port))
+ return XDP_PASS;
+
+ record_stats(ctx, STATS_RX);
+ swap_machdr((void *)eth);
+
+ __builtin_memcpy(&tmp_ipv6, &ipv6h->saddr, sizeof(tmp_ipv6));
+ __builtin_memcpy(&ipv6h->saddr, &ipv6h->daddr,
+ sizeof(tmp_ipv6));
+ __builtin_memcpy(&ipv6h->daddr, &tmp_ipv6, sizeof(tmp_ipv6));
+
+ record_stats(ctx, STATS_TX);
+
+ return XDP_TX;
+ }
+
+ return XDP_PASS;
+}
+
+static void *update_pkt(struct xdp_md *ctx, __s16 offset, __u32 *udp_csum)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct udphdr *udph = NULL;
+ struct ethhdr *eth = data;
+ __u32 len, len_new;
+
+ if (data + sizeof(*eth) > data_end)
+ return NULL;
+
+ if (eth->h_proto == bpf_htons(ETH_P_IP)) {
+ struct iphdr *iph = data + sizeof(*eth);
+ __u16 total_len;
+
+ if (iph + 1 > (struct iphdr *)data_end)
+ return NULL;
+
+ iph->tot_len = bpf_htons(bpf_ntohs(iph->tot_len) + offset);
+
+ udph = (void *)eth + sizeof(*iph) + sizeof(*eth);
+ if (!udph || udph + 1 > (struct udphdr *)data_end)
+ return NULL;
+
+ len_new = bpf_htons(bpf_ntohs(udph->len) + offset);
+ } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h = data + sizeof(*eth);
+ __u16 payload_len;
+
+ if (ipv6h + 1 > (struct ipv6hdr *)data_end)
+ return NULL;
+
+ udph = (void *)eth + sizeof(*ipv6h) + sizeof(*eth);
+ if (!udph || udph + 1 > (struct udphdr *)data_end)
+ return NULL;
+
+ *udp_csum = ~((__u32)udph->check);
+
+ len = ipv6h->payload_len;
+ len_new = bpf_htons(bpf_ntohs(len) + offset);
+ ipv6h->payload_len = len_new;
+
+ *udp_csum = bpf_csum_diff(&len, sizeof(len), &len_new,
+ sizeof(len_new), *udp_csum);
+
+ len = udph->len;
+ len_new = bpf_htons(bpf_ntohs(udph->len) + offset);
+ *udp_csum = bpf_csum_diff(&len, sizeof(len), &len_new,
+ sizeof(len_new), *udp_csum);
+ } else {
+ return NULL;
+ }
+
+ udph->len = len_new;
+
+ return udph;
+}
+
+static __u16 csum_fold_helper(__u32 csum)
+{
+ return ~((csum & 0xffff) + (csum >> 16)) ? : 0xffff;
+}
+
+static int xdp_adjst_tail_shrnk_data(struct xdp_md *ctx, __u16 offset,
+ __u32 hdr_len)
+{
+ char tmp_buff[MAX_ADJST_OFFSET];
+ __u32 buff_pos, udp_csum = 0;
+ struct udphdr *udph = NULL;
+ __u32 buff_len;
+
+ udph = update_pkt(ctx, 0 - offset, &udp_csum);
+ if (!udph)
+ return -1;
+
+ buff_len = bpf_xdp_get_buff_len(ctx);
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ /* Make sure we have enough data to avoid eating the header */
+ if (buff_len - offset < hdr_len)
+ return -1;
+
+ buff_pos = buff_len - offset;
+ if (bpf_xdp_load_bytes(ctx, buff_pos, tmp_buff, offset) < 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff((__be32 *)tmp_buff, offset, 0, 0, udp_csum);
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ if (bpf_xdp_adjust_tail(ctx, 0 - offset) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_adjst_tail_grow_data(struct xdp_md *ctx, __u16 offset)
+{
+ char tmp_buff[MAX_ADJST_OFFSET];
+ __u32 buff_pos, udp_csum = 0;
+ __u32 buff_len, hdr_len, key;
+ struct udphdr *udph;
+ __s32 *val;
+ __u8 tag;
+
+ /* Proceed to update the packet headers before attempting to adjust
+ * the tail. Once the tail is adjusted we lose access to the offset
+ * amount of data at the end of the packet which is crucial to update
+ * the checksum.
+ * Since any failure beyond this would abort the packet, we should
+ * not worry about passing a packet up the stack with wrong headers.
+ */
+ udph = update_pkt(ctx, offset, &udp_csum);
+ if (!udph)
+ return -1;
+
+ key = XDP_ADJST_TAG;
+ val = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!val)
+ return -1;
+
+ tag = (__u8)(*val);
+
+ for (int i = 0; i < MAX_ADJST_OFFSET; i++)
+ __builtin_memcpy(&tmp_buff[i], &tag, 1);
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff(0, 0, (__be32 *)tmp_buff, offset, udp_csum);
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ buff_len = bpf_xdp_get_buff_len(ctx);
+
+ if (bpf_xdp_adjust_tail(ctx, offset) < 0) {
+ bpf_printk("Failed to adjust tail\n");
+ return -1;
+ }
+
+ if (bpf_xdp_store_bytes(ctx, buff_len, tmp_buff, offset) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_adjst_tail(struct xdp_md *ctx, __u16 port)
+{
+ void *data = (void *)(long)ctx->data;
+ struct udphdr *udph = NULL;
+ __s32 *adjust_offset, *val;
+ __u32 key, hdr_len;
+ void *offset_ptr;
+ __u8 tag;
+ int ret;
+
+ udph = filter_udphdr(ctx, port);
+ if (!udph)
+ return XDP_PASS;
+
+ hdr_len = (void *)udph - data + sizeof(struct udphdr);
+ key = XDP_ADJST_OFFSET;
+ adjust_offset = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!adjust_offset)
+ return XDP_PASS;
+
+ if (*adjust_offset < 0)
+ ret = xdp_adjst_tail_shrnk_data(ctx,
+ (__u16)(0 - *adjust_offset),
+ hdr_len);
+ else
+ ret = xdp_adjst_tail_grow_data(ctx, (__u16)(*adjust_offset));
+ if (ret)
+ goto abort_pkt;
+
+ record_stats(ctx, STATS_PASS);
+ return XDP_PASS;
+
+abort_pkt:
+ record_stats(ctx, STATS_ABORT);
+ return XDP_ABORTED;
+}
+
+static int xdp_adjst_head_shrnk_data(struct xdp_md *ctx, __u64 hdr_len,
+ __u32 offset)
+{
+ char tmp_buff[MAX_ADJST_OFFSET];
+ struct udphdr *udph;
+ void *offset_ptr;
+ __u32 udp_csum = 0;
+
+ /* Update the length information in the IP and UDP headers before
+ * adjusting the headroom. This simplifies accessing the relevant
+ * fields in the IP and UDP headers for fragmented packets. Any
+ * failure beyond this point will result in the packet being aborted,
+ * so we don't need to worry about incorrect length information for
+ * passed packets.
+ */
+ udph = update_pkt(ctx, (__s16)(0 - offset), &udp_csum);
+ if (!udph)
+ return -1;
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ if (bpf_xdp_load_bytes(ctx, hdr_len, tmp_buff, offset) < 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff((__be32 *)tmp_buff, offset, 0, 0, udp_csum);
+
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ if (bpf_xdp_load_bytes(ctx, 0, tmp_buff, MAX_ADJST_OFFSET) < 0)
+ return -1;
+
+ if (bpf_xdp_adjust_head(ctx, offset) < 0)
+ return -1;
+
+ if (offset > MAX_ADJST_OFFSET)
+ return -1;
+
+ if (hdr_len > MAX_ADJST_OFFSET || hdr_len == 0)
+ return -1;
+
+ /* Added here to handle clang complaints about negative values */
+ hdr_len = hdr_len & 0xff;
+
+ if (hdr_len == 0)
+ return -1;
+
+ if (bpf_xdp_store_bytes(ctx, 0, tmp_buff, hdr_len) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_adjst_head_grow_data(struct xdp_md *ctx, __u64 hdr_len,
+ __u32 offset)
+{
+ char hdr_buff[MAX_HDR_LEN];
+ char data_buff[MAX_ADJST_OFFSET];
+ void *offset_ptr;
+ __s32 *val;
+ __u32 key;
+ __u8 tag;
+ __u32 udp_csum = 0;
+ struct udphdr *udph;
+
+ udph = update_pkt(ctx, (__s16)(offset), &udp_csum);
+ if (!udph)
+ return -1;
+
+ key = XDP_ADJST_TAG;
+ val = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!val)
+ return -1;
+
+ tag = (__u8)(*val);
+ for (int i = 0; i < MAX_ADJST_OFFSET; i++)
+ __builtin_memcpy(&data_buff[i], &tag, 1);
+
+ offset = (offset & 0x1ff) >= MAX_ADJST_OFFSET ? MAX_ADJST_OFFSET :
+ offset & 0xff;
+ if (offset == 0)
+ return -1;
+
+ udp_csum = bpf_csum_diff(0, 0, (__be32 *)data_buff, offset, udp_csum);
+ udph->check = (__u16)csum_fold_helper(udp_csum);
+
+ if (hdr_len > MAX_ADJST_OFFSET || hdr_len == 0)
+ return -1;
+
+ /* Added here to handle clang complaints about negative values */
+ hdr_len = hdr_len & 0xff;
+
+ if (hdr_len == 0)
+ return -1;
+
+ if (bpf_xdp_load_bytes(ctx, 0, hdr_buff, hdr_len) < 0)
+ return -1;
+
+ if (offset > MAX_ADJST_OFFSET)
+ return -1;
+
+ if (bpf_xdp_adjust_head(ctx, 0 - offset) < 0)
+ return -1;
+
+ if (bpf_xdp_store_bytes(ctx, 0, hdr_buff, hdr_len) < 0)
+ return -1;
+
+ if (bpf_xdp_store_bytes(ctx, hdr_len, data_buff, offset) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int xdp_head_adjst(struct xdp_md *ctx, __u16 port)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct udphdr *udph_ptr = NULL;
+ __u32 key, size, hdr_len;
+ __s32 *val;
+ int res;
+
+ /* Filter packets based on UDP port */
+ udph_ptr = filter_udphdr(ctx, port);
+ if (!udph_ptr)
+ return XDP_PASS;
+
+ hdr_len = (void *)udph_ptr - data + sizeof(struct udphdr);
+
+ key = XDP_ADJST_OFFSET;
+ val = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!val)
+ return XDP_PASS;
+
+ switch (*val) {
+ case -16:
+ case 16:
+ size = 16;
+ break;
+ case -32:
+ case 32:
+ size = 32;
+ break;
+ case -64:
+ case 64:
+ size = 64;
+ break;
+ case -128:
+ case 128:
+ size = 128;
+ break;
+ case -256:
+ case 256:
+ size = 256;
+ break;
+ default:
+ bpf_printk("Invalid adjustment offset: %d\n", *val);
+ goto abort;
+ }
+
+ if (*val < 0)
+ res = xdp_adjst_head_grow_data(ctx, hdr_len, size);
+ else
+ res = xdp_adjst_head_shrnk_data(ctx, hdr_len, size);
+
+ if (res)
+ goto abort;
+
+ record_stats(ctx, STATS_PASS);
+ return XDP_PASS;
+
+abort:
+ record_stats(ctx, STATS_ABORT);
+ return XDP_ABORTED;
+}
+
+static int xdp_prog_common(struct xdp_md *ctx)
+{
+ __u32 key, *port;
+ __s32 *mode;
+
+ key = XDP_MODE;
+ mode = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!mode)
+ return XDP_PASS;
+
+ key = XDP_PORT;
+ port = bpf_map_lookup_elem(&map_xdp_setup, &key);
+ if (!port)
+ return XDP_PASS;
+
+ switch (*mode) {
+ case XDP_MODE_PASS:
+ return xdp_mode_pass(ctx, (__u16)(*port));
+ case XDP_MODE_DROP:
+ return xdp_mode_drop_handler(ctx, (__u16)(*port));
+ case XDP_MODE_TX:
+ return xdp_mode_tx_handler(ctx, (__u16)(*port));
+ case XDP_MODE_TAIL_ADJST:
+ return xdp_adjst_tail(ctx, (__u16)(*port));
+ case XDP_MODE_HEAD_ADJST:
+ return xdp_head_adjst(ctx, (__u16)(*port));
+ }
+
+ /* Default action is to simply pass */
+ return XDP_PASS;
+}
+
+SEC("xdp")
+int xdp_prog(struct xdp_md *ctx)
+{
+ return xdp_prog_common(ctx);
+}
+
+SEC("xdp.frags")
+int xdp_prog_frags(struct xdp_md *ctx)
+{
+ return xdp_prog_common(ctx);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index e47788bfa671..4c7e51336ab2 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -4,7 +4,8 @@ top_srcdir = ../../../../..
CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+TEST_PROGS := mptcp_connect.sh mptcp_connect_mmap.sh mptcp_connect_sendfile.sh \
+ mptcp_connect_checksum.sh pm_netlink.sh mptcp_join.sh diag.sh \
simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq mptcp_diag
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
new file mode 100755
index 000000000000..ce93ec2f107f
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_checksum.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -C "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
new file mode 100755
index 000000000000..5dd30f9394af
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_mmap.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -m mmap "${@}"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
new file mode 100755
index 000000000000..1d16fb1cc9bb
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect_sendfile.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+MPTCP_LIB_KSFT_TEST="$(basename "${0}" .sh)" \
+ "$(dirname "${0}")/mptcp_connect.sh" -m sendfile "${@}"
diff --git a/tools/testing/selftests/net/netfilter/conntrack_clash.sh b/tools/testing/selftests/net/netfilter/conntrack_clash.sh
index 3712c1b9b38b..606a43a60f73 100755
--- a/tools/testing/selftests/net/netfilter/conntrack_clash.sh
+++ b/tools/testing/selftests/net/netfilter/conntrack_clash.sh
@@ -93,32 +93,28 @@ ping_test()
run_one_clash_test()
{
local ns="$1"
- local daddr="$2"
- local dport="$3"
+ local ctns="$2"
+ local daddr="$3"
+ local dport="$4"
local entries
local cre
if ! ip netns exec "$ns" ./udpclash $daddr $dport;then
- echo "FAIL: did not receive expected number of replies for $daddr:$dport"
- ret=1
- return 1
+ echo "INFO: did not receive expected number of replies for $daddr:$dport"
+ ip netns exec "$ctns" conntrack -S
+ # don't fail: check if clash resolution triggered after all.
fi
- entries=$(conntrack -S | wc -l)
- cre=$(conntrack -S | grep -v "clash_resolve=0" | wc -l)
+ entries=$(ip netns exec "$ctns" conntrack -S | wc -l)
+ cre=$(ip netns exec "$ctns" conntrack -S | grep "clash_resolve=0" | wc -l)
- if [ "$cre" -ne "$entries" ] ;then
+ if [ "$cre" -ne "$entries" ];then
clash_resolution_active=1
return 0
fi
- # 1 cpu -> parallel insertion impossible
- if [ "$entries" -eq 1 ]; then
- return 0
- fi
-
- # not a failure: clash resolution logic did not trigger, but all replies
- # were received. With right timing, xmit completed sequentially and
+ # not a failure: clash resolution logic did not trigger.
+ # With right timing, xmit completed sequentially and
# no parallel insertion occurs.
return $ksft_skip
}
@@ -126,20 +122,23 @@ run_one_clash_test()
run_clash_test()
{
local ns="$1"
- local daddr="$2"
- local dport="$3"
+ local ctns="$2"
+ local daddr="$3"
+ local dport="$4"
+ local softerr=0
for i in $(seq 1 10);do
- run_one_clash_test "$ns" "$daddr" "$dport"
+ run_one_clash_test "$ns" "$ctns" "$daddr" "$dport"
local rv=$?
if [ $rv -eq 0 ];then
echo "PASS: clash resolution test for $daddr:$dport on attempt $i"
return 0
- elif [ $rv -eq 1 ];then
- echo "FAIL: clash resolution test for $daddr:$dport on attempt $i"
- return 1
+ elif [ $rv -eq $ksft_skip ]; then
+ softerr=1
fi
done
+
+ [ $softerr -eq 1 ] && echo "SKIP: clash resolution for $daddr:$dport did not trigger"
}
ip link add veth0 netns "$nsclient1" type veth peer name veth0 netns "$nsrouter"
@@ -161,11 +160,11 @@ spawn_servers "$nsclient2"
# exercise clash resolution with nat:
# nsrouter is supposed to dnat to 10.0.2.1:900{0,1,2,3}.
-run_clash_test "$nsclient1" 10.0.1.99 "$dport"
+run_clash_test "$nsclient1" "$nsrouter" 10.0.1.99 "$dport"
# exercise clash resolution without nat.
load_simple_ruleset "$nsclient2"
-run_clash_test "$nsclient2" 127.0.0.1 9001
+run_clash_test "$nsclient2" "$nsclient2" 127.0.0.1 9001
if [ $clash_resolution_active -eq 0 ];then
[ "$ret" -eq 0 ] && ret=$ksft_skip
diff --git a/tools/testing/selftests/sched_ext/exit.c b/tools/testing/selftests/sched_ext/exit.c
index 9451782689de..ee25824b1cbe 100644
--- a/tools/testing/selftests/sched_ext/exit.c
+++ b/tools/testing/selftests/sched_ext/exit.c
@@ -22,6 +22,14 @@ static enum scx_test_status run(void *ctx)
struct bpf_link *link;
char buf[16];
+ /*
+ * On single-CPU systems, ops.select_cpu() is never
+ * invoked, so skip this test to avoid getting stuck
+ * indefinitely.
+ */
+ if (tc == EXIT_SELECT_CPU && libbpf_num_possible_cpus() == 1)
+ continue;
+
skel = exit__open();
SCX_ENUM_INIT(skel);
skel->rodata->exit_point = tc;
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index 8e902f7f1a18..c20aa16b1d63 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -31,6 +31,7 @@ CONFIG_NET_SCH_CBS=m
CONFIG_NET_SCH_CHOKE=m
CONFIG_NET_SCH_CODEL=m
CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_DUALPI2=m
CONFIG_NET_SCH_ETF=m
CONFIG_NET_SCH_FQ=m
CONFIG_NET_SCH_FQ_CODEL=m
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json
new file mode 100644
index 000000000000..cd1f2ee8f354
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/dualpi2.json
@@ -0,0 +1,254 @@
+[
+ {
+ "id": "a4c7",
+ "name": "Create DualPI2 with default setting",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* step_thresh 1ms min_qlen_step 0p coupling_factor 2 drop_on_overload drop_dequeue classic_protection 10% l4s_ect split_gso",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "1ea4",
+ "name": "Create DualPI2 with memlimit",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 memlimit 20000000",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* memlimit 20000000B",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "2130",
+ "name": "Create DualPI2 with typical_rtt and max_rtt",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 typical_rtt 20ms max_rtt 200ms",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* target 20ms tupdate 20ms alpha 0.042969 beta 1.496094",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "90c1",
+ "name": "Create DualPI2 with max_rtt",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 max_rtt 300ms",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* target 50ms tupdate 50ms alpha 0.050781 beta 0.996094",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "7b3c",
+ "name": "Create DualPI2 with any_ect option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 any_ect",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* any_ect",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "49a3",
+ "name": "Create DualPI2 with overflow option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 overflow",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p.* overflow",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "d0a1",
+ "name": "Create DualPI2 with drop_enqueue option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 drop_enqueue",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* drop_enqueue",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "f051",
+ "name": "Create DualPI2 with no_split_gso option",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 no_split_gso",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* no_split_gso",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "456b",
+ "name": "Create DualPI2 with packet step_thresh",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 step_thresh 3p",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* step_thresh 3p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "610c",
+ "name": "Create DualPI2 with packet min_qlen_step",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 min_qlen_step 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* min_qlen_step 1p",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "b4fa",
+ "name": "Create DualPI2 with packet coupling_factor",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 coupling_factor 1",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* coupling_factor 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ },
+ {
+ "id": "37f1",
+ "name": "Create DualPI2 with packet classic_protection",
+ "category": [
+ "qdisc",
+ "dualpi2"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root dualpi2 classic_protection 0",
+ "expExitCode": "0",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc dualpi2 1: root refcnt [0-9]+ limit 10000p .* classic_protection 0%",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DUMMY handle 1: root"
+ ]
+ }
+]
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
index 589b18ed758a..dae19687912d 100755
--- a/tools/testing/selftests/tc-testing/tdc.sh
+++ b/tools/testing/selftests/tc-testing/tdc.sh
@@ -4,8 +4,7 @@
# If a module is required and was not compiled
# the test that requires it will fail anyways
try_modprobe() {
- modprobe -q -R "$1"
- if [ $? -ne 0 ]; then
+ if ! modprobe -q -R "$1"; then
echo "Module $1 not found... skipping."
else
modprobe "$1"
@@ -67,4 +66,5 @@ try_modprobe sch_hfsc
try_modprobe sch_hhf
try_modprobe sch_htb
try_modprobe sch_teql
-./tdc.py -J`nproc`
+try_modprobe sch_dualpi2
+./tdc.py -J"$(nproc)"