-rw-r--r--Documentation/ABI/testing/sysfs-class-net-phydev10
-rw-r--r--Documentation/dev-tools/checkpatch.rst9
-rw-r--r--Documentation/devicetree/bindings/net/cdns,macb.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml22
-rw-r--r--Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml18
-rw-r--r--Documentation/devicetree/bindings/net/qca,ar803x.yaml43
-rw-r--r--Documentation/devicetree/bindings/net/qca,qca7000.txt87
-rw-r--r--Documentation/devicetree/bindings/net/qca,qca7000.yaml109
-rw-r--r--Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml2
-rw-r--r--Documentation/driver-api/dpll.rst18
-rw-r--r--Documentation/netlink/specs/conntrack.yaml38
-rw-r--r--Documentation/netlink/specs/devlink.yaml208
-rw-r--r--Documentation/netlink/specs/dpll.yaml38
-rw-r--r--Documentation/netlink/specs/ethtool.yaml159
-rw-r--r--Documentation/netlink/specs/fou.yaml14
-rw-r--r--Documentation/netlink/specs/handshake.yaml10
-rw-r--r--Documentation/netlink/specs/lockd.yaml4
-rw-r--r--Documentation/netlink/specs/mptcp_pm.yaml192
-rw-r--r--Documentation/netlink/specs/net_shaper.yaml7
-rw-r--r--Documentation/netlink/specs/netdev.yaml43
-rw-r--r--Documentation/netlink/specs/nfsd.yaml10
-rw-r--r--Documentation/netlink/specs/nftables.yaml16
-rw-r--r--Documentation/netlink/specs/nl80211.yaml109
-rw-r--r--Documentation/netlink/specs/nlctrl.yaml6
-rw-r--r--Documentation/netlink/specs/ovpn.yaml26
-rw-r--r--Documentation/netlink/specs/ovs_datapath.yaml2
-rw-r--r--Documentation/netlink/specs/ovs_flow.yaml16
-rw-r--r--Documentation/netlink/specs/ovs_vport.yaml4
-rw-r--r--Documentation/netlink/specs/rt-addr.yaml2
-rw-r--r--Documentation/netlink/specs/rt-link.yaml2
-rw-r--r--Documentation/netlink/specs/rt-neigh.yaml2
-rw-r--r--Documentation/netlink/specs/rt-route.yaml10
-rw-r--r--Documentation/netlink/specs/rt-rule.yaml2
-rw-r--r--Documentation/netlink/specs/tc.yaml27
-rw-r--r--Documentation/netlink/specs/tcp_metrics.yaml8
-rw-r--r--Documentation/netlink/specs/team.yaml16
-rw-r--r--Documentation/networking/can.rst11
-rw-r--r--Documentation/networking/device_drivers/ethernet/amazon/ena.rst108
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/ice.rst13
-rw-r--r--Documentation/networking/devlink/devlink-params.rst3
-rw-r--r--Documentation/networking/ethtool-netlink.rst52
-rw-r--r--Documentation/networking/ip-sysctl.rst682
-rw-r--r--Documentation/networking/net_cachelines/tcp_sock.rst2
-rw-r--r--Documentation/networking/netconsole.rst32
-rw-r--r--MAINTAINERS5
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h5
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c13
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c4
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c6
-rw-r--r--drivers/dpll/dpll_core.h1
-rw-r--r--drivers/dpll/dpll_netlink.c69
-rw-r--r--drivers/dpll/dpll_nl.c5
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.c3
-rw-r--r--drivers/net/amt.c11
-rw-r--r--drivers/net/bareudp.c7
-rw-r--r--drivers/net/can/dev/calc_bittiming.c2
-rw-r--r--drivers/net/can/dev/netlink.c26
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c216
-rw-r--r--drivers/net/can/spi/mcp251x.c37
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c2
-rw-r--r--drivers/net/can/xilinx_can.c2
-rw-r--r--drivers/net/dsa/b53/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c284
-rw-r--r--drivers/net/dsa/b53/b53_priv.h48
-rw-r--r--drivers/net/dsa/b53/b53_regs.h27
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c3
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c4
-rw-r--r--drivers/net/dsa/mt7530.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c6
-rw-r--r--drivers/net/dsa/qca/ar9331.c4
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c10
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c31
-rw-r--r--drivers/net/ethernet/amazon/Kconfig2
-rw-r--r--drivers/net/ethernet/amazon/ena/Makefile2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h74
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c267
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h84
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_debugfs.h27
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.c210
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_devlink.h21
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c55
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c62
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h14
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.c233
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_phc.h37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h8
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c33
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c86
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c7
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c15
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c39
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c37
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c105
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c8
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c57
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c44
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c36
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c11
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h3
-rw-r--r--drivers/net/ethernet/freescale/fec.h15
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c129
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c40
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c24
-rw-r--r--drivers/net/ethernet/google/Kconfig1
-rw-r--r--drivers/net/ethernet/google/gve/Makefile4
-rw-r--r--drivers/net/ethernet/google/gve/gve.h64
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c101
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h30
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h2
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c34
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c99
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c139
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c103
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c236
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c42
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c107
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c27
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c77
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c129
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c25
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h47
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h32
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c46
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c75
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c17
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h42
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h32
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c33
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h181
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c95
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h194
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c1120
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c45
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c257
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h177
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c546
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.c511
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tspll.h31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c36
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c20
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h33
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c30
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c57
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c118
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c32
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c28
-rw-r--r--drivers/net/ethernet/intel/libeth/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/libeth/Makefile8
-rw-r--r--drivers/net/ethernet/intel/libeth/priv.h37
-rw-r--r--drivers/net/ethernet/intel/libeth/rx.c42
-rw-r--r--drivers/net/ethernet/intel/libeth/tx.c41
-rw-r--r--drivers/net/ethernet/intel/libeth/xdp.c451
-rw-r--r--drivers/net/ethernet/intel/libeth/xsk.c271
-rw-r--r--drivers/net/ethernet/intel/libie/rx.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c424
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h40
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c240
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h81
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c252
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h46
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c171
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h49
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c44
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c69
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c309
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c19
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h134
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c237
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c53
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h16
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h19
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c169
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h27
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h16
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c126
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c31
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c441
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c10
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c292
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c82
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c17
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c3
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase.h1
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c39
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c45
-rw-r--r--drivers/net/ethernet/sfc/ef10.c1
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c102
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c51
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c1
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c77
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c712
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c17
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c27
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c147
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h17
-rw-r--r--drivers/net/geneve.c14
-rw-r--r--drivers/net/gtp.c12
-rw-r--r--drivers/net/hyperv/netvsc_drv.c30
-rw-r--r--drivers/net/mdio/fwnode_mdio.c26
-rw-r--r--drivers/net/mdio/mdio-mux-gpio.c3
-rw-r--r--drivers/net/netconsole.c270
-rw-r--r--drivers/net/netdevsim/netdev.c56
-rw-r--r--drivers/net/netdevsim/netdevsim.h7
-rw-r--r--drivers/net/netdevsim/udp_tunnels.c12
-rw-r--r--drivers/net/ovpn/udp.c4
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/dp83822.c7
-rw-r--r--drivers/net/phy/dp83869.c7
-rw-r--r--drivers/net/phy/dp83tg720.c185
-rw-r--r--drivers/net/phy/intel-xway.c7
-rw-r--r--drivers/net/phy/mdio-boardinfo.c29
-rw-r--r--drivers/net/phy/mdio-boardinfo.h9
-rw-r--r--drivers/net/phy/mdio_bus_provider.c1
-rw-r--r--drivers/net/phy/mdio_device.c5
-rw-r--r--drivers/net/phy/mediatek/Kconfig1
-rw-r--r--drivers/net/phy/micrel.c111
-rw-r--r--drivers/net/phy/mscc/mscc_main.c5
-rw-r--r--drivers/net/phy/phy-c45.c7
-rw-r--r--drivers/net/phy/phy-core.c78
-rw-r--r--drivers/net/phy/phy_caps.c9
-rw-r--r--drivers/net/phy/phy_device.c168
-rw-r--r--drivers/net/phy/phy_package.c71
-rw-r--r--drivers/net/phy/phylib-internal.h6
-rw-r--r--drivers/net/phy/phylink.c13
-rw-r--r--drivers/net/phy/qcom/Kconfig3
-rw-r--r--drivers/net/phy/qcom/at803x.c167
-rw-r--r--drivers/net/phy/qcom/qca807x.c13
-rw-r--r--drivers/net/phy/realtek/realtek_main.c10
-rw-r--r--drivers/net/phy/sfp.c21
-rw-r--r--drivers/net/ppp/ppp_generic.c52
-rw-r--r--drivers/net/ppp/pppoe.c6
-rw-r--r--drivers/net/pse-pd/pd692x0.c225
-rw-r--r--drivers/net/pse-pd/pse_core.c1066
-rw-r--r--drivers/net/pse-pd/tps23881.c403
-rw-r--r--drivers/net/team/team_core.c96
-rw-r--r--drivers/net/team/team_mode_activebackup.c3
-rw-r--r--drivers/net/team/team_mode_loadbalance.c13
-rw-r--r--drivers/net/usb/Kconfig3
-rw-r--r--drivers/net/usb/lan78xx.c736
-rw-r--r--drivers/net/usb/usbnet.c36
-rw-r--r--drivers/net/virtio_net.c47
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c74
-rw-r--r--drivers/net/vxlan/vxlan_core.c58
-rw-r--r--drivers/net/vxlan/vxlan_private.h2
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c18
-rw-r--r--drivers/net/wireguard/socket.c4
-rw-r--r--drivers/net/wireless/admtek/adm8211.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c7
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c1
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c1
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c18
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c439
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h7
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c117
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c26
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/common-beacon.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-debug.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c9
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/main.c1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c3
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_module.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c129
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h (renamed from drivers/net/wireless/intel/iwlwifi/pcie/internal.h)49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/rx.c)34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c)9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/trans.c)182
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c)0
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c (renamed from drivers/net/wireless/intel/iwlwifi/pcie/tx.c)16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h)6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-context-info.h)0
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.h38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/utils.c (renamed from drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c)43
-rw-r--r--drivers/net/wireless/intersil/p54/main.c3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c2
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c7
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c8
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c5
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c15
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c49
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8812a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c347
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h58
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1287
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h7
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c135
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h147
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c314
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h47
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c68
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c88
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c37
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c96
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c46
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c52
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c11
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c9
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c4
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h4
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c5
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h5
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c79
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c26
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c8
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c6
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c3
-rw-r--r--drivers/pci/msi/irqdomain.c5
-rw-r--r--drivers/ptp/ptp_clock.c2
-rw-r--r--drivers/ssb/driver_gpio.c8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c6
-rw-r--r--fs/smb/server/transport_tcp.c6
-rw-r--r--include/linux/avf/virtchnl.h23
-rw-r--r--include/linux/can/bittiming.h2
-rw-r--r--include/linux/can/dev.h4
-rw-r--r--include/linux/dpll.h8
-rw-r--r--include/linux/ethtool.h25
-rw-r--r--include/linux/ethtool_netlink.h7
-rw-r--r--include/linux/ieee80211.h10
-rw-r--r--include/linux/if_team.h3
-rw-r--r--include/linux/if_vlan.h23
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/mdio.h1
-rw-r--r--include/linux/mroute6.h7
-rw-r--r--include/linux/msi.h2
-rw-r--r--include/linux/net/intel/libie/pctype.h41
-rw-r--r--include/linux/netdevice.h17
-rw-r--r--include/linux/netpoll.h10
-rw-r--r--include/linux/packing.h6
-rw-r--r--include/linux/phy.h41
-rw-r--r--include/linux/pse-pd/pse.h114
-rw-r--r--include/linux/ref_tracker.h50
-rw-r--r--include/linux/skbuff.h20
-rw-r--r--include/linux/soc/marvell/silicons.h25
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/linux/usb/usbnet.h2
-rw-r--r--include/net/cfg80211.h170
-rw-r--r--include/net/devlink.h4
-rw-r--r--include/net/dropreason-core.h18
-rw-r--r--include/net/dsa.h2
-rw-r--r--include/net/inet_hashtables.h6
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_tunnels.h2
-rw-r--r--include/net/libeth/rx.h28
-rw-r--r--include/net/libeth/tx.h36
-rw-r--r--include/net/libeth/types.h106
-rw-r--r--include/net/libeth/xdp.h1879
-rw-r--r--include/net/libeth/xsk.h685
-rw-r--r--include/net/mac80211.h35
-rw-r--r--include/net/mana/gdma.h17
-rw-r--r--include/net/mana/mana.h173
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/netdev_queues.h9
-rw-r--r--include/net/netmem.h23
-rw-r--r--include/net/page_pool/helpers.h7
-rw-r--r--include/net/pfcp.h2
-rw-r--r--include/net/route.h4
-rw-r--r--include/net/sock.h21
-rw-r--r--include/net/tc_act/tc_csum.h9
-rw-r--r--include/net/tc_act/tc_ct.h9
-rw-r--r--include/net/tc_act/tc_gate.h9
-rw-r--r--include/net/tc_act/tc_mpls.h9
-rw-r--r--include/net/tc_act/tc_police.h9
-rw-r--r--include/net/tc_act/tc_sample.h9
-rw-r--r--include/net/tc_act/tc_vlan.h9
-rw-r--r--include/net/tcp.h8
-rw-r--r--include/net/udp_tunnel.h103
-rw-r--r--include/net/vxlan.h5
-rw-r--r--include/trace/events/tcp.h2
-rw-r--r--include/uapi/linux/dpll.h12
-rw-r--r--include/uapi/linux/ethtool_netlink.h2
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h43
-rw-r--r--include/uapi/linux/if_link.h1
-rw-r--r--include/uapi/linux/in6.h4
-rw-r--r--include/uapi/linux/net_dropmon.h7
-rw-r--r--include/uapi/linux/nl80211.h22
-rw-r--r--lib/ref_tracker.c283
-rw-r--r--lib/test_objagg.c77
-rw-r--r--net/appletalk/atalk_proc.c2
-rw-r--r--net/bluetooth/af_bluetooth.c2
-rw-r--r--net/bluetooth/iso.c4
-rw-r--r--net/bluetooth/l2cap_sock.c4
-rw-r--r--net/bluetooth/sco.c4
-rw-r--r--net/caif/cfctrl.c294
-rw-r--r--net/can/af_can.c6
-rw-r--r--net/can/bcm.c5
-rw-r--r--net/can/isotp.c5
-rw-r--r--net/can/j1939/socket.c5
-rw-r--r--net/can/raw.c5
-rw-r--r--net/core/dev.c51
-rw-r--r--net/core/neighbour.c16
-rw-r--r--net/core/net-sysfs.c59
-rw-r--r--net/core/net_namespace.c34
-rw-r--r--net/core/netdev_rx_queue.c6
-rw-r--r--net/core/netpoll.c288
-rw-r--r--net/core/sock.c41
-rw-r--r--net/devlink/param.c5
-rw-r--r--net/dsa/Kconfig16
-rw-r--r--net/dsa/tag_brcm.c119
-rw-r--r--net/ethtool/common.h8
-rw-r--r--net/ethtool/ioctl.c134
-rw-r--r--net/ethtool/netlink.c47
-rw-r--r--net/ethtool/netlink.h4
-rw-r--r--net/ethtool/pse-pd.c65
-rw-r--r--net/ethtool/rss.c11
-rw-r--r--net/ipv4/arp.c10
-rw-r--r--net/ipv4/fib_semantics.c3
-rw-r--r--net/ipv4/inet_connection_sock.c31
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/inet_hashtables.c4
-rw-r--r--net/ipv4/ip_tunnel.c4
-rw-r--r--net/ipv4/ip_tunnel_core.c4
-rw-r--r--net/ipv4/ipmr.c169
-rw-r--r--net/ipv4/nexthop.c3
-rw-r--r--net/ipv4/ping.c4
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c8
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/tcp.c5
-rw-r--r--net/ipv4/tcp_input.c166
-rw-r--r--net/ipv4/tcp_ipv4.c8
-rw-r--r--net/ipv4/tcp_output.c42
-rw-r--r--net/ipv4/udp.c19
-rw-r--r--net/ipv4/udp_tunnel_core.c21
-rw-r--r--net/ipv4/udp_tunnel_nic.c78
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6_udp_tunnel.c18
-rw-r--r--net/ipv6/ip6mr.c148
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/ipv6/seg6_local.c22
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/llc/af_llc.c6
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/mac80211/agg-rx.c6
-rw-r--r--net/mac80211/agg-tx.c3
-rw-r--r--net/mac80211/cfg.c117
-rw-r--r--net/mac80211/chan.c33
-rw-r--r--net/mac80211/driver-ops.h55
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h14
-rw-r--r--net/mac80211/iface.c6
-rw-r--r--net/mac80211/main.c9
-rw-r--r--net/mac80211/mlme.c18
-rw-r--r--net/mac80211/offchannel.c7
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rx.c15
-rw-r--r--net/mac80211/s1g.c26
-rw-r--r--net/mac80211/scan.c20
-rw-r--r--net/mac80211/sta_info.c415
-rw-r--r--net/mac80211/sta_info.h59
-rw-r--r--net/mac80211/trace.h105
-rw-r--r--net/mac80211/tx.c7
-rw-r--r--net/mac80211/util.c65
-rw-r--r--net/mptcp/protocol.c2
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-rsp.c1
-rw-r--r--net/nfc/netlink.c6
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/packet/diag.c2
-rw-r--r--net/phonet/socket.c4
-rw-r--r--net/rds/af_rds.c2
-rw-r--r--net/rds/send.c2
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/sched/em_text.c2
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/sctp/ipv6.c7
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/protocol.c3
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/smc/af_smc.c6
-rw-r--r--net/smc/smc_clc.c6
-rw-r--r--net/smc/smc_core.c5
-rw-r--r--net/smc/smc_diag.c2
-rw-r--r--net/smc/smc_loopback.c6
-rw-r--r--net/smc/smc_pnet.c2
-rw-r--r--net/socket.c54
-rw-r--r--net/strparser/strparser.c2
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/tipc/udp_media.c12
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/wireless/core.c19
-rw-r--r--net/wireless/mlme.c10
-rw-r--r--net/wireless/nl80211.c539
-rw-r--r--net/wireless/rdev-ops.h39
-rw-r--r--net/wireless/trace.h89
-rw-r--r--net/wireless/util.c36
-rw-r--r--net/wireless/wext-compat.c10
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/xdp/xsk_diag.c2
-rwxr-xr-xscripts/checkpatch.pl12
-rw-r--r--tools/testing/selftests/drivers/net/Makefile1
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devmem.py5
-rw-r--r--tools/testing/selftests/drivers/net/hw/lib/py/__init__.py17
-rw-r--r--tools/testing/selftests/drivers/net/hw/ncdevmem.c1
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_api.py89
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/__init__.py14
-rw-r--r--tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh84
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_basic.sh50
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_cmdline.sh52
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_sysdata.sh30
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh23
-rwxr-xr-xtools/testing/selftests/drivers/net/stats.py45
-rw-r--r--tools/testing/selftests/net/Makefile1
-rw-r--r--tools/testing/selftests/net/bench/Makefile7
-rw-r--r--tools/testing/selftests/net/bench/page_pool/Makefile17
-rw-r--r--tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c276
-rw-r--r--tools/testing/selftests/net/bench/page_pool/time_bench.c394
-rw-r--r--tools/testing/selftests/net/bench/page_pool/time_bench.h238
-rwxr-xr-xtools/testing/selftests/net/bench/test_bench_page_pool.sh32
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile1
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh46
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multicast.sh35
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh771
-rw-r--r--tools/testing/selftests/net/lib.sh12
-rw-r--r--tools/testing/selftests/net/nettest.c12
-rwxr-xr-xtools/testing/selftests/net/nl_netdev.py38
-rwxr-xr-xtools/testing/selftests/net/packetdrill/ksft_runner.sh4
-rwxr-xr-xtools/testing/selftests/net/rtnetlink_notification.sh112
-rwxr-xr-xtools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh48
-rw-r--r--tools/testing/selftests/net/tcp_ao/seq-ext.c2
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_vnifiltering.sh9
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh4
-rw-r--r--tools/testing/selftests/ptp/testptp.c11
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json36
-rw-r--r--tools/testing/selftests/vsock/.gitignore2
-rw-r--r--tools/testing/selftests/vsock/Makefile17
-rw-r--r--tools/testing/selftests/vsock/config111
-rw-r--r--tools/testing/selftests/vsock/settings1
-rwxr-xr-xtools/testing/selftests/vsock/vmtest.sh487
-rw-r--r--tools/testing/vsock/util.c80
-rw-r--r--tools/testing/vsock/util.h30
-rw-r--r--tools/testing/vsock/vsock_test.c93
881 files changed, 29799 insertions, 9575 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net-phydev b/Documentation/ABI/testing/sysfs-class-net-phydev
index ac722dd5e694..31615c59bff9 100644
--- a/Documentation/ABI/testing/sysfs-class-net-phydev
+++ b/Documentation/ABI/testing/sysfs-class-net-phydev
@@ -26,6 +26,16 @@ Description:
This ID is used to match the device with the appropriate
driver.
+What: /sys/class/mdio_bus/<bus>/<device>/c45_phy_ids/mmd<n>_device_id
+Date: June 2025
+KernelVersion: 6.17
+Contact: netdev@vger.kernel.org
+Description:
+ This attribute contains the 32-bit PHY Identifier as reported
+ by the device during bus enumeration, encoded in hexadecimal.
+ These C45 IDs are used to match the device with the appropriate
+ driver. These files are not visible for C22-only devices.
+
What: /sys/class/mdio_bus/<bus>/<device>/phy_interface
Date: February 2014
KernelVersion: 3.15
diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst
index 76bd0ddb0041..d5c47e560324 100644
--- a/Documentation/dev-tools/checkpatch.rst
+++ b/Documentation/dev-tools/checkpatch.rst
@@ -495,6 +495,15 @@ Comments
See: https://lore.kernel.org/lkml/20131006222342.GT19510@leaf/
+ **UNCOMMENTED_RGMII_MODE**
+ Historically, the RGMII PHY modes specified in Device Trees have been
+ used inconsistently, often referring to the usage of delays on the PHY
+ side rather than describing the board.
+
+ The PHY modes "rgmii", "rgmii-rxid" and "rgmii-txid" require the clock
+ signal to be delayed on the PCB; this unusual configuration should be
+ described in a comment. If no such PCB delay exists (meaning that the delay is
+ realized internally in the MAC or PHY), "rgmii-id" is the correct PHY mode.
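
As a hedged illustration of the convention above (the node labels and properties are invented for this sketch, not taken from the patch), a board whose PCB traces really do add the clock delays keeps the plain "rgmii" mode and says so in a comment, while the usual internally-delayed case just uses "rgmii-id":

&gmac0 {
	phy-handle = <&phy0>;
	/* RX/TX clock delays are added by extra-long PCB traces,
	 * not inside the MAC or the PHY, hence plain "rgmii".
	 */
	phy-mode = "rgmii";
};

&gmac1 {
	phy-handle = <&phy1>;
	/* Delays are realized internally by the MAC/PHY pair. */
	phy-mode = "rgmii-id";
};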
Commit message
--------------
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index 8d69846b2e09..4423d038b243 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -62,6 +62,7 @@ properties:
- items:
- enum:
- microchip,sam9x7-gem # Microchip SAM9X7 gigabit ethernet interface
+ - microchip,sama7d65-gem # Microchip SAMA7D65 gigabit ethernet interface
- const: microchip,sama7g5-gem # Microchip SAMA7G5 gigabit ethernet interface
reg:
diff --git a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
index fd4244fceced..ca61cc37a790 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/microchip,pd692x0.yaml
@@ -22,6 +22,12 @@ properties:
reg:
maxItems: 1
+ vdd-supply:
+ description: Regulator that provides 3.3V VDD power supply.
+
+ vdda-supply:
+ description: Regulator that provides 3.3V VDDA power supply.
+
managers:
type: object
additionalProperties: false
@@ -68,6 +74,15 @@ properties:
"#size-cells":
const: 0
+ vmain-supply:
+ description: Regulator that provides 44-57V VMAIN power supply.
+
+ vaux5-supply:
+ description: Regulator that provides 5V VAUX5 power supply.
+
+ vaux3p3-supply:
+ description: Regulator that provides 3.3V VAUX3P3 power supply.
+
patternProperties:
'^port@[0-7]$':
type: object
@@ -106,10 +121,11 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
- manager@0 {
+ manager0: manager@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
+ vmain-supply = <&pse1_supply>;
phys0: port@0 {
reg = <0>;
@@ -161,7 +177,7 @@ examples:
pairset-names = "alternative-a", "alternative-b";
pairsets = <&phys0>, <&phys1>;
polarity-supported = "MDI", "S";
- vpwr-supply = <&vpwr1>;
+ vpwr-supply = <&manager0>;
};
pse_pi1: pse-pi@1 {
reg = <1>;
@@ -169,7 +185,7 @@ examples:
pairset-names = "alternative-a";
pairsets = <&phys2>;
polarity-supported = "MDI";
- vpwr-supply = <&vpwr2>;
+ vpwr-supply = <&manager0>;
};
};
};
diff --git a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
index d08abcb01211..bb1ee3398655 100644
--- a/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
+++ b/Documentation/devicetree/bindings/net/pse-pd/ti,tps23881.yaml
@@ -20,6 +20,9 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
'#pse-cells':
const: 1
@@ -27,10 +30,12 @@ properties:
maxItems: 1
channels:
- description: each set of 8 ports can be assigned to one physical
- channels or two for PoE4. This parameter describes the configuration
- of the ports conversion matrix that establishes relationship between
- the logical ports and the physical channels.
+ description: |
+ Defines the 8 physical delivery channels on the controller that can
+ be referenced by PSE PIs through their "pairsets" property. The actual
+ port matrix mapping is created when PSE PIs reference these channels in
+ their pairsets. For 4-pair operation, two channels from the same group
+ (0-3 or 4-7) must be referenced by a single PSE PI.
type: object
additionalProperties: false
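
A minimal sketch of the 4-pair case described in this property (node and label names are illustrative, not taken from the patch): a single PSE PI references two channels from the same group, here channels 0 and 1:

channels {
	#address-cells = <1>;
	#size-cells = <0>;

	phys0: channel@0 {
		reg = <0>;
	};

	phys1: channel@1 {
		reg = <1>;
	};
};

pse-pis {
	#address-cells = <1>;
	#size-cells = <0>;

	pse_pi0: pse-pi@0 {
		reg = <0>;
		/* Two channels from the same group (0-3) form one 4-pair PI. */
		pairsets = <&phys0>, <&phys1>;
		pairset-names = "alternative-a", "alternative-b";
	};
};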
@@ -62,9 +67,12 @@ unevaluatedProperties: false
required:
- compatible
- reg
+ - interrupts
examples:
- |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
i2c {
#address-cells = <1>;
#size-cells = <0>;
@@ -72,6 +80,8 @@ examples:
ethernet-pse@20 {
compatible = "ti,tps23881";
reg = <0x20>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpiog>;
channels {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/net/qca,ar803x.yaml b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
index 3acd09f0da86..7ae5110e7aa2 100644
--- a/Documentation/devicetree/bindings/net/qca,ar803x.yaml
+++ b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
@@ -16,8 +16,37 @@ description: |
allOf:
- $ref: ethernet-phy.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ethernet-phy-id004d.d0c0
+
+ then:
+ properties:
+ reg:
+ const: 7 # This PHY is always at MDIO address 7 in the IPQ5018 SoC
+
+ resets:
+ items:
+ - description:
+ GE PHY MISC reset which triggers a reset across MDC, DSP, RX, and TX lines.
+
+ qcom,dac-preset-short-cable:
+ description:
+ Set if this PHY is connected to another PHY. This adjusts the MDAC and
+ EDAC values (amplitude, bias current settings, and the error detection
+ and correction algorithm) to accommodate a short cable length.
+ If not set, DAC values are not modified and it is assumed the MDI output pins
+ of this PHY are directly connected to an RJ45 connector.
+ type: boolean
properties:
+ compatible:
+ enum:
+ - ethernet-phy-id004d.d0c0
+
qca,clk-out-frequency:
description: Clock output frequency in Hertz.
$ref: /schemas/types.yaml#/definitions/uint32
@@ -132,3 +161,17 @@ examples:
};
};
};
+ - |
+ #include <dt-bindings/reset/qcom,gcc-ipq5018.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ge_phy: ethernet-phy@7 {
+ compatible = "ethernet-phy-id004d.d0c0";
+ reg = <7>;
+
+ resets = <&gcc GCC_GEPHY_MISC_ARES>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt
deleted file mode 100644
index 8f5ae0b84eec..000000000000
--- a/Documentation/devicetree/bindings/net/qca,qca7000.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-* Qualcomm QCA7000
-
-The QCA7000 is a serial-to-powerline bridge with a host interface which could
-be configured either as SPI or UART slave. This configuration is done by
-the QCA7000 firmware.
-
-(a) Ethernet over SPI
-
-In order to use the QCA7000 as SPI device it must be defined as a child of a
-SPI master in the device tree.
-
-Required properties:
-- compatible : Should be "qca,qca7000"
-- reg : Should specify the SPI chip select
-- interrupts : The first cell should specify the index of the source
- interrupt and the second cell should specify the trigger
- type as rising edge
-- spi-cpha : Must be set
-- spi-cpol : Must be set
-
-Optional properties:
-- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at.
- Numbers smaller than 1000000 or greater than 16000000
- are invalid. Missing the property will set the SPI
- frequency to 8000000 Hertz.
-- qca,legacy-mode : Set the SPI data transfer of the QCA7000 to legacy mode.
- In this mode the SPI master must toggle the chip select
- between each data word. In burst mode these gaps aren't
- necessary, which is faster. This setting depends on how
- the QCA7000 is setup via GPIO pin strapping. If the
- property is missing the driver defaults to burst mode.
-
-The MAC address will be determined using the optional properties
-defined in ethernet.txt.
-
-SPI Example:
-
-/* Freescale i.MX28 SPI master*/
-ssp2: spi@80014000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,imx28-spi";
- pinctrl-names = "default";
- pinctrl-0 = <&spi2_pins_a>;
-
- qca7000: ethernet@0 {
- compatible = "qca,qca7000";
- reg = <0x0>;
- interrupt-parent = <&gpio3>; /* GPIO Bank 3 */
- interrupts = <25 0x1>; /* Index: 25, rising edge */
- spi-cpha; /* SPI mode: CPHA=1 */
- spi-cpol; /* SPI mode: CPOL=1 */
- spi-max-frequency = <8000000>; /* freq: 8 MHz */
- local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
- };
-};
-
-(b) Ethernet over UART
-
-In order to use the QCA7000 as UART slave it must be defined as a child of a
-UART master in the device tree. It is possible to preconfigure the UART
-settings of the QCA7000 firmware, but it's not possible to change them during
-runtime.
-
-Required properties:
-- compatible : Should be "qca,qca7000"
-
-Optional properties:
-- local-mac-address : see ./ethernet.txt
-- current-speed : current baud rate of QCA7000 which defaults to 115200
- if absent, see also ../serial/serial.yaml
-
-UART Example:
-
-/* Freescale i.MX28 UART */
-auart0: serial@8006a000 {
- compatible = "fsl,imx28-auart", "fsl,imx23-auart";
- reg = <0x8006a000 0x2000>;
- pinctrl-names = "default";
- pinctrl-0 = <&auart0_2pins_a>;
-
- qca7000: ethernet {
- compatible = "qca,qca7000";
- local-mac-address = [ A0 B0 C0 D0 E0 F0 ];
- current-speed = <38400>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.yaml b/Documentation/devicetree/bindings/net/qca,qca7000.yaml
new file mode 100644
index 000000000000..b503c3aa3616
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qca,qca7000.yaml
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/qca,qca7000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QCA7000
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+
+description: |
+ The QCA7000 is a serial-to-powerline bridge with a host interface which can
+ be configured as either an SPI or a UART slave. This configuration is done by
+ the QCA7000 firmware.
+
+ (a) Ethernet over SPI
+
+ In order to use the QCA7000 as SPI device it must be defined as a child of a
+ SPI master in the device tree.
+
+ (b) Ethernet over UART
+
+ In order to use the QCA7000 as UART slave it must be defined as a child of a
+ UART master in the device tree. It is possible to preconfigure the UART
+ settings of the QCA7000 firmware, but it's not possible to change them during
+ runtime.
+
+properties:
+ compatible:
+ const: qca,qca7000
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ qca,legacy-mode:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set the SPI data transfer of the QCA7000 to legacy mode.
+ In this mode the SPI master must toggle the chip select
+ between each data word. In burst mode these gaps aren't
+ necessary, which is faster. This setting depends on how
+ the QCA7000 is set up via GPIO pin strapping. If the
+ property is missing the driver defaults to burst mode.
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+ - if:
+ required:
+ - reg
+
+ then:
+ properties:
+ spi-cpha: true
+
+ spi-cpol: true
+
+ spi-max-frequency:
+ default: 8000000
+ maximum: 16000000
+ minimum: 1000000
+
+ allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+ else:
+ properties:
+ current-speed:
+ default: 115200
+
+ qca,legacy-mode: false
+
+ allOf:
+ - $ref: /schemas/serial/serial-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@0 {
+ compatible = "qca,qca7000";
+ reg = <0x0>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+ spi-cpha;
+ spi-cpol;
+ spi-max-frequency = <8000000>;
+ local-mac-address = [ a0 b0 c0 d0 e0 f0 ];
+ };
+ };
+
+ - |
+ serial {
+ ethernet {
+ compatible = "qca,qca7000";
+ local-mac-address = [ a0 b0 c0 d0 e0 f0 ];
+ current-speed = <38400>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml b/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
index c498a9999289..9961253d1d41 100644
--- a/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,r9a09g057-gbeth.yaml
@@ -14,6 +14,7 @@ select:
compatible:
contains:
enum:
+ - renesas,r9a09g047-gbeth
- renesas,r9a09g056-gbeth
- renesas,r9a09g057-gbeth
- renesas,rzv2h-gbeth
@@ -24,6 +25,7 @@ properties:
compatible:
items:
- enum:
+ - renesas,r9a09g047-gbeth # RZ/G3E
- renesas,r9a09g056-gbeth # RZ/V2N
- renesas,r9a09g057-gbeth # RZ/V2H(P)
- const: renesas,rzv2h-gbeth
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index 7b3d948f187d..a959c1d7e643 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -284,7 +284,7 @@ examples:
ti,syscon-efuse = <&mcu_conf 0x200>;
phys = <&phy_gmii_sel 1>;
- phy-mode = "rgmii-rxid";
+ phy-mode = "rgmii-id";
phy-handle = <&phy0>;
};
};
diff --git a/Documentation/driver-api/dpll.rst b/Documentation/driver-api/dpll.rst
index e6855cd37e85..195e1e5d9a58 100644
--- a/Documentation/driver-api/dpll.rst
+++ b/Documentation/driver-api/dpll.rst
@@ -214,6 +214,24 @@ offset values are fractional with 3-digit decimal places and shell be
divided with ``DPLL_PIN_PHASE_OFFSET_DIVIDER`` to get integer part and
modulo divided to get fractional part.
+Phase offset monitor
+====================
+
+Phase offset measurement is typically performed against the current active
+source. However, some DPLL (Digital Phase-Locked Loop) devices may offer
+the capability to monitor phase offsets across all available inputs.
+The attribute and current feature state shall be included in the response
+message of the ``DPLL_CMD_DEVICE_GET`` command for supported DPLL devices.
+In such cases, users can also control the feature using the
+``DPLL_CMD_DEVICE_SET`` command by setting the ``enum dpll_feature_state``
+values for the attribute.
+Once enabled, the phase offset measurements for the input shall be returned
+in the ``DPLL_A_PIN_PHASE_OFFSET`` attribute.
+
+ =============================== ========================
+ ``DPLL_A_PHASE_OFFSET_MONITOR`` attr state of a feature
+ =============================== ========================
+
Embedded SYNC
=============
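
The phase offset monitor section added above translates directly into a short
user-space exercise. A minimal sketch with the in-tree ynl CLI
(tools/net/ynl/cli.py), assuming a DPLL device with id 0 whose driver
advertises the feature; the real id should be taken from a prior device-get
dump:

    # check whether the feature is reported (supported devices only)
    $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
          --do device-get --json '{"id": 0}'

    # enable monitoring; phase offsets of all inputs are then reported
    # via DPLL_A_PIN_PHASE_OFFSET
    $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
          --do device-set --json '{"id": 0, "phase-offset-monitor": "enable"}'
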
diff --git a/Documentation/netlink/specs/conntrack.yaml b/Documentation/netlink/specs/conntrack.yaml
index 840dc4504216..c6832633ab7b 100644
--- a/Documentation/netlink/specs/conntrack.yaml
+++ b/Documentation/netlink/specs/conntrack.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: conntrack
protocol: netlink-raw
protonum: 12
@@ -195,17 +195,17 @@ attribute-sets:
-
name: tuple-attrs
attributes:
- -
+ -
name: tuple-ip
type: nest
nested-attributes: tuple-ip-attrs
doc: conntrack l3 information
- -
+ -
name: tuple-proto
type: nest
nested-attributes: tuple-proto-attrs
doc: conntrack l4 information
- -
+ -
name: tuple-zone
type: u16
byte-order: big-endian
@@ -213,74 +213,74 @@ attribute-sets:
-
name: protoinfo-tcp-attrs
attributes:
- -
+ -
name: tcp-state
type: u8
enum: nf-ct-tcp-state
doc: tcp connection state
- -
+ -
name: tcp-wscale-original
type: u8
doc: window scaling factor in original direction
- -
+ -
name: tcp-wscale-reply
type: u8
doc: window scaling factor in reply direction
- -
+ -
name: tcp-flags-original
type: binary
struct: nf-ct-tcp-flags-mask
- -
+ -
name: tcp-flags-reply
type: binary
struct: nf-ct-tcp-flags-mask
-
name: protoinfo-dccp-attrs
attributes:
- -
+ -
name: dccp-state
type: u8
doc: dccp connection state
- -
+ -
name: dccp-role
type: u8
- -
+ -
name: dccp-handshake-seq
type: u64
byte-order: big-endian
- -
+ -
name: dccp-pad
type: pad
-
name: protoinfo-sctp-attrs
attributes:
- -
+ -
name: sctp-state
type: u8
doc: sctp connection state
enum: nf-ct-sctp-state
- -
+ -
name: vtag-original
type: u32
byte-order: big-endian
- -
+ -
name: vtag-reply
type: u32
byte-order: big-endian
-
name: protoinfo-attrs
attributes:
- -
+ -
name: protoinfo-tcp
type: nest
nested-attributes: protoinfo-tcp-attrs
doc: conntrack tcp state information
- -
+ -
name: protoinfo-dccp
type: nest
nested-attributes: protoinfo-dccp-attrs
doc: conntrack dccp state information
- -
+ -
name: protoinfo-sctp
type: nest
nested-attributes: protoinfo-sctp-attrs
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index 38ddc04f9e6d..bfba466d694a 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: devlink
protocol: genetlink-legacy
@@ -744,7 +744,7 @@ attribute-sets:
name: flash-update-overwrite-mask
type: bitfield32
enum: flash-overwrite
- enum-as-flags: True
+ enum-as-flags: true
-
name: reload-action
type: u8
@@ -753,12 +753,12 @@ attribute-sets:
name: reload-actions-performed
type: bitfield32
enum: reload-action
- enum-as-flags: True
+ enum-as-flags: true
-
name: reload-limits
type: bitfield32
enum: reload-action
- enum-as-flags: True
+ enum-as-flags: true
-
name: dev-stats
type: nest
@@ -812,14 +812,14 @@ attribute-sets:
name: rate-parent-node-name
type: string
-
- name: region-max-snapshots
- type: u32
+ name: region-max-snapshots
+ type: u32
-
name: linecard-index
type: u32
-
- name: linecard-state
- type: u8
+ name: linecard-state
+ type: u8
-
name: linecard-type
type: string
@@ -917,7 +917,7 @@ attribute-sets:
name: caps
type: bitfield32
enum: port-fn-attr-cap
- enum-as-flags: True
+ enum-as-flags: true
-
name: dl-dpipe-tables
@@ -1139,7 +1139,7 @@ attribute-sets:
-
name: param-type
- # TODO: fill in the attribute param-value-list
+ # TODO: fill in the attribute param-value-list
-
name: dl-region-snapshots
@@ -1257,7 +1257,7 @@ operations:
name: get
doc: Get devlink instances.
attribute-set: devlink
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1266,7 +1266,7 @@ operations:
attributes: &dev-id-attrs
- bus-name
- dev-name
- reply: &get-reply
+ reply: &get-reply
value: 3
attributes:
- bus-name
@@ -1280,7 +1280,7 @@ operations:
name: port-get
doc: Get devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1304,8 +1304,8 @@ operations:
name: port-set
doc: Set devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1321,8 +1321,8 @@ operations:
name: port-new
doc: Create devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1343,8 +1343,8 @@ operations:
name: port-del
doc: Delete devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1355,8 +1355,8 @@ operations:
name: port-split
doc: Split devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1371,8 +1371,8 @@ operations:
name: port-unsplit
 doc: Unsplit devlink port instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1383,7 +1383,7 @@ operations:
name: sb-get
doc: Get shared buffer instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1405,7 +1405,7 @@ operations:
name: sb-pool-get
doc: Get shared buffer pool instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1428,8 +1428,8 @@ operations:
name: sb-pool-set
doc: Set shared buffer pool instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1446,7 +1446,7 @@ operations:
name: sb-port-pool-get
doc: Get shared buffer port-pool combinations and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1470,8 +1470,8 @@ operations:
name: sb-port-pool-set
doc: Set shared buffer port-pool combinations and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1488,7 +1488,7 @@ operations:
name: sb-tc-pool-bind-get
doc: Get shared buffer port-TC to pool bindings and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1513,8 +1513,8 @@ operations:
name: sb-tc-pool-bind-set
doc: Set shared buffer port-TC to pool bindings and threshold.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1533,8 +1533,8 @@ operations:
name: sb-occ-snapshot
doc: Take occupancy snapshot of shared buffer.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1549,8 +1549,8 @@ operations:
name: sb-occ-max-clear
doc: Clear occupancy watermarks of shared buffer.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1564,8 +1564,8 @@ operations:
name: eswitch-get
doc: Get eswitch attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1584,8 +1584,8 @@ operations:
name: eswitch-set
doc: Set eswitch attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1596,7 +1596,7 @@ operations:
name: dpipe-table-get
doc: Get dpipe table attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1616,7 +1616,7 @@ operations:
name: dpipe-entries-get
doc: Get dpipe entries attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1635,7 +1635,7 @@ operations:
name: dpipe-headers-get
doc: Get dpipe headers attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1653,8 +1653,8 @@ operations:
name: dpipe-table-counters-set
doc: Set dpipe counter attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1669,8 +1669,8 @@ operations:
name: resource-set
doc: Set resource attributes.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1685,7 +1685,7 @@ operations:
name: resource-dump
doc: Get resource attributes.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1704,8 +1704,8 @@ operations:
name: reload
doc: Reload devlink.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-dev-lock
post: devlink-nl-post-doit-dev-lock
@@ -1728,7 +1728,7 @@ operations:
name: param-get
doc: Get param instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1748,8 +1748,8 @@ operations:
name: param-set
doc: Set param instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1766,7 +1766,7 @@ operations:
name: region-get
doc: Get region instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1789,8 +1789,8 @@ operations:
name: region-new
doc: Create region snapshot.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1810,8 +1810,8 @@ operations:
name: region-del
doc: Delete region snapshot.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1822,8 +1822,8 @@ operations:
name: region-read
doc: Read region data.
attribute-set: devlink
- dont-validate: [ dump-strict ]
- flags: [ admin-perm ]
+ dont-validate: [dump-strict]
+ flags: [admin-perm]
dump:
request:
attributes:
@@ -1847,7 +1847,7 @@ operations:
name: port-param-get
doc: Get port param instances.
attribute-set: devlink
- dont-validate: [ strict, dump-strict ]
+ dont-validate: [strict, dump-strict]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1863,8 +1863,8 @@ operations:
name: port-param-set
doc: Set port param instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port
post: devlink-nl-post-doit
@@ -1873,9 +1873,11 @@ operations:
-
name: info-get
- doc: Get device information, like driver name, hardware and firmware versions etc.
+ doc: |
+ Get device information, like driver name, hardware and firmware versions
+ etc.
attribute-set: devlink
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -1900,7 +1902,7 @@ operations:
name: health-reporter-get
doc: Get health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1921,8 +1923,8 @@ operations:
name: health-reporter-set
doc: Set health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1940,8 +1942,8 @@ operations:
name: health-reporter-recover
doc: Recover health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1952,8 +1954,8 @@ operations:
name: health-reporter-diagnose
doc: Diagnose health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1964,8 +1966,8 @@ operations:
name: health-reporter-dump-get
doc: Dump health reporter instances.
attribute-set: devlink
- dont-validate: [ dump-strict ]
- flags: [ admin-perm ]
+ dont-validate: [dump-strict]
+ flags: [admin-perm]
dump:
request:
attributes: *health-reporter-id-attrs
@@ -1978,8 +1980,8 @@ operations:
name: health-reporter-dump-clear
doc: Clear dump of health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -1990,8 +1992,8 @@ operations:
name: flash-update
doc: Flash update devlink instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2007,7 +2009,7 @@ operations:
name: trap-get
doc: Get trap instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2029,8 +2031,8 @@ operations:
name: trap-set
doc: Set trap instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2045,7 +2047,7 @@ operations:
name: trap-group-get
doc: Get trap group instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2067,8 +2069,8 @@ operations:
name: trap-group-set
doc: Set trap group instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2084,7 +2086,7 @@ operations:
name: trap-policer-get
doc: Get trap policer instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2106,8 +2108,8 @@ operations:
name: trap-policer-set
doc: Get trap policer instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2123,8 +2125,8 @@ operations:
name: health-reporter-test
doc: Test health reporter instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit-port-optional
post: devlink-nl-post-doit
@@ -2136,7 +2138,7 @@ operations:
name: rate-get
doc: Get rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2159,8 +2161,8 @@ operations:
name: rate-set
doc: Set rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2179,8 +2181,8 @@ operations:
name: rate-new
doc: Create rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2199,8 +2201,8 @@ operations:
name: rate-del
doc: Delete rate instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2214,7 +2216,7 @@ operations:
name: linecard-get
doc: Get line card instances.
attribute-set: devlink
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2236,8 +2238,8 @@ operations:
name: linecard-set
doc: Set line card instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2252,7 +2254,7 @@ operations:
name: selftests-get
doc: Get device selftest instances.
attribute-set: devlink
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
@@ -2269,8 +2271,8 @@ operations:
name: selftests-run
doc: Run device selftest instances.
attribute-set: devlink
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
pre: devlink-nl-pre-doit
post: devlink-nl-post-doit
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index f434140b538e..c13440efab24 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: dpll
doc: DPLL subsystem.
@@ -240,6 +240,20 @@ definitions:
integer part of a measured phase offset value.
Value of (DPLL_A_PHASE_OFFSET % DPLL_PHASE_OFFSET_DIVIDER) is a
fractional part of a measured phase offset value.
+ -
+ type: enum
+ name: feature-state
+ doc: |
+ Allow control (enable/disable) and status checking over features.
+ entries:
+ -
+ name: disable
+ doc: |
+ feature shall be disabled
+ -
+ name: enable
+ doc: |
+ feature shall be enabled
attribute-sets:
-
@@ -293,6 +307,14 @@ attribute-sets:
be put to message multiple times to indicate possible parallel
quality levels (e.g. one specified by ITU option 1 and another
one specified by option 2).
+ -
+ name: phase-offset-monitor
+ type: u32
+ enum: feature-state
+ doc: Receive or request state of phase offset monitor feature.
+ If enabled, dpll device shall monitor and notify all currently
+ available inputs for changes of their phase offset against the
+ dpll device.
-
name: pin
enum-name: dpll_a_pin
@@ -445,7 +467,7 @@ operations:
doc: |
Get id of dpll device that matches given attributes
attribute-set: dpll
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-lock-doit
@@ -464,7 +486,7 @@ operations:
doc: |
Get list of DPLL devices (dump) or attributes of a single dpll device
attribute-set: dpll
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pre-doit
@@ -483,6 +505,7 @@ operations:
- temp
- clock-id
- type
+ - phase-offset-monitor
dump:
reply: *dev-attrs
@@ -491,7 +514,7 @@ operations:
name: device-set
doc: Set attributes for a DPLL device
attribute-set: dpll
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pre-doit
@@ -499,6 +522,7 @@ operations:
request:
attributes:
- id
+ - phase-offset-monitor
-
name: device-create-ntf
doc: Notification about device appearing
@@ -519,7 +543,7 @@ operations:
doc: |
Get id of a pin that matches given attributes
attribute-set: pin
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-lock-doit
@@ -547,7 +571,7 @@ operations:
a given dpll device
- do request with target dpll and target pin - single pin attributes
attribute-set: pin
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pin-pre-doit
@@ -585,7 +609,7 @@ operations:
name: pin-set
doc: Set attributes of a target pin
attribute-set: pin
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: dpll-pin-pre-doit
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 348c6ad548f5..49e782a33eb6 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ethtool
protocol: genetlink-legacy
@@ -15,14 +15,14 @@ definitions:
name: udp-tunnel-type
enum-name:
type: enum
- entries: [ vxlan, geneve, vxlan-gpe ]
+ entries: [vxlan, geneve, vxlan-gpe]
enum-cnt-name: __ethtool-udp-tunnel-type-cnt
render-max: true
-
name: stringset
type: enum
entries: []
- header: linux/ethtool.h # skip rendering, no actual definition
+ header: linux/ethtool.h # skip rendering, no actual definition
-
name: header-flags
type: flags
@@ -58,49 +58,51 @@ definitions:
doc: The firmware flashing process was stopped due to an error.
-
name: c33-pse-ext-state
- doc: "groups of PSE extended states functions. IEEE 802.3-2022 33.2.4.4 Variables"
+ doc: |
+ "groups of PSE extended states functions. IEEE 802.3-2022 33.2.4.4
+ Variables"
type: enum
name-prefix: ethtool-c33-pse-ext-state-
header: linux/ethtool.h
entries:
- -
- name: none
- doc: none
- -
- name: error-condition
- doc: Group of error_condition states
- -
- name: mr-mps-valid
- doc: Group of mr_mps_valid states
- -
- name: mr-pse-enable
- doc: Group of mr_pse_enable states
- -
- name: option-detect-ted
- doc: Group of option_detect_ted states
- -
- name: option-vport-lim
- doc: Group of option_vport_lim states
- -
- name: ovld-detected
- doc: Group of ovld_detected states
- -
- name: power-not-available
- doc: Group of power_not_available states
- -
- name: short-detected
- doc: Group of short_detected states
+ -
+ name: none
+ doc: none
+ -
+ name: error-condition
+ doc: Group of error_condition states
+ -
+ name: mr-mps-valid
+ doc: Group of mr_mps_valid states
+ -
+ name: mr-pse-enable
+ doc: Group of mr_pse_enable states
+ -
+ name: option-detect-ted
+ doc: Group of option_detect_ted states
+ -
+ name: option-vport-lim
+ doc: Group of option_vport_lim states
+ -
+ name: ovld-detected
+ doc: Group of ovld_detected states
+ -
+ name: power-not-available
+ doc: Group of power_not_available states
+ -
+ name: short-detected
+ doc: Group of short_detected states
-
name: phy-upstream-type
enum-name: phy-upstream
header: linux/ethtool.h
type: enum
name-prefix: phy-upstream
- entries: [ mac, phy ]
+ entries: [mac, phy]
-
name: tcp-data-split
type: enum
- entries: [ unknown, disabled, enabled ]
+ entries: [unknown, disabled, enabled]
-
name: hwtstamp-source
doc: Source of the hardware timestamp
@@ -119,6 +121,43 @@ definitions:
doc: |
Hardware timestamp comes from one PHY device
of the network topology
+ -
+ name: pse-event
+ doc: PSE event list for the PSE controller
+ type: flags
+ name-prefix: ethtool-
+ entries:
+ -
+ name: pse-event-over-current
+ doc: PSE output current is too high
+ -
+ name: pse-event-over-temp
+ doc: PSE in over temperature state
+ -
+ name: c33-pse-event-detection
+ doc: |
+ detection process occurs on the PSE. IEEE 802.3-2022 33.2.5 and
+ 145.2.6 PSE detection of PDs. IEEE 802.3-2022 30.9.1.1.5
+ aPSEPowerDetectionStatus.
+ -
+ name: c33-pse-event-classification
+ doc: |
+ classification process occurs on the PSE. IEEE 802.3-2022 33.2.6
+ and 145.2.8 classification of PDs mutual identification.
+ IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification.
+ -
+ name: c33-pse-event-disconnection
+ doc: |
+ PD has been disconnected on the PSE. IEEE 802.3-2022 33.3.8
+ and 145.3.9 PD Maintain Power Signature. IEEE 802.3-2022
+ 33.5.1.2.9 MPS Absent. IEEE 802.3-2022 30.9.1.1.20
+ aPSEMPSAbsentCounter.
+ -
+ name: pse-event-over-budget
+ doc: PSE turned off due to over budget situation
+ -
+ name: pse-event-sw-pw-control-error
+ doc: PSE faced an error managing the power control from software
attribute-sets:
-
@@ -1227,7 +1266,7 @@ attribute-sets:
-
name: stat
type: u64
- type-value: [ id ]
+ type-value: [id]
-
name: hist-rx
type: nest
@@ -1396,6 +1435,18 @@ attribute-sets:
type: nest
multi-attr: true
nested-attributes: c33-pse-pw-limit
+ -
+ name: pse-pw-d-id
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: pse-prio-max
+ type: u32
+ name-prefix: ethtool-a-
+ -
+ name: pse-prio
+ type: u32
+ name-prefix: ethtool-a-
-
name: rss
attr-cnt-name: __ethtool-a-rss-cnt
@@ -1556,6 +1607,19 @@ attribute-sets:
name: hwtstamp-flags
type: nest
nested-attributes: bitset
+ -
+ name: pse-ntf
+ attr-cnt-name: __ethtool-a-pse-ntf-cnt
+ attributes:
+ -
+ name: header
+ type: nest
+ nested-attributes: header
+ -
+ name: events
+ type: uint
+ enum: pse-event
+ doc: List of events reported by the PSE controller
operations:
enum-model: directional
@@ -2206,6 +2270,9 @@ operations:
- c33-pse-ext-substate
- c33-pse-avail-pw-limit
- c33-pse-pw-limit-ranges
+ - pse-pw-d-id
+ - pse-prio-max
+ - pse-prio
dump: *pse-get-op
-
name: pse-set
@@ -2220,6 +2287,7 @@ operations:
- podl-pse-admin-control
- c33-pse-admin-control
- c33-pse-avail-pw-limit
+ - pse-prio
-
name: rss-get
doc: Get RSS params.
@@ -2414,3 +2482,26 @@ operations:
attributes: *tsconfig
reply:
attributes: *tsconfig
+ -
+ name: pse-ntf
+ doc: Notification for PSE events.
+
+ attribute-set: pse-ntf
+
+ event:
+ attributes:
+ - header
+ - events
+ -
+ name: rss-ntf
+ doc: |
+ Notification for change in RSS configuration.
+ For additional contexts only modifications are notified, not creation
+ or removal of the contexts.
+ notify: rss-get
+
+mcast-groups:
+ list:
+ -
+ name: monitor
+ c-define-name: ethtool-mcgrp-monitor-name
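
The pse-ntf event and the monitor multicast group added above can be observed
from user space in the same way. A minimal sketch with the in-tree ynl CLI,
assuming a PSE-capable controller that raises one of the listed events while
the listener runs; the sleep interval is arbitrary:

    # subscribe to the ethtool "monitor" group and print notifications,
    # including pse-ntf, for 30 seconds
    $ ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/ethtool.yaml \
          --subscribe monitor --sleep 30
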
diff --git a/Documentation/netlink/specs/fou.yaml b/Documentation/netlink/specs/fou.yaml
index b02ab19817d3..57735726262e 100644
--- a/Documentation/netlink/specs/fou.yaml
+++ b/Documentation/netlink/specs/fou.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: fou
protocol: genetlink-legacy
@@ -18,7 +18,7 @@ definitions:
name: encap-type
name-prefix: fou-encap-
enum-name:
- entries: [ unspec, direct, gue ]
+ entries: [unspec, direct, gue]
attribute-sets:
-
@@ -81,8 +81,8 @@ operations:
doc: Add port.
attribute-set: fou
- dont-validate: [ strict, dump ]
- flags: [ admin-perm ]
+ dont-validate: [strict, dump]
+ flags: [admin-perm]
do:
request: &all_attrs
@@ -103,8 +103,8 @@ operations:
doc: Delete port.
attribute-set: fou
- dont-validate: [ strict, dump ]
- flags: [ admin-perm ]
+ dont-validate: [strict, dump]
+ flags: [admin-perm]
do:
request: &select_attrs
@@ -122,7 +122,7 @@ operations:
name: get
doc: Get tunnel info.
attribute-set: fou
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
request: *select_attrs
diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml
index b934cc513e3d..39ed1661c7f1 100644
--- a/Documentation/netlink/specs/handshake.yaml
+++ b/Documentation/netlink/specs/handshake.yaml
@@ -4,7 +4,7 @@
#
# Copyright (c) 2023, Oracle and/or its affiliates.
#
-
+---
name: handshake
protocol: genetlink
@@ -16,17 +16,17 @@ definitions:
type: enum
name: handler-class
value-start: 0
- entries: [ none, tlshd, max ]
+ entries: [none, tlshd, max]
-
type: enum
name: msg-type
value-start: 0
- entries: [ unspec, clienthello, serverhello ]
+ entries: [unspec, clienthello, serverhello]
-
type: enum
name: auth
value-start: 0
- entries: [ unspec, unauth, psk, x509 ]
+ entries: [unspec, unauth, psk, x509]
attribute-sets:
-
@@ -95,7 +95,7 @@ operations:
name: accept
doc: Handler retrieves next queued handshake request
attribute-set: accept
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
diff --git a/Documentation/netlink/specs/lockd.yaml b/Documentation/netlink/specs/lockd.yaml
index bbd4da5fe54b..3dc4ac1a051b 100644
--- a/Documentation/netlink/specs/lockd.yaml
+++ b/Documentation/netlink/specs/lockd.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: lockd
protocol: genetlink
uapi-header: linux/lockd_netlink.h
@@ -26,7 +26,7 @@ operations:
name: server-set
doc: set the lockd server parameters
attribute-set: server
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index fb57860fe778..02f1ddcfbf1c 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: mptcp_pm
protocol: genetlink-legacy
doc: Multipath TCP.
@@ -17,72 +17,72 @@ definitions:
enum-name: mptcp-event-type
name-prefix: mptcp-event-
entries:
- -
- name: unspec
- doc: unused event
- -
- name: created
- doc: >-
- A new MPTCP connection has been created. It is the good time to
- allocate memory and send ADD_ADDR if needed. Depending on the
- traffic-patterns it can take a long time until the
- MPTCP_EVENT_ESTABLISHED is sent.
- Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
- dport, server-side.
- -
- name: established
- doc: >-
- A MPTCP connection is established (can start new subflows).
- Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
- dport, server-side.
- -
- name: closed
- doc: >-
- A MPTCP connection has stopped.
- Attribute: token.
- -
- name: announced
- value: 6
- doc: >-
- A new address has been announced by the peer.
- Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
- -
- name: removed
- doc: >-
- An address has been lost by the peer.
- Attributes: token, rem_id.
- -
- name: sub-established
- value: 10
- doc: >-
- A new subflow has been established. 'error' should not be set.
- Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
- daddr6, sport, dport, backup, if-idx [, error].
- -
- name: sub-closed
- doc: >-
- A subflow has been closed. An error (copy of sk_err) could be set if an
- error has been detected for this subflow.
- Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
- daddr6, sport, dport, backup, if-idx [, error].
- -
- name: sub-priority
- value: 13
- doc: >-
- The priority of a subflow has changed. 'error' should not be set.
- Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
- daddr6, sport, dport, backup, if-idx [, error].
- -
- name: listener-created
- value: 15
- doc: >-
- A new PM listener is created.
- Attributes: family, sport, saddr4 | saddr6.
- -
- name: listener-closed
- doc: >-
- A PM listener is closed.
- Attributes: family, sport, saddr4 | saddr6.
+ -
+ name: unspec
+ doc: unused event
+ -
+ name: created
+ doc: >-
+ A new MPTCP connection has been created. It is a good time to
+ allocate memory and send ADD_ADDR if needed. Depending on the
+ traffic-patterns it can take a long time until the
+ MPTCP_EVENT_ESTABLISHED is sent.
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, server-side.
+ -
+ name: established
+ doc: >-
+ A MPTCP connection is established (can start new subflows).
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, server-side.
+ -
+ name: closed
+ doc: >-
+ A MPTCP connection has stopped.
+ Attribute: token.
+ -
+ name: announced
+ value: 6
+ doc: >-
+ A new address has been announced by the peer.
+ Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
+ -
+ name: removed
+ doc: >-
+ An address has been lost by the peer.
+ Attributes: token, rem_id.
+ -
+ name: sub-established
+ value: 10
+ doc: >-
+ A new subflow has been established. 'error' should not be set.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if-idx [, error].
+ -
+ name: sub-closed
+ doc: >-
+ A subflow has been closed. An error (copy of sk_err) could be set if
+ an error has been detected for this subflow.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if-idx [, error].
+ -
+ name: sub-priority
+ value: 13
+ doc: >-
+ The priority of a subflow has changed. 'error' should not be set.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if-idx [, error].
+ -
+ name: listener-created
+ value: 15
+ doc: >-
+ A new PM listener is created.
+ Attributes: family, sport, saddr4 | saddr6.
+ -
+ name: listener-closed
+ doc: >-
+ A PM listener is closed.
+ Attributes: family, sport, saddr4 | saddr6.
attribute-sets:
-
@@ -277,8 +277,8 @@ operations:
name: add-addr
doc: Add endpoint
attribute-set: endpoint
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &add-addr-attrs
request:
attributes:
@@ -287,39 +287,39 @@ operations:
name: del-addr
doc: Delete endpoint
attribute-set: endpoint
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: *add-addr-attrs
-
name: get-addr
doc: Get endpoint information
attribute-set: attr
- dont-validate: [ strict ]
+ dont-validate: [strict]
do: &get-addr-attrs
request:
attributes:
- - addr
- - token
+ - addr
+ - token
reply:
attributes:
- - addr
+ - addr
dump:
reply:
- attributes:
- - addr
+ attributes:
+ - addr
-
name: flush-addrs
doc: Flush addresses
attribute-set: endpoint
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: *add-addr-attrs
-
name: set-limits
doc: Set protocol limits
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &mptcp-limits
request:
attributes:
@@ -329,10 +329,10 @@ operations:
name: get-limits
doc: Get protocol limits
attribute-set: attr
- dont-validate: [ strict ]
+ dont-validate: [strict]
do: &mptcp-get-limits
request:
- attributes:
+ attributes:
- rcv-add-addrs
- subflows
reply:
@@ -343,8 +343,8 @@ operations:
name: set-flags
doc: Change endpoint flags
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &mptcp-set-flags
request:
attributes:
@@ -355,8 +355,8 @@ operations:
name: announce
doc: Announce new address
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &announce-add
request:
attributes:
@@ -366,19 +366,19 @@ operations:
name: remove
doc: Announce removal
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do:
request:
- attributes:
- - token
- - loc-id
+ attributes:
+ - token
+ - loc-id
-
name: subflow-create
doc: Create subflow
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: &sf-create
request:
attributes:
@@ -389,6 +389,6 @@ operations:
name: subflow-destroy
doc: Destroy subflow
attribute-set: attr
- dont-validate: [ strict ]
- flags: [ uns-admin-perm ]
+ dont-validate: [strict]
+ flags: [uns-admin-perm]
do: *sf-create
diff --git a/Documentation/netlink/specs/net_shaper.yaml b/Documentation/netlink/specs/net_shaper.yaml
index 8ebad0d02904..0b1b54be48f9 100644
--- a/Documentation/netlink/specs/net_shaper.yaml
+++ b/Documentation/netlink/specs/net_shaper.yaml
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+---
name: net-shaper
doc: |
@@ -243,7 +244,7 @@ operations:
The set operation can't be used to create a @node scope shaper,
use the @group operation instead.
attribute-set: net-shaper
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: net-shaper-nl-pre-doit
@@ -274,7 +275,7 @@ operations:
node with infinite bandwidth. The queue's implicit node
feeds an implicit RR node at the root of the hierarchy.
attribute-set: net-shaper
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: net-shaper-nl-pre-doit
@@ -305,7 +306,7 @@ operations:
full identifier, comprising @binding and @handle, is provided
as the reply.
attribute-set: net-shaper
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
pre: net-shaper-nl-pre-doit
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index c0ef6d0d7786..ce4cfec82100 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: netdev
doc:
@@ -31,7 +31,7 @@ definitions:
-
name: hw-offload
doc:
- This feature informs if netdev supports XDP hw offloading.
+ This feature informs if netdev supports XDP hw offloading.
-
name: rx-sg
doc:
@@ -48,16 +48,19 @@ definitions:
entries:
-
name: timestamp
- doc:
- Device is capable of exposing receive HW timestamp via bpf_xdp_metadata_rx_timestamp().
+ doc: |
+ Device is capable of exposing receive HW timestamp via
+ bpf_xdp_metadata_rx_timestamp().
-
name: hash
- doc:
- Device is capable of exposing receive packet hash via bpf_xdp_metadata_rx_hash().
+ doc: |
+ Device is capable of exposing receive packet hash via
+ bpf_xdp_metadata_rx_hash().
-
name: vlan-tag
- doc:
- Device is capable of exposing receive packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
+ doc: |
+ Device is capable of exposing receive packet VLAN tag via
+ bpf_xdp_metadata_rx_vlan_tag().
-
type: flags
name: xsk-flags
@@ -77,11 +80,11 @@ definitions:
-
name: queue-type
type: enum
- entries: [ rx, tx ]
+ entries: [rx, tx]
-
name: qstats-scope
type: flags
- entries: [ queue ]
+ entries: [queue]
attribute-sets:
-
@@ -205,7 +208,7 @@ attribute-sets:
-
name: alloc-fast
type: uint
- value: 8 # reserve some attr ids in case we need more metadata later
+ value: 8 # reserve some attr ids in case we need more metadata later
-
name: alloc-slow
type: uint
@@ -367,7 +370,7 @@ attribute-sets:
For drivers supporting XDP, XDP is considered the first layer
of the stack, so packets consumed by XDP are still counted here.
type: uint
- value: 8 # reserve some attr ids in case we need more metadata later
+ value: 8 # reserve some attr ids in case we need more metadata later
-
name: rx-bytes
doc: Successfully received bytes, see `rx-packets`.
@@ -425,9 +428,9 @@ attribute-sets:
-
name: rx-hw-gro-packets
doc: |
- Number of packets that were coalesced from smaller packets by the device.
- Counts only packets coalesced with the HW-GRO netdevice feature,
- LRO-coalesced packets are not counted.
+ Number of packets that were coalesced from smaller packets by the
+ device. Counts only packets coalesced with the HW-GRO netdevice
+ feature, LRO-coalesced packets are not counted.
type: uint
-
name: rx-hw-gro-bytes
@@ -436,8 +439,8 @@ attribute-sets:
-
name: rx-hw-gro-wire-packets
doc: |
- Number of packets that were coalesced to bigger packetss with the HW-GRO
- netdevice feature. LRO-coalesced packets are not counted.
+ Number of packets that were coalesced to bigger packets with the
+ HW-GRO netdevice feature. LRO-coalesced packets are not counted.
type: uint
-
name: rx-hw-gro-wire-bytes
@@ -721,7 +724,7 @@ operations:
name: bind-rx
doc: Bind dmabuf to netdev
attribute-set: dmabuf
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -735,7 +738,7 @@ operations:
name: napi-set
doc: Set configurable NAPI instance settings.
attribute-set: napi
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -757,7 +760,7 @@ operations:
- id
kernel-family:
- headers: [ "net/netdev_netlink.h"]
+ headers: ["net/netdev_netlink.h"]
sock-priv: struct netdev_nl_sock
mcast-groups:
diff --git a/Documentation/netlink/specs/nfsd.yaml b/Documentation/netlink/specs/nfsd.yaml
index 8d1a3c01708f..100363029e82 100644
--- a/Documentation/netlink/specs/nfsd.yaml
+++ b/Documentation/netlink/specs/nfsd.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nfsd
protocol: genetlink
uapi-header: linux/nfsd_netlink.h
@@ -151,7 +151,7 @@ operations:
name: threads-set
doc: set the number of running threads
attribute-set: server
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -174,7 +174,7 @@ operations:
name: version-set
doc: set nfs enabled versions
attribute-set: server-proto
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -191,7 +191,7 @@ operations:
name: listener-set
doc: set nfs running sockets
attribute-set: server-sock
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
@@ -208,7 +208,7 @@ operations:
name: pool-mode-set
doc: set the current server pool-mode
attribute-set: pool-mode
- flags: [ admin-perm ]
+ flags: [admin-perm]
do:
request:
attributes:
diff --git a/Documentation/netlink/specs/nftables.yaml b/Documentation/netlink/specs/nftables.yaml
index bd938bd01b6b..2ee10d92d644 100644
--- a/Documentation/netlink/specs/nftables.yaml
+++ b/Documentation/netlink/specs/nftables.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nftables
protocol: netlink-raw
protonum: 12
@@ -1205,7 +1205,9 @@ operations:
- name
-
name: destroytable
- doc: Delete an existing table with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing table with destroy semantics (ignoring ENOENT
+ errors).
attribute-set: table-attrs
fixed-header: nfgenmsg
do:
@@ -1249,7 +1251,9 @@ operations:
- name
-
name: destroychain
- doc: Delete an existing chain with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing chain with destroy semantics (ignoring ENOENT
+ errors).
attribute-set: chain-attrs
fixed-header: nfgenmsg
do:
@@ -1307,7 +1311,8 @@ operations:
- name
-
name: destroyrule
- doc: Delete an existing rule with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing rule with destroy semantics (ignoring ENOENT errors).
attribute-set: rule-attrs
fixed-header: nfgenmsg
do:
@@ -1351,7 +1356,8 @@ operations:
- name
-
name: destroyset
- doc: Delete an existing set with destroy semantics (ignoring ENOENT errors).
+ doc: |
+ Delete an existing set with destroy semantics (ignoring ENOENT errors).
attribute-set: set-attrs
fixed-header: nfgenmsg
do:
diff --git a/Documentation/netlink/specs/nl80211.yaml b/Documentation/netlink/specs/nl80211.yaml
index 3611b11a7d8f..610fdd5e000e 100644
--- a/Documentation/netlink/specs/nl80211.yaml
+++ b/Documentation/netlink/specs/nl80211.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nl80211
protocol: genetlink-legacy
@@ -285,7 +285,7 @@ attribute-sets:
type: u16
-
name: sta-flags
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: sta-listen-interval
type: u16
@@ -297,14 +297,14 @@ attribute-sets:
type: u32
-
name: sta-info
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-bands
type: nest
nested-attributes: wiphy-bands
-
name: mntr-flags
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mesh-id
type: binary
@@ -317,7 +317,7 @@ attribute-sets:
display-hint: mac
-
name: mpath-info
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: bss-cts-prot
type: u8
@@ -339,16 +339,16 @@ attribute-sets:
type: binary
-
name: reg-rules
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mesh-config
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: bss-basic-rates
type: binary
-
name: wiphy-txq-params
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-freq
type: u32
@@ -370,16 +370,16 @@ attribute-sets:
type: u8
-
name: scan-frequencies
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: scan-ssids
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: generation
type: u32
-
name: bss
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: reg-initiator
type: u8
@@ -416,10 +416,10 @@ attribute-sets:
display-hint: hex
-
name: freq-before
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: freq-after
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: freq-fixed
type: flag
@@ -483,10 +483,10 @@ attribute-sets:
type: binary
-
name: key
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: keys
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: pid
type: u32
@@ -495,7 +495,7 @@ attribute-sets:
type: u8
-
name: survey-info
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: pmkid
type: binary
@@ -513,7 +513,7 @@ attribute-sets:
type: u8
-
name: tx-rates
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: frame-match
type: binary
@@ -525,7 +525,7 @@ attribute-sets:
type: u32
-
name: cqm
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: local-state-change
type: flag
@@ -575,13 +575,13 @@ attribute-sets:
type: u16
-
name: key-default-types
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: max-remain-on-channel-duration
type: u32
-
name: mesh-setup
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-antenna-avail-tx
type: u32
@@ -596,7 +596,7 @@ attribute-sets:
type: u8
-
name: wowlan-triggers
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wowlan-triggers-supported
type: nest
@@ -615,7 +615,7 @@ attribute-sets:
nested-attributes: supported-iftypes
-
name: rekey-data
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: max-num-sched-scan-ssids
type: u8
@@ -624,7 +624,7 @@ attribute-sets:
type: u16
-
name: scan-supp-rates
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: hidden-ssid
type: u32
@@ -636,7 +636,7 @@ attribute-sets:
type: binary
-
name: sta-wme
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: support-ap-uapsd
type: flag
@@ -645,13 +645,13 @@ attribute-sets:
type: flag
-
name: sched-scan-match
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: max-match-sets
type: u8
-
name: pmksa-candidate
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: tx-no-cck-rate
type: flag
@@ -680,7 +680,7 @@ attribute-sets:
name: feature-flags
type: u32
enum: feature-flags
- enum-as-flags: True
+ enum-as-flags: true
-
name: probe-resp-offload
type: u32
@@ -749,7 +749,7 @@ attribute-sets:
type: u32
-
name: mac-addrs
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mac-acl-max
type: u32
@@ -798,7 +798,7 @@ attribute-sets:
type: u16
-
name: coalesce-rule
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: ch-switch-count
type: u32
@@ -807,7 +807,7 @@ attribute-sets:
type: flag
-
name: csa-ies
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: cntdwn-offs-beacon
type: binary
@@ -929,13 +929,13 @@ attribute-sets:
type: u32
-
name: sched-scan-plans
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: pbss
type: flag
-
name: bss-select
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: sta-support-p2p-ps
type: u8
@@ -944,7 +944,7 @@ attribute-sets:
type: binary
-
name: iftype-ext-capa
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mu-mimo-group-data
type: binary
@@ -975,10 +975,10 @@ attribute-sets:
type: u32
-
name: nan-func
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: nan-match
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: fils-kek
type: binary
@@ -1067,16 +1067,16 @@ attribute-sets:
type: binary
-
name: ftm-responder
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: ftm-responder-stats
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: timeout
type: u32
-
name: peer-measurements
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: airtime-weight
type: u16
@@ -1094,7 +1094,7 @@ attribute-sets:
type: flag
-
name: he-obss-pd
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-edmg-channels
type: u8
@@ -1106,13 +1106,13 @@ attribute-sets:
type: u16
-
name: he-bss-color
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: iftype-akm-suites
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: tid-config
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: control-port-no-preauth
type: flag
@@ -1133,16 +1133,16 @@ attribute-sets:
type: u32
-
name: scan-freq-khz
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: he-6ghz-capability
type: binary
-
name: fils-discovery
- type: binary # TOOD: nest
+ type: binary # TODO: nest
-
name: unsol-bcast-probe-resp
- type: binary # TOOD: nest
+ type: binary # TODO: nest
-
name: s1g-capability
type: binary
@@ -1173,13 +1173,13 @@ attribute-sets:
type: u8
-
name: color-change-elems
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mbssid-config
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mbssid-elems
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: radar-background
type: flag
@@ -1194,7 +1194,7 @@ attribute-sets:
type: flag
-
name: mlo-links
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mlo-link-id
type: u8
@@ -1234,7 +1234,7 @@ attribute-sets:
type: flag
-
name: ema-rnr-elems
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: mlo-link-disabled
type: flag
@@ -1252,10 +1252,10 @@ attribute-sets:
type: flag
-
name: wiphy-radios
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: wiphy-interface-combinations
- type: binary # TODO: nest
+ type: binary # TODO: nest
-
name: vif-radio-mask
type: u32
@@ -1799,8 +1799,9 @@ operations:
-
name: get-wiphy
doc: |
- Get information about a wiphy or dump a list of all wiphys. Requests to dump get-wiphy
- should unconditionally include the split-wiphy-dump flag in the request.
+ Get information about a wiphy or dump a list of all wiphys. Requests to
+ dump get-wiphy should unconditionally include the split-wiphy-dump flag
+ in the request.
attribute-set: nl80211-attrs
do:
request:
diff --git a/Documentation/netlink/specs/nlctrl.yaml b/Documentation/netlink/specs/nlctrl.yaml
index a36535350bdb..8b4472a6aa36 100644
--- a/Documentation/netlink/specs/nlctrl.yaml
+++ b/Documentation/netlink/specs/nlctrl.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: nlctrl
protocol: genetlink-legacy
uapi-header: linux/genetlink.h
@@ -76,12 +76,12 @@ attribute-sets:
-
name: policy
type: nest-type-value
- type-value: [ policy-id, attr-id ]
+ type-value: [policy-id, attr-id]
nested-attributes: policy-attrs
-
name: op-policy
type: nest-type-value
- type-value: [ op-id ]
+ type-value: [op-id]
nested-attributes: op-policy-attrs
-
name: op
diff --git a/Documentation/netlink/specs/ovpn.yaml b/Documentation/netlink/specs/ovpn.yaml
index 096c51f0c69a..17e5e9b7f5a5 100644
--- a/Documentation/netlink/specs/ovpn.yaml
+++ b/Documentation/netlink/specs/ovpn.yaml
@@ -4,7 +4,7 @@
#
# Copyright (c) 2024-2025, OpenVPN Inc.
#
-
+---
name: ovpn
protocol: genetlink
@@ -19,7 +19,7 @@ definitions:
-
type: enum
name: cipher-alg
- entries: [ none, aes-gcm, chacha20-poly1305 ]
+ entries: [none, aes-gcm, chacha20-poly1305]
-
type: enum
name: del-peer-reason
@@ -32,7 +32,7 @@ definitions:
-
type: enum
name: key-slot
- entries: [ primary, secondary ]
+ entries: [primary, secondary]
attribute-sets:
-
@@ -42,8 +42,8 @@ attribute-sets:
name: id
type: u32
doc: >-
- The unique ID of the peer in the device context. To be used to identify
- peers during operations for a specific device
+ The unique ID of the peer in the device context. To be used to
+ identify peers during operations for a specific device
checks:
max: 0xFFFFFF
-
@@ -241,7 +241,7 @@ operations:
-
name: peer-new
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Add a remote peer
do:
pre: ovpn-nl-pre-doit
@@ -253,7 +253,7 @@ operations:
-
name: peer-set
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: modify a remote peer
do:
pre: ovpn-nl-pre-doit
@@ -265,7 +265,7 @@ operations:
-
name: peer-get
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Retrieve data about existing remote peers (or a specific one)
do:
pre: ovpn-nl-pre-doit
@@ -287,7 +287,7 @@ operations:
-
name: peer-del
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Delete existing remote peer
do:
pre: ovpn-nl-pre-doit
@@ -305,7 +305,7 @@ operations:
-
name: key-new
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Add a cipher key for a specific peer
do:
pre: ovpn-nl-pre-doit
@@ -317,7 +317,7 @@ operations:
-
name: key-get
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Retrieve non-sensitive data about peer key and cipher
do:
pre: ovpn-nl-pre-doit
@@ -332,7 +332,7 @@ operations:
-
name: key-swap
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Swap primary and secondary session keys for a specific peer
do:
pre: ovpn-nl-pre-doit
@@ -351,7 +351,7 @@ operations:
-
name: key-del
attribute-set: ovpn
- flags: [ admin-perm ]
+ flags: [admin-perm]
doc: Delete cipher key for a specific peer
do:
pre: ovpn-nl-pre-doit
diff --git a/Documentation/netlink/specs/ovs_datapath.yaml b/Documentation/netlink/specs/ovs_datapath.yaml
index df6a8f94975e..0c0abf3f9f05 100644
--- a/Documentation/netlink/specs/ovs_datapath.yaml
+++ b/Documentation/netlink/specs/ovs_datapath.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ovs_datapath
version: 2
protocol: genetlink-legacy
diff --git a/Documentation/netlink/specs/ovs_flow.yaml b/Documentation/netlink/specs/ovs_flow.yaml
index 7974aa7d8905..2dac9c8add57 100644
--- a/Documentation/netlink/specs/ovs_flow.yaml
+++ b/Documentation/netlink/specs/ovs_flow.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ovs_flow
version: 1
protocol: genetlink-legacy
@@ -293,9 +293,10 @@ definitions:
enum-name: ovs-hash-alg
type: enum
doc: |
- Data path hash algorithm for computing Datapath hash. The algorithm type only specifies
- the fields in a flow will be used as part of the hash. Each datapath is free to use its
- own hash algorithm. The hash value will be opaque to the user space daemon.
+ Data path hash algorithm for computing Datapath hash. The algorithm type
+ only specifies the fields in a flow will be used as part of the hash. Each
+ datapath is free to use its own hash algorithm. The hash value will be
+ opaque to the user space daemon.
entries:
- ovs-hash-alg-l4
@@ -615,7 +616,9 @@ attribute-sets:
name: set
type: nest
nested-attributes: key-attrs
- doc: Replaces the contents of an existing header. The single nested attribute specifies a header to modify and its value.
+ doc: |
+ Replaces the contents of an existing header. The single nested
+ attribute specifies a header to modify and its value.
-
name: push-vlan
type: binary
@@ -630,7 +633,8 @@ attribute-sets:
type: nest
nested-attributes: sample-attrs
doc: |
- Probabilistically executes actions, as specified in the nested attributes.
+ Probabilistically executes actions, as specified in the nested
+ attributes.
-
name: recirc
type: u32
diff --git a/Documentation/netlink/specs/ovs_vport.yaml b/Documentation/netlink/specs/ovs_vport.yaml
index 306da6bb842d..da47e65fd574 100644
--- a/Documentation/netlink/specs/ovs_vport.yaml
+++ b/Documentation/netlink/specs/ovs_vport.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: ovs_vport
version: 2
protocol: genetlink-legacy
@@ -21,7 +21,7 @@ definitions:
type: enum
enum-name: ovs-vport-type
name-prefix: ovs-vport-type-
- entries: [ unspec, netdev, internal, gre, vxlan, geneve ]
+ entries: [unspec, netdev, internal, gre, vxlan, geneve]
-
name: ovs-vport-stats
type: struct
diff --git a/Documentation/netlink/specs/rt-addr.yaml b/Documentation/netlink/specs/rt-addr.yaml
index 4f86aa1075da..bafe3bfeabfb 100644
--- a/Documentation/netlink/specs/rt-addr.yaml
+++ b/Documentation/netlink/specs/rt-addr.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-addr
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
diff --git a/Documentation/netlink/specs/rt-link.yaml b/Documentation/netlink/specs/rt-link.yaml
index 28c4cf66517c..210394c188a3 100644
--- a/Documentation/netlink/specs/rt-link.yaml
+++ b/Documentation/netlink/specs/rt-link.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-link
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
diff --git a/Documentation/netlink/specs/rt-neigh.yaml b/Documentation/netlink/specs/rt-neigh.yaml
index e9cba164e3d1..25cc2d528d2f 100644
--- a/Documentation/netlink/specs/rt-neigh.yaml
+++ b/Documentation/netlink/specs/rt-neigh.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-neigh
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
diff --git a/Documentation/netlink/specs/rt-route.yaml b/Documentation/netlink/specs/rt-route.yaml
index 800f3a823d47..5b514ddeff1d 100644
--- a/Documentation/netlink/specs/rt-route.yaml
+++ b/Documentation/netlink/specs/rt-route.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-route
protocol: netlink-raw
uapi-header: linux/rtnetlink.h
@@ -117,7 +117,7 @@ attribute-sets:
name: multipath
type: binary
-
- name: protoinfo # not used
+ name: protoinfo # not used
type: binary
-
name: flow
@@ -127,10 +127,10 @@ attribute-sets:
type: binary
struct: rta-cacheinfo
-
- name: session # not used
+ name: session # not used
type: binary
-
- name: mp-algo # not used
+ name: mp-algo # not used
type: binary
-
name: table
@@ -155,7 +155,7 @@ attribute-sets:
type: u16
-
name: encap
- type: binary # tunnel specific nest
+ type: binary # tunnel specific nest
-
name: expires
type: u32
diff --git a/Documentation/netlink/specs/rt-rule.yaml b/Documentation/netlink/specs/rt-rule.yaml
index 003707ca4a3e..46b1d426e7e8 100644
--- a/Documentation/netlink/specs/rt-rule.yaml
+++ b/Documentation/netlink/specs/rt-rule.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: rt-rule
protocol: netlink-raw
uapi-header: linux/fib_rules.h
diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
index 42d74c9aeb54..e983c0c82eb9 100644
--- a/Documentation/netlink/specs/tc.yaml
+++ b/Documentation/netlink/specs/tc.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: tc
protocol: netlink-raw
uapi-header: linux/pkt_cls.h
@@ -76,7 +76,8 @@ definitions:
name: overlimits
type: u32
doc: |
- Number of throttle events when this flow goes out of allocated bandwidth
+ Number of throttle events when this flow goes out of allocated
+ bandwidth
-
name: bps
type: u32
@@ -751,7 +752,9 @@ definitions:
-
name: count
type: u32
- doc: How many drops we've done since the last time we entered dropping state
+ doc: |
+ How many drops we've done since the last time we entered dropping
+ state
-
name: lastcount
type: u32
@@ -1161,7 +1164,7 @@ definitions:
-
name: keys
type: binary
- struct: tc-u32-key # TODO: array
+ struct: tc-u32-key # TODO: array
-
name: tc-u32-pcnt
type: struct
@@ -1174,7 +1177,7 @@ definitions:
type: u64
-
name: kcnts
- type: u64 # TODO: array
+ type: u64 # TODO: array
-
name: tcf-t
type: struct
@@ -1336,7 +1339,7 @@ definitions:
-
name: keys
type: binary
- struct: tc-pedit-key # TODO: array
+ struct: tc-pedit-key # TODO: array
-
name: tc-pedit-key
type: struct
@@ -2885,7 +2888,7 @@ attribute-sets:
attributes:
-
name: parms
- type: binary # array of struct: tc-gred-qopt
+ type: binary # array of struct: tc-gred-qopt
-
name: stab
type: binary
@@ -3335,10 +3338,10 @@ attribute-sets:
struct: tc-police
-
name: rate
- type: binary # TODO
+ type: binary # TODO
-
name: peakrate
- type: binary # TODO
+ type: binary # TODO
-
name: avrate
type: u32
@@ -3698,7 +3701,7 @@ sub-messages:
value: choke
attribute-set: choke-attrs
-
- value: clsact # no content
+ value: clsact # no content
-
value: codel
attribute-set: codel-attrs
@@ -3742,12 +3745,12 @@ sub-messages:
value: htb
attribute-set: htb-attrs
-
- value: ingress # no content
+ value: ingress # no content
-
value: matchall
attribute-set: matchall-attrs
-
- value: mq # no content
+ value: mq # no content
-
value: mqprio
fixed-header: tc-mqprio-qopt
diff --git a/Documentation/netlink/specs/tcp_metrics.yaml b/Documentation/netlink/specs/tcp_metrics.yaml
index 1bd94f43e526..13144aeed31a 100644
--- a/Documentation/netlink/specs/tcp_metrics.yaml
+++ b/Documentation/netlink/specs/tcp_metrics.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: tcp_metrics
protocol: genetlink-legacy
@@ -133,7 +133,7 @@ operations:
doc: Retrieve metrics.
attribute-set: tcp-metrics
- dont-validate: [ strict, dump ]
+ dont-validate: [strict, dump]
do:
request: &sel_attrs
@@ -162,8 +162,8 @@ operations:
doc: Delete metrics.
attribute-set: tcp-metrics
- dont-validate: [ strict, dump ]
- flags: [ admin-perm ]
+ dont-validate: [strict, dump]
+ flags: [admin-perm]
do:
request: *sel_attrs
diff --git a/Documentation/netlink/specs/team.yaml b/Documentation/netlink/specs/team.yaml
index c13529e011c9..cf02d47d12a4 100644
--- a/Documentation/netlink/specs/team.yaml
+++ b/Documentation/netlink/specs/team.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
-
+---
name: team
protocol: genetlink-legacy
@@ -152,7 +152,7 @@ operations:
doc: No operation
value: 0
attribute-set: team
- dont-validate: [ strict ]
+ dont-validate: [strict]
do:
# Actually it only reply the team netlink family
@@ -164,8 +164,8 @@ operations:
name: options-set
doc: Set team options
attribute-set: team
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
request: &option_attrs
@@ -178,8 +178,8 @@ operations:
name: options-get
doc: Get team options info
attribute-set: team
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
request:
@@ -191,8 +191,8 @@ operations:
name: port-list-get
doc: Get team ports info
attribute-set: team
- dont-validate: [ strict ]
- flags: [ admin-perm ]
+ dont-validate: [strict]
+ flags: [admin-perm]
do:
request:
diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst
index b018ce346392..bc1b585355f7 100644
--- a/Documentation/networking/can.rst
+++ b/Documentation/networking/can.rst
@@ -1104,15 +1104,12 @@ for writing CAN network device driver are described below:
General Settings
----------------
-.. code-block:: C
-
- dev->type = ARPHRD_CAN; /* the netdevice hardware type */
- dev->flags = IFF_NOARP; /* CAN has no arp */
+CAN network device drivers can use alloc_candev_mqs() and friends instead of
+alloc_netdev_mqs(), to automatically take care of CAN-specific setup:
- dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> Classical CAN interface */
+.. code-block:: C
- or alternative, when the controller supports CAN with flexible data rate:
- dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
+ dev = alloc_candev_mqs(...);
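
A slightly fuller sketch of a typical allocation path is shown below. The
``struct my_priv`` type, the ``my_probe()`` function and the queue/echo-skb
counts are placeholders, not taken from any real driver; alloc_candev_mqs(),
register_candev() and free_candev() are the generic CAN device helpers:

.. code-block:: C

   /* Hypothetical driver-private data; "my_priv" is a placeholder name.
    * struct can_priv must be the first member so the CAN core can find it.
    */
   struct my_priv {
           struct can_priv can;
           /* driver specific members follow */
   };

   static int my_probe(struct platform_device *pdev)
   {
           struct net_device *dev;
           struct my_priv *priv;
           int err;

           /* one TX and one RX queue, room for 4 echo skbs (example values) */
           dev = alloc_candev_mqs(sizeof(*priv), 4, 1, 1);
           if (!dev)
                   return -ENOMEM;

           priv = netdev_priv(dev);
           /* fill in priv->can.clock, bittiming_const, ctrlmode_supported, ... */

           err = register_candev(dev);
           if (err) {
                   free_candev(dev);
                   return err;
           }
           return 0;
   }
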
The struct can_frame or struct canfd_frame is the payload of each socket
buffer (skbuff) in the protocol family PF_CAN.
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index 4561e8ab9e08..14784a0a6a8a 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -56,6 +56,9 @@ ena_netdev.[ch] Main Linux kernel driver.
ena_ethtool.c ethtool callbacks.
ena_xdp.[ch] XDP files
ena_pci_id_tbl.h Supported device IDs.
+ena_phc.[ch] PTP hardware clock infrastructure (see `PHC`_ for more info)
+ena_devlink.[ch] devlink files.
+ena_debugfs.[ch] debugfs files.
================= ======================================================
Management Interface:
@@ -221,6 +224,99 @@ descriptor it was received on would be recycled. When a packet smaller
than RX copybreak bytes is received, it is copied into a new memory
buffer and the RX descriptor is returned to HW.
+.. _`PHC`:
+
+PTP Hardware Clock (PHC)
+========================
+.. _`ptp-userspace-api`: https://docs.kernel.org/driver-api/ptp.html#ptp-hardware-clock-user-space-api
+.. _`testptp`: https://elixir.bootlin.com/linux/latest/source/tools/testing/selftests/ptp/testptp.c
+
+The ENA Linux driver supports a PTP hardware clock (PHC), providing a timestamp reference with nanosecond resolution.
+
+**PHC support**
+
+PHC depends on PTP support (``CONFIG_PTP_1588_CLOCK``), which needs to be either built into the kernel or loaded as a module.
+
+Verify that PTP support is present:
+
+.. code-block:: shell
+
+ grep -w '^CONFIG_PTP_1588_CLOCK=[ym]' /boot/config-`uname -r`
+
+- If the command produces no output, the ENA driver cannot be loaded with PHC support.
+
+**PHC activation**
+
+The feature is turned off by default. To turn it on, the ENA driver can be
+loaded in the following way:
+
+- devlink:
+
+.. code-block:: shell
+
+ sudo devlink dev param set pci/<domain:bus:slot.function> name enable_phc value true cmode driverinit
+ sudo devlink dev reload pci/<domain:bus:slot.function>
+ # for example:
+ sudo devlink dev param set pci/0000:00:06.0 name enable_phc value true cmode driverinit
+ sudo devlink dev reload pci/0000:00:06.0
+
+All available PTP clock sources can be tracked here:
+
+.. code-block:: shell
+
+ ls /sys/class/ptp
+
+PHC support and capabilities can be verified using ethtool:
+
+.. code-block:: shell
+
+ ethtool -T <interface>
+
+**PHC timestamp**
+
+To retrieve a PHC timestamp, use the `ptp-userspace-api`_; for example, using `testptp`_:
+
+.. code-block:: shell
+
+ testptp -d /dev/ptp$(ethtool -T <interface> | awk '/PTP Hardware Clock:/ {print $NF}') -k 1
+
+PHC get time requests should be kept within reasonable bounds; avoid
+excessive polling to ensure optimal performance and efficiency.
+The ENA device restricts the frequency of PHC get time requests to a maximum
+of 125 requests per second. If this limit is exceeded, the get time request
+will fail and the phc_err_ts statistic is incremented.
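
As a minimal illustration (not taken from the driver sources), the sketch
below opens the PTP character device and reads the clock with clock_gettime(),
using the FD_TO_CLOCKID() conversion from `testptp`_. The /dev/ptp0 path is a
placeholder for the index reported by ``ethtool -T <interface>``, and polling
should stay well below the 125 requests per second limit described above:

.. code-block:: c

   #include <fcntl.h>
   #include <stdio.h>
   #include <time.h>
   #include <unistd.h>

   #define CLOCKFD 3
   #define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)

   int main(void)
   {
           /* /dev/ptp0 is an example; use the PHC index of the ENA interface */
           int fd = open("/dev/ptp0", O_RDONLY);
           struct timespec ts;

           if (fd < 0)
                   return 1;
           if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
                   printf("PHC time: %lld.%09ld\n",
                          (long long)ts.tv_sec, ts.tv_nsec);
           close(fd);
           return 0;
   }
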
+
+**PHC statistics**
+
+PHC can be monitored using debugfs (if mounted):
+
+.. code-block:: shell
+
+ sudo cat /sys/kernel/debug/<domain:bus:slot.function>/phc_stats
+
+ # for example:
+ sudo cat /sys/kernel/debug/0000:00:06.0/phc_stats
+
+PHC errors must remain below 1% of all PHC requests to maintain the desired level of accuracy and reliability.
+
+================= ======================================================
+**phc_cnt** | Number of successfully retrieved timestamps (below expire timeout).
+**phc_exp** | Number of expired retrieved timestamps (above expire timeout).
+**phc_skp** | Number of skipped get time attempts (during block period).
+**phc_err_dv** | Number of failed get time attempts due to device errors (entering into block state).
+**phc_err_ts** | Number of failed get time attempts due to timestamp errors (entering into block state).
+ | This occurs if the driver exceeded the request limit or the device received an invalid timestamp.
+================= ======================================================
+
+PHC timeouts:
+
+================= ======================================================
+**expire** | Max time for a valid timestamp retrieval; passing this threshold fails
+ | the get time request and blocks new requests until the block timeout.
+**block** | Blocking period starts once a get time request expires or fails;
+ | all get time requests during the block period will be skipped.
+================= ======================================================
+
Statistics
==========
@@ -268,6 +364,18 @@ RSS
- The user can provide a hash key, hash function, and configure the
indirection table through `ethtool(8)`.
+DEVLINK SUPPORT
+===============
+.. _`devlink`: https://www.kernel.org/doc/html/latest/networking/devlink/index.html
+
+`devlink`_ supports reloading the driver and initiating re-negotiation with the ENA device:
+
+.. code-block:: shell
+
+ sudo devlink dev reload pci/<domain:bus:slot.function>
+ # for example:
+ sudo devlink dev reload pci/0000:00:06.0
+
DATA PATH
=========
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index 3c46a48d99ba..0bca293cf9cb 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -927,6 +927,19 @@ To enable/disable UDP Segmentation Offload, issue the following command::
# ethtool -K <ethX> tx-udp-segmentation [off|on]
+PTP pin interface
+-----------------
+All adapters support the standard PTP pin interface. SDPs (Software
+Definable Pins) are single-ended pins that support both periodic output and
+external timestamping. There are also dedicated differential input/output
+pins (TIME_SYNC, 1PPS) that support only one of those functions.
+
+Some adapters have a DPLL, where pins are connected to the DPLL instead of
+being exposed on the board. Be aware that in those configurations only SDP
+pins are exposed and each pin has its own fixed direction.
+To see an input signal on those PTP pins, you need to configure the DPLL
+properly. An output signal is only visible on the DPLL; to route it to the
+board SMA/U.FL pins, the DPLL output pins have to be configured manually.
GNSS module
-----------
diff --git a/Documentation/networking/devlink/devlink-params.rst b/Documentation/networking/devlink/devlink-params.rst
index 4e01dc32bc08..3da8f4ef2417 100644
--- a/Documentation/networking/devlink/devlink-params.rst
+++ b/Documentation/networking/devlink/devlink-params.rst
@@ -137,3 +137,6 @@ own name.
* - ``event_eq_size``
- u32
- Control the size of asynchronous control events EQ.
+ * - ``enable_phc``
+ - Boolean
+ - Enable PHC (PTP Hardware Clock) functionality in the device.
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index b6e9af4d0f1b..07e9808ebd2c 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -281,6 +281,7 @@ Kernel to userspace:
``ETHTOOL_MSG_MODULE_GET_REPLY`` transceiver module parameters
``ETHTOOL_MSG_PSE_GET_REPLY`` PSE parameters
``ETHTOOL_MSG_RSS_GET_REPLY`` RSS settings
+ ``ETHTOOL_MSG_RSS_NTF`` RSS settings
``ETHTOOL_MSG_PLCA_GET_CFG_REPLY`` PLCA RS parameters
``ETHTOOL_MSG_PLCA_GET_STATUS_REPLY`` PLCA RS status
``ETHTOOL_MSG_PLCA_NTF`` PLCA RS parameters
@@ -290,6 +291,7 @@ Kernel to userspace:
``ETHTOOL_MSG_PHY_NTF`` Ethernet PHY information change
``ETHTOOL_MSG_TSCONFIG_GET_REPLY`` hw timestamping configuration
``ETHTOOL_MSG_TSCONFIG_SET_REPLY`` new hw timestamping configuration
+ ``ETHTOOL_MSG_PSE_NTF`` PSE events notification
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -1788,6 +1790,11 @@ Kernel response contents:
limit of the PoE PSE.
``ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES`` nested Supported power limit
configuration ranges.
+ ``ETHTOOL_A_PSE_PW_D_ID`` u32 Index of the PSE power domain
+ ``ETHTOOL_A_PSE_PRIO_MAX`` u32 Priority maximum configurable
+ on the PoE PSE
+ ``ETHTOOL_A_PSE_PRIO`` u32 Priority of the PoE PSE
+ currently configured
========================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies
@@ -1861,6 +1868,15 @@ identifies the C33 PSE power limit ranges through
If the controller works with fixed classes, the min and max values will be
equal.
+The ``ETHTOOL_A_PSE_PW_D_ID`` attribute identifies the index of the PSE
+power domain.
+
+When set, the optional ``ETHTOOL_A_PSE_PRIO_MAX`` attribute identifies
+the PSE maximum priority value.
+When set, the optional ``ETHTOOL_A_PSE_PRIO`` attribute is used to
+identify the currently configured PSE priority.
+For a description of PSE priority attributes, see ``PSE_SET``.
+
PSE_SET
=======
@@ -1874,6 +1890,8 @@ Request contents:
``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` u32 Control PSE Admin state
``ETHTOOL_A_C33_PSE_AVAIL_PWR_LIMIT`` u32 Control PoE PSE available
power limit
+ ``ETHTOOL_A_PSE_PRIO`` u32 Control priority of the
+ PoE PSE
====================================== ====== =============================
When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used
@@ -1896,6 +1914,38 @@ various existing products that document power consumption in watts rather than
classes. If power limit configuration based on classes is needed, the
conversion can be done in user space, for example by ethtool.
+When set, the optional ``ETHTOOL_A_PSE_PRIO`` attribute is used to
+control the PSE priority. Allowed priority values are between zero and
+the value of the ``ETHTOOL_A_PSE_PRIO_MAX`` attribute.
+
+A lower value indicates a higher priority, meaning that a priority value
+of 0 corresponds to the highest port priority.
+Port priority serves two functions:
+
+ - Power-up Order: After a reset, ports are powered up in order of their
+ priority from highest to lowest. Ports with higher priority
+ (lower values) power up first.
+ - Shutdown Order: When the power budget is exceeded, ports with lower
+ priority (higher values) are turned off first.
+
+PSE_NTF
+=======
+
+Notify PSE events.
+
+Notification contents:
+
+ =============================== ====== ========================
+ ``ETHTOOL_A_PSE_HEADER`` nested request header
+ ``ETHTOOL_A_PSE_EVENTS`` bitset PSE events
+ =============================== ====== ========================
+
+When set, the optional ``ETHTOOL_A_PSE_EVENTS`` attribute identifies the
+PSE events.
+
+.. kernel-doc:: include/uapi/linux/ethtool_netlink_generated.h
+ :identifiers: ethtool_pse_event
+
RSS_GET
=======
@@ -2401,7 +2451,7 @@ are netlink only.
``ETHTOOL_SRXNTUPLE`` n/a
``ETHTOOL_GRXNTUPLE`` n/a
``ETHTOOL_GSSET_INFO`` ``ETHTOOL_MSG_STRSET_GET``
- ``ETHTOOL_GRXFHINDIR`` n/a
+ ``ETHTOOL_GRXFHINDIR`` ``ETHTOOL_MSG_RSS_GET``
``ETHTOOL_SRXFHINDIR`` n/a
``ETHTOOL_GFEATURES`` ``ETHTOOL_MSG_FEATURES_GET``
``ETHTOOL_SFEATURES`` ``ETHTOOL_MSG_FEATURES_SET``
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 0f1251cce314..9af5a8935d57 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -8,15 +8,19 @@ IP Sysctl
==============================
ip_forward - BOOLEAN
- - 0 - disabled (default)
- - not 0 - enabled
-
Forward Packets between interfaces.
This variable is special, its change resets all configuration
parameters to their default state (RFC1122 for hosts, RFC1812
for routers)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
ip_default_ttl - INTEGER
Default value of TTL field (Time To Live) for outgoing (but not
forwarded) IP packets. Should be between 1 and 255 inclusive.
@@ -62,20 +66,25 @@ ip_forward_use_pmtu - BOOLEAN
kernel honoring this information. This is normally not the
case.
- Default: 0 (disabled)
-
Possible values:
- - 0 - disabled
- - 1 - enabled
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
fwmark_reflect - BOOLEAN
Controls the fwmark of kernel-generated IPv4 reply packets that are not
associated with a socket for example, TCP RSTs or ICMP echo replies).
- If unset, these packets have a fwmark of zero. If set, they have the
+ If disabled, these packets have a fwmark of zero. If enabled, they have the
fwmark of the packet they are replying to.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
fib_multipath_use_neigh - BOOLEAN
Use status of existing neighbor entry when determining nexthop for
@@ -83,12 +92,12 @@ fib_multipath_use_neigh - BOOLEAN
packets could be directed to a failed nexthop. Only valid for kernels
built with CONFIG_IP_ROUTE_MULTIPATH enabled.
- Default: 0 (disabled)
-
Possible values:
- - 0 - disabled
- - 1 - enabled
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
fib_multipath_hash_policy - INTEGER
Controls which hash policy to use for multipath routes. Only valid
@@ -368,7 +377,12 @@ tcp_autocorking - BOOLEAN
queue. Applications can still use TCP_CORK for optimal behavior
when they know how/when to uncork their sockets.
- Default : 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_available_congestion_control - STRING
Shows the available congestion control choices that are registered.
@@ -408,6 +422,13 @@ tcp_congestion_control - STRING
tcp_dsack - BOOLEAN
Allows TCP to send "duplicate" SACKs.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_early_retrans - INTEGER
Tail loss probe (TLP) converts RTOs occurring due to tail
losses into fast recovery (draft-ietf-tcpm-rack). Note that
@@ -447,7 +468,12 @@ tcp_ecn_fallback - BOOLEAN
knob. The value is not used, if tcp_ecn or per route (or congestion
control) ECN settings are disabled.
- Default: 1 (fallback enabled)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_fack - BOOLEAN
This is a legacy option, it has no effect anymore.
@@ -474,7 +500,7 @@ tcp_frto - INTEGER
By default it's enabled with a non-zero value. 0 disables F-RTO.
tcp_fwmark_accept - BOOLEAN
- If set, incoming connections to listening sockets that do not have a
+ If enabled, incoming connections to listening sockets that do not have a
socket mark will set the mark of the accepting socket to the fwmark of
the incoming SYN packet. This will cause all packets on that connection
(starting from the first SYNACK) to be sent with that fwmark. The
@@ -482,7 +508,12 @@ tcp_fwmark_accept - BOOLEAN
have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
unaffected.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_invalid_ratelimit - INTEGER
Limit the maximal rate for sending duplicate acknowledgments
@@ -528,6 +559,11 @@ tcp_l3mdev_accept - BOOLEAN
which the packets originated. Only valid when the kernel was
compiled with CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 0 (disabled)
tcp_low_latency - BOOLEAN
@@ -593,10 +629,16 @@ tcp_min_rtt_wlen - INTEGER
Default: 300
tcp_moderate_rcvbuf - BOOLEAN
- If set, TCP performs receive buffer auto-tuning, attempting to
+ If enabled, TCP performs receive buffer auto-tuning, attempting to
automatically size the buffer (no greater than tcp_rmem[2]) to
- match the size required by the path for full throughput. Enabled by
- default.
+ match the size required by the path for full throughput.
+
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_mtu_probing - INTEGER
Controls TCP Packetization-Layer Path MTU Discovery. Takes three
@@ -621,13 +663,26 @@ tcp_no_metrics_save - BOOLEAN
when the connection closes, so that connections established in the
near future can use these to set initial conditions. Usually, this
increases overall performance, but may sometimes cause performance
- degradation. If set, TCP will not cache metrics on closing
+ degradation. If enabled, TCP will not cache metrics on closing
connections.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
tcp_no_ssthresh_metrics_save - BOOLEAN
Controls whether TCP saves ssthresh metrics in the route cache.
+ If enabled, ssthresh metrics are not saved.
+
+ Possible values:
- Default is 1, which disables ssthresh metrics.
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_orphan_retries - INTEGER
This value influences the timeout of a locally closed TCP connection,
@@ -645,9 +700,11 @@ tcp_recovery - INTEGER
features.
========= =============================================================
- RACK: 0x1 enables the RACK loss detection for fast detection of lost
- retransmissions and tail drops. It also subsumes and disables
- RFC6675 recovery for SACK connections.
+ RACK: 0x1 enables RACK loss detection, for fast detection of lost
+ retransmissions and tail drops, and resilience to
+ reordering. Currently, setting this bit to 0 has no
+ effect, since RACK is the only supported loss detection
+ algorithm.
RACK: 0x2 makes RACK's reordering window static (min_rtt/4).
@@ -664,6 +721,11 @@ tcp_reflect_tos - BOOLEAN
This options affects both IPv4 and IPv6.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 0 (disabled)
tcp_reordering - INTEGER
@@ -685,6 +747,13 @@ tcp_retrans_collapse - BOOLEAN
On retransmit try to send bigger packets to work around bugs in
certain TCP stacks.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_retries1 - INTEGER
This value influences the time, after which TCP decides, that
something is wrong due to unacknowledged RTO retransmissions,
@@ -712,11 +781,16 @@ tcp_retries2 - INTEGER
which corresponds to a value of at least 8.
tcp_rfc1337 - BOOLEAN
- If set, the TCP stack behaves conforming to RFC1337. If unset,
+ If enabled, the TCP stack conforms to RFC1337. If disabled,
we are not conforming to RFC, but prevent TCP TIME_WAIT
assassination.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_rmem - vector of 3 INTEGERs: min, default, max
min: Minimal size of receive buffer used by TCP sockets.
@@ -740,6 +814,13 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
tcp_sack - BOOLEAN
Enable select acknowledgments (SACKS).
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_comp_sack_delay_ns - LONG INTEGER
TCP tries to reduce number of SACK sent, using a timer
based on 5% of SRTT, capped by this sysctl, in nano seconds.
@@ -762,26 +843,41 @@ tcp_comp_sack_nr - INTEGER
Default : 44
tcp_backlog_ack_defer - BOOLEAN
- If set, user thread processing socket backlog tries sending
+ If enabled, user thread processing socket backlog tries sending
one ACK for the whole queue. This helps to avoid potential
long latencies at end of a TCP socket syscall.
- Default : true
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_slow_start_after_idle - BOOLEAN
- If set, provide RFC2861 behavior and time out the congestion
+ If enabled, provide RFC2861 behavior and time out the congestion
window after an idle period. An idle period is defined at
the current RTO. If unset, the congestion window will not
be timed out after an idle period.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
tcp_stdurg - BOOLEAN
Use the Host requirements interpretation of the TCP urgent pointer field.
- Most hosts use the older BSD interpretation, so if you turn this on
+ Most hosts use the older BSD interpretation, so if enabled,
Linux might not communicate correctly with them.
- Default: FALSE
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_synack_retries - INTEGER
Number of times SYNACKs for a passive TCP connection attempt will
@@ -838,7 +934,12 @@ tcp_migrate_req - BOOLEAN
migration by returning SK_DROP in the type of eBPF program, or
disable this option.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_fastopen - INTEGER
Enable TCP Fast Open (RFC7413) to send and accept data in the opening
@@ -1019,6 +1120,13 @@ tcp_tw_reuse_delay - UNSIGNED INTEGER
tcp_window_scaling - BOOLEAN
Enable window scaling as defined in RFC1323.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
+
tcp_shrink_window - BOOLEAN
This changes how the TCP receive window is calculated.
@@ -1026,13 +1134,15 @@ tcp_shrink_window - BOOLEAN
window can be offered, and that TCP implementations MUST ensure
that they handle a shrinking window, as specified in RFC 1122.
- - 0 - Disabled. The window is never shrunk.
- - 1 - Enabled. The window is shrunk when necessary to remain within
- the memory limit set by autotuning (sk_rcvbuf).
- This only occurs if a non-zero receive window
- scaling factor is also in effect.
+ Possible values:
- Default: 0
+ - 0 (disabled) - The window is never shrunk.
+ - 1 (enabled) - The window is shrunk when necessary to remain within
+ the memory limit set by autotuning (sk_rcvbuf).
+ This only occurs if a non-zero receive window
+ scaling factor is also in effect.
+
+ Default: 0 (disabled)
tcp_wmem - vector of 3 INTEGERs: min, default, max
min: Amount of memory reserved for send buffers for TCP sockets.
@@ -1069,16 +1179,21 @@ tcp_notsent_lowat - UNSIGNED INTEGER
Default: UINT_MAX (0xFFFFFFFF)
tcp_workaround_signed_windows - BOOLEAN
- If set, assume no receipt of a window scaling option means the
+ If enabled, assume no receipt of a window scaling option means the
remote TCP is broken and treats the window as a signed quantity.
- If unset, assume the remote TCP is not broken even if we do
+ If disabled, assume the remote TCP is not broken even if we do
not receive a window scaling option from them.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_thin_linear_timeouts - BOOLEAN
Enable dynamic triggering of linear timeouts for thin streams.
- If set, a check is performed upon retransmission by timeout to
+ If enabled, a check is performed upon retransmission by timeout to
determine if the stream is thin (less than 4 packets in flight).
As long as the stream is found to be thin, up to 6 linear
timeouts may be performed before exponential backoff mode is
@@ -1087,7 +1202,12 @@ tcp_thin_linear_timeouts - BOOLEAN
For more information on thin streams, see
Documentation/networking/tcp-thin.rst
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_limit_output_bytes - INTEGER
Controls TCP Small Queue limit per tcp socket.
@@ -1139,7 +1259,7 @@ tcp_child_ehash_entries - INTEGER
Default: 0
tcp_plb_enabled - BOOLEAN
- If set and the underlying congestion control (e.g. DCTCP) supports
+ If enabled and the underlying congestion control (e.g. DCTCP) supports
and enables PLB feature, TCP PLB (Protective Load Balancing) is
enabled. PLB is described in the following paper:
https://doi.org/10.1145/3544216.3544226. Based on PLB parameters,
@@ -1155,12 +1275,17 @@ tcp_plb_enabled - BOOLEAN
by switches to determine next hop. In either case, further host
and switch side changes will be needed.
- When set, PLB assumes that congestion signal (e.g. ECN) is made
+ If enabled, PLB assumes that congestion signal (e.g. ECN) is made
available and used by congestion control module to estimate a
congestion measure (e.g. ce_ratio). PLB needs a congestion measure to
make repathing decisions.
- Default: FALSE
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tcp_plb_idle_rehash_rounds - INTEGER
Number of consecutive congested rounds (RTT) seen after which
@@ -1260,6 +1385,11 @@ udp_l3mdev_accept - BOOLEAN
originated. Only valid when the kernel was compiled with
CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 0 (disabled)
udp_mem - vector of 3 INTEGERs: min, pressure, max
@@ -1320,19 +1450,29 @@ raw_l3mdev_accept - BOOLEAN
originated. Only valid when the kernel was compiled with
CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 1 (enabled)
CIPSOv4 Variables
=================
cipso_cache_enable - BOOLEAN
- If set, enable additions to and lookups from the CIPSO label mapping
- cache. If unset, additions are ignored and lookups always result in a
+ If enabled, allow additions to and lookups from the CIPSO label mapping
+ cache. If disabled, additions are ignored and lookups always result in a
miss. However, regardless of the setting the cache is still
invalidated when required, which means you can safely toggle this on and
off and the cache will always be "safe".
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
cipso_cache_bucket_size - INTEGER
The CIPSO label cache consists of a fixed size hash table with each
@@ -1350,17 +1490,27 @@ cipso_rbm_optfmt - BOOLEAN
This means that when set the CIPSO tag will be padded with empty
categories in order to make the packet data 32-bit aligned.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
-cipso_rbm_structvalid - BOOLEAN
- If set, do a very strict check of the CIPSO option when
- ip_options_compile() is called. If unset, relax the checks done during
+cipso_rbm_strictvalid - BOOLEAN
+ If enabled, do a very strict check of the CIPSO option when
+ ip_options_compile() is called. If disabled, relax the checks done during
ip_options_compile(). Either way is "safe" as errors are caught else
where in the CIPSO processing code but setting this to 0 (False) should
result in less work (i.e. it should be faster) but could cause problems
with other implementations that require strict checking.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
IP Variables
============
@@ -1417,10 +1567,15 @@ ip_unprivileged_port_start - INTEGER
Default: 1024
ip_nonlocal_bind - BOOLEAN
- If set, allows processes to bind() to non-local IP addresses,
+ If enabled, allows processes to bind() to non-local IP addresses,
which can be quite useful - but may break some applications.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
ip_autobind_reuse - BOOLEAN
By default, bind() does not select the ports automatically even if
@@ -1429,7 +1584,13 @@ ip_autobind_reuse - BOOLEAN
when you use bind()+connect(), but may break some applications.
The preferred solution is to use IP_BIND_ADDRESS_NO_PORT and this
option should only be set by experts.
- Default: 0
+
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
ip_dynaddr - INTEGER
If set non-zero, enables support for dynamic addresses.
@@ -1447,7 +1608,12 @@ ip_early_demux - BOOLEAN
It may add an additional cost for pure routing workloads that
reduces overall throughput, in such case you should disable it.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
ping_group_range - 2 INTEGERS
Restrict ICMP_PROTO datagram sockets to users in the group range.
@@ -1459,31 +1625,56 @@ ping_group_range - 2 INTEGERS
tcp_early_demux - BOOLEAN
Enable early demux for established TCP sockets.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
udp_early_demux - BOOLEAN
Enable early demux for connected UDP sockets. Disable this if
your system could experience more unconnected load.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
icmp_echo_ignore_all - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
icmp_echo_enable_probe - BOOLEAN
- If set to one, then the kernel will respond to RFC 8335 PROBE
+ If enabled, then the kernel will respond to RFC 8335 PROBE
requests sent to it.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
icmp_echo_ignore_broadcasts - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO and
+ If enabled, then the kernel will ignore all ICMP ECHO and
TIMESTAMP requests sent to it via broadcast/multicast.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
icmp_ratelimit - INTEGER
Limit the maximal rates for sending ICMP packets whose type matches
@@ -1540,17 +1731,22 @@ icmp_ratemask - INTEGER
icmp_ignore_bogus_error_responses - BOOLEAN
Some routers violate RFC1122 by sending bogus responses to broadcast
frames. Such violations are normally logged via a kernel warning.
- If this is set to TRUE, the kernel will not give such warnings, which
+ If enabled, the kernel will not give such warnings, which
will avoid log file clutter.
- Default: 1
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
icmp_errors_use_inbound_ifaddr - BOOLEAN
- If zero, icmp error messages are sent with the primary address of
+ If disabled, icmp error messages are sent with the primary address of
the exiting interface.
- If non-zero, the message will be sent with the primary address of
+ If enabled, the message will be sent with the primary address of
the interface that received the packet that caused the icmp error.
This is the behaviour many network administrators will expect from
a router. And it can make debugging complicated network layouts
@@ -1560,7 +1756,12 @@ icmp_errors_use_inbound_ifaddr - BOOLEAN
then the primary address of the first non-loopback interface that
has one will be used regardless of this setting.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
igmp_max_memberships - INTEGER
Change the maximum number of multicast groups we can subscribe to.
@@ -1910,8 +2111,12 @@ arp_evict_nocarrier - BOOLEAN
between access points on the same network. In most cases this should
remain as the default (1).
- - 1 - (default): Clear the ARP cache on NOCARRIER events
- - 0 - Do not clear ARP cache on NOCARRIER events
+ Possible values:
+
+ - 0 (disabled) - Do not clear ARP cache on NOCARRIER events
+ - 1 (enabled) - Clear the ARP cache on NOCARRIER events
+
+ Default: 1 (enabled)
mcast_solicit - INTEGER
The maximum number of multicast probes in INCOMPLETE state,
@@ -1934,9 +2139,23 @@ mcast_resolicit - INTEGER
disable_policy - BOOLEAN
Disable IPSEC policy (SPD) for this interface
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
disable_xfrm - BOOLEAN
Disable IPSEC encryption on this interface, whatever the policy
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
igmpv2_unsolicited_report_interval - INTEGER
The interval in milliseconds in which the next unsolicited
IGMPv1 or IGMPv2 report retransmit will take place.
@@ -1952,11 +2171,25 @@ igmpv3_unsolicited_report_interval - INTEGER
ignore_routes_with_linkdown - BOOLEAN
Ignore routes whose link is down when performing a FIB lookup.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
promote_secondaries - BOOLEAN
When a primary IP address is removed from this interface
promote a corresponding secondary IP address instead of
removing all the corresponding secondary IP addresses.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
drop_unicast_in_l2_multicast - BOOLEAN
Drop any unicast IP packets that are received in link-layer
multicast (or broadcast) frames.
@@ -1964,14 +2197,24 @@ drop_unicast_in_l2_multicast - BOOLEAN
This behavior (for multicast) is actually a SHOULD in RFC
1122, but is disabled by default for compatibility reasons.
- Default: off (0)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
drop_gratuitous_arp - BOOLEAN
Drop all gratuitous ARP frames, for example if there's a known
good ARP proxy on the network and such frames need not be used
(or in the case of 802.11, must not be used to prevent attacks.)
- Default: off (0)
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
tag - INTEGER
@@ -2015,20 +2258,24 @@ bindv6only - BOOLEAN
which restricts use of the IPv6 socket to IPv6 communication
only.
- - TRUE: disable IPv4-mapped address feature
- - FALSE: enable IPv4-mapped address feature
+ Possible values:
- Default: FALSE (as specified in RFC3493)
+ - 0 (disabled) - enable IPv4-mapped address feature
+ - 1 (enabled) - disable IPv4-mapped address feature
+
+ Default: 0 (disabled)
flowlabel_consistency - BOOLEAN
Protect the consistency (and unicity) of flow label.
You have to disable it to use IPV6_FL_F_REFLECT flag on the
flow label manager.
- - TRUE: enabled
- - FALSE: disabled
+ Possible values:
- Default: TRUE
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
auto_flowlabels - INTEGER
Automatically generate flow labels based on a flow hash of the
@@ -2054,10 +2301,13 @@ flowlabel_state_ranges - BOOLEAN
reserved for the IPv6 flow manager facility, 0x80000-0xFFFFF
is reserved for stateless flow labels as described in RFC6437.
- - TRUE: enabled
- - FALSE: disabled
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
- Default: true
flowlabel_reflect - INTEGER
Control flow label reflection. Needed for Path MTU
@@ -2125,10 +2375,13 @@ anycast_src_echo_reply - BOOLEAN
Controls the use of anycast addresses as source addresses for ICMPv6
echo reply
- - TRUE: enabled
- - FALSE: disabled
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
- Default: FALSE
idgen_delay - INTEGER
Controls the delay in seconds after which time to retry
@@ -2185,7 +2438,12 @@ skip_notify_on_dev_down - BOOLEAN
to true skips the message, making IPv4 and IPv6 on par in relying
on userspace caches to track link events and evict routes.
- Default: false (generate message)
+ Possible values:
+
+ - 0 (disabled) - generate the message
+ - 1 (enabled) - skip generating the message
+
+ Default: 0 (disabled)
nexthop_compat_mode - BOOLEAN
New nexthop API provides a means for managing nexthops independent of
@@ -2292,13 +2550,26 @@ conf/all/forwarding - BOOLEAN
proxy_ndp - BOOLEAN
Do proxy ndp.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
+
fwmark_reflect - BOOLEAN
Controls the fwmark of kernel-generated IPv6 reply packets that are not
associated with a socket for example, TCP RSTs or ICMPv6 echo replies).
- If unset, these packets have a fwmark of zero. If set, they have the
+ If disabled, these packets have a fwmark of zero. If enabled, they have the
fwmark of the packet they are replying to.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
``conf/interface/*``:
Change special settings per interface.
@@ -2389,9 +2660,11 @@ ra_honor_pio_life - BOOLEAN
lifetime of an address matching a prefix sent in a Router
Advertisement Prefix Information Option.
- - If enabled, the PIO valid lifetime will always be honored.
- - If disabled, RFC4862 section 5.5.3e is used to determine
+ Possible values:
+
+ - 0 (disabled) - RFC4862 section 5.5.3e is used to determine
the valid lifetime of the address.
+ - 1 (enabled) - the PIO valid lifetime will always be honored.
Default: 0 (disabled)
@@ -2403,8 +2676,10 @@ ra_honor_pio_pflag - BOOLEAN
P-flag suppresses any effects of the A-flag within the same
PIO. For a given PIO, P=1 and A=1 is treated as A=0.
- - If disabled, the P-flag is ignored.
- - If enabled, the P-flag will disable SLAAC autoconfiguration
+ Possible values:
+
+ - 0 (disabled) - the P-flag is ignored.
+ - 1 (enabled) - the P-flag will disable SLAAC autoconfiguration
for the given Prefix Information Option.
Default: 0 (disabled)
@@ -2526,10 +2801,15 @@ mtu - INTEGER
Default: 1280 (IPv6 required minimum)
ip_nonlocal_bind - BOOLEAN
- If set, allows processes to bind() to non-local IPv6 addresses,
+ If enabled, allows processes to bind() to non-local IPv6 addresses,
which can be quite useful - but may break some applications.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
router_probe_interval - INTEGER
Minimum interval (in seconds) between Router Probing described
@@ -2559,7 +2839,12 @@ use_oif_addrs_only - BOOLEAN
routed via this interface are restricted to the set of addresses
configured on this interface (vis. RFC 6724, section 4).
- Default: false
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
use_tempaddr - INTEGER
Preference for Privacy Extensions (RFC3041).
@@ -2684,10 +2969,14 @@ force_tllao - BOOLEAN
ndisc_notify - BOOLEAN
Define mode for notification of address and device changes.
- * 0 - (default): do nothing
- * 1 - Generate unsolicited neighbour advertisements when device is brought
+ Possible values:
+
+ - 0 (disabled) - do nothing
+ - 1 (enabled) - Generate unsolicited neighbour advertisements when device is brought
up or hardware address changes.
+ Default: 0 (disabled)
+
ndisc_tclass - INTEGER
The IPv6 Traffic Class to use by default when sending IPv6 Neighbor
Discovery (Router Solicitation, Router Advertisement, Neighbor
@@ -2704,8 +2993,12 @@ ndisc_evict_nocarrier - BOOLEAN
not be cleared when roaming between access points on the same network.
In most cases this should remain as the default (1).
- - 1 - (default): Clear neighbor discover cache on NOCARRIER events.
- - 0 - Do not clear neighbor discovery cache on NOCARRIER events.
+ Possible values:
+
+ - 0 (disabled) - Do not clear neighbor discovery cache on NOCARRIER events.
+ - 1 (enabled) - Clear the neighbor discovery cache on NOCARRIER events.
+
+ Default: 1 (enabled)
mldv1_unsolicited_report_interval - INTEGER
The interval in milliseconds in which the next unsolicited
@@ -2734,25 +3027,34 @@ suppress_frag_ndisc - INTEGER
optimistic_dad - BOOLEAN
Whether to perform Optimistic Duplicate Address Detection (RFC 4429).
- * 0: disabled (default)
- * 1: enabled
-
Optimistic Duplicate Address Detection for the interface will be enabled
if at least one of conf/{all,interface}/optimistic_dad is set to 1,
it will be disabled otherwise.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
+
use_optimistic - BOOLEAN
If enabled, do not classify optimistic addresses as deprecated during
source address selection. Preferred addresses will still be chosen
before optimistic addresses, subject to other ranking in the source
address selection algorithm.
- * 0: disabled (default)
- * 1: enabled
-
This will be enabled if at least one of
conf/{all,interface}/use_optimistic is set to 1, disabled otherwise.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
+
stable_secret - IPv6 address
This IPv6 address will be used as a secret to generate IPv6
addresses for link-local addresses and autoconfigured
@@ -2783,14 +3085,24 @@ drop_unicast_in_l2_multicast - BOOLEAN
Drop any unicast IPv6 packets that are received in link-layer
multicast (or broadcast) frames.
- By default this is turned off.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
drop_unsolicited_na - BOOLEAN
Drop all unsolicited neighbor advertisements, for example if there's
a known good NA proxy on the network and such frames need not be used
(or in the case of 802.11, must not be used to prevent attacks.)
- By default this is turned off.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
accept_untracked_na - INTEGER
Define behavior for accepting neighbor advertisements from devices that
@@ -2831,7 +3143,12 @@ enhanced_dad - BOOLEAN
The nonce option will be sent on an interface unless both of
conf/{all,interface}/enhanced_dad are set to FALSE.
- Default: TRUE
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 1 (enabled)
``icmp/*``:
===========
@@ -2860,29 +3177,49 @@ ratemask - list of comma separated ranges
Default: 0-1,3-127 (rate limit ICMPv6 errors except Packet Too Big)
echo_ignore_all - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it over the IPv6 protocol.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
echo_ignore_multicast - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it over the IPv6 protocol via multicast.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
echo_ignore_anycast - BOOLEAN
- If set non-zero, then the kernel will ignore all ICMP ECHO
+ If enabled, then the kernel will ignore all ICMP ECHO
requests sent to it over the IPv6 protocol destined to anycast address.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
error_anycast_as_unicast - BOOLEAN
- If set to 1, then the kernel will respond with ICMP Errors
+ If enabled, then the kernel will respond with ICMP Errors
resulting from requests sent to it over the IPv6 protocol destined
to anycast address essentially treating anycast as unicast.
- Default: 0
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
+ Default: 0 (disabled)
xfrm6_gc_thresh - INTEGER
(Obsolete since linux-4.14)
@@ -2900,34 +3237,49 @@ YOSHIFUJI Hideaki / USAGI Project <yoshfuji@linux-ipv6.org>
=================================
bridge-nf-call-arptables - BOOLEAN
- - 1 : pass bridged ARP traffic to arptables' FORWARD chain.
- - 0 : disable this.
- Default: 1
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged ARP traffic to arptables' FORWARD chain.
+
+ Default: 1 (enabled)
bridge-nf-call-iptables - BOOLEAN
- - 1 : pass bridged IPv4 traffic to iptables' chains.
- - 0 : disable this.
- Default: 1
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged IPv4 traffic to iptables' chains.
+
+ Default: 1 (enabled)
bridge-nf-call-ip6tables - BOOLEAN
- - 1 : pass bridged IPv6 traffic to ip6tables' chains.
- - 0 : disable this.
- Default: 1
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged IPv6 traffic to ip6tables' chains.
+
+ Default: 1 (enabled)
bridge-nf-filter-vlan-tagged - BOOLEAN
- - 1 : pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables.
- - 0 : disable this.
- Default: 0
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged vlan-tagged ARP/IP/IPv6 traffic to {arp,ip,ip6}tables.
+
+ Default: 0 (disabled)
bridge-nf-filter-pppoe-tagged - BOOLEAN
- - 1 : pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables.
- - 0 : disable this.
- Default: 0
+ Possible values:
+
+ - 0 (disabled) - disable this.
+ - 1 (enabled) - pass bridged pppoe-tagged IP/IPv6 traffic to {ip,ip6}tables.
+
+ Default: 0 (disabled)
bridge-nf-pass-vlan-input-dev - BOOLEAN
- 1: if bridge-nf-filter-vlan-tagged is enabled, try to find a vlan
@@ -2950,11 +3302,12 @@ addip_enable - BOOLEAN
the ability to dynamically add and remove new addresses for the SCTP
associations.
- 1: Enable extension.
+ Possible values:
- 0: Disable extension.
+ - 0 (disabled) - disable extension.
+ - 1 (enabled) - enable extension.
- Default: 0
+ Default: 0 (disabled)
pf_enable - INTEGER
Enable or disable pf (pf is short for potentially failed) state. A value
@@ -3023,19 +3376,23 @@ auth_enable - BOOLEAN
required for secure operation of Dynamic Address Reconfiguration
(ADD-IP) extension.
- - 1: Enable this extension.
- - 0: Disable this extension.
+ Possible values:
- Default: 0
+ - 0 (disabled) - disable extension.
+ - 1 (enabled) - enable extension.
+
+ Default: 0 (disabled)
prsctp_enable - BOOLEAN
Enable or disable the Partial Reliability extension (RFC3758) which
is used to notify peers that a given DATA should no longer be expected.
- - 1: Enable extension
- - 0: Disable
+ Possible values:
- Default: 1
+ - 0 (disabled) - disable extension.
+ - 1 (enabled) - enable extension.
+
+ Default: 1 (enabled)
max_burst - INTEGER
The limit of the number of new packets that can be initially sent. It
@@ -3135,10 +3492,12 @@ cookie_preserve_enable - BOOLEAN
Enable or disable the ability to extend the lifetime of the SCTP cookie
that is used during the establishment phase of SCTP association
- - 1: Enable cookie lifetime extension.
- - 0: Disable
+ Possible values:
+
+ - 0 (disabled) - disable.
+ - 1 (enabled) - enable cookie lifetime extension.
- Default: 1
+ Default: 1 (enabled)
cookie_hmac_alg - STRING
Select the hmac algorithm used when generating the cookie value sent by
@@ -3272,10 +3631,12 @@ reconf_enable - BOOLEAN
a stream, and it includes the Parameters of "Outgoing/Incoming SSN
Reset", "SSN/TSN Reset" and "Add Outgoing/Incoming Streams".
- - 1: Enable extension.
- - 0: Disable extension.
+ Possible values:
- Default: 0
+ - 0 (disabled) - Disable extension.
+ - 1 (enabled) - Enable extension.
+
+ Default: 0 (disabled)
intl_enable - BOOLEAN
Enable or disable extension of User Message Interleaving functionality
@@ -3286,10 +3647,12 @@ intl_enable - BOOLEAN
to 1 and also needs to set socket options SCTP_FRAGMENT_INTERLEAVE to 2
and SCTP_INTERLEAVING_SUPPORTED to 1.
- - 1: Enable extension.
- - 0: Disable extension.
+ Possible values:
- Default: 0
+ - 0 (disabled) - Disable extension.
+ - 1 (enabled) - Enable extension.
+
+ Default: 0 (disabled)
ecn_enable - BOOLEAN
Control use of Explicit Congestion Notification (ECN) by SCTP.
@@ -3298,10 +3661,12 @@ ecn_enable - BOOLEAN
due to congestion by allowing supporting routers to signal congestion
before having to drop packets.
- 1: Enable ecn.
- 0: Disable ecn.
+ Possible values:
- Default: 1
+ - 0 (disabled) - Disable ECN.
+ - 1 (enabled) - Enable ECN.
+
+ Default: 1 (enabled)
l3mdev_accept - BOOLEAN
Enabling this option allows a "global" bound socket to work
@@ -3310,6 +3675,11 @@ l3mdev_accept - BOOLEAN
originated. Only valid when the kernel was compiled with
CONFIG_NET_L3_MASTER_DEV.
+ Possible values:
+
+ - 0 (disabled)
+ - 1 (enabled)
+
Default: 1 (enabled)
diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst
index bc9b2131bf7a..7bbda5944ee2 100644
--- a/Documentation/networking/net_cachelines/tcp_sock.rst
+++ b/Documentation/networking/net_cachelines/tcp_sock.rst
@@ -115,7 +115,6 @@ u32 lost_out read_mostly read_m
u32 sacked_out read_mostly read_mostly tcp_left_out(tx);tcp_packets_in_flight(tx/rx);tcp_clean_rtx_queue(rx)
struct hrtimer pacing_timer
struct hrtimer compressed_ack_timer
-struct sk_buff* lost_skb_hint read_mostly tcp_clean_rtx_queue
struct sk_buff* retransmit_skb_hint read_mostly tcp_clean_rtx_queue
struct rb_root out_of_order_queue read_mostly tcp_data_queue,tcp_fast_path_check
struct sk_buff* ooo_last_skb
@@ -123,7 +122,6 @@ struct tcp_sack_block[1] duplicate_sack
struct tcp_sack_block[4] selective_acks
struct tcp_sack_block[4] recv_sack_cache
struct sk_buff* highest_sack read_write tcp_event_new_data_sent
-int lost_cnt_hint
u32 prior_ssthresh
u32 high_seq
u32 retrans_stamp
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index a0076b542e9c..59cb9982afe6 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -340,6 +340,38 @@ In this example, the message was sent by CPU 42.
cpu=42 # kernel-populated value
+Message ID auto-population in userdata
+--------------------------------------
+
+Within the netconsole configfs hierarchy, there is a file named `msgid_enabled`
+located in the `userdata` directory. This file controls the message ID
+auto-population feature, which assigns a numeric ID to each message sent to a
+given target and appends that ID to the message's userdata dictionary.
+
+The message ID is generated using a per-target 32-bit counter that is
+incremented for every message sent to the target. Note that this counter will
+eventually wrap around after reaching the uint32_t maximum, so the message ID is
+not globally unique over time. However, it can still be used by the target to
+detect if messages were dropped before reaching the target by identifying gaps
+in the sequence of IDs.
+
+It is important to distinguish message IDs from the message <sequnum> field.
+Some kernel messages may never reach netconsole (for example, due to printk
+rate limiting). Thus, a gap in <sequnum> cannot be solely relied upon to
+indicate that a message was dropped during transmission, as it may never have
+been sent via netconsole. The message ID, on the other hand, is only assigned
+to messages that are actually transmitted via netconsole.
+
+Example::
+
+ echo "This is message #1" > /dev/kmsg
+ echo "This is message #2" > /dev/kmsg
+ 13,434,54928466,-;This is message #1
+ msgid=1
+ 13,435,54934019,-;This is message #2
+ msgid=2
+
+
Extended console:
=================
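
The paragraphs above describe how a receiver can use the per-target msgid
counter to detect dropped messages by looking for gaps in the sequence. A
hypothetical receiver-side sketch (not part of this patch) is shown below; it
assumes the caller has already isolated the "msgid=<n>" userdata lines from
the example, and the struct and function names are invented for illustration::

  /*
   * Hypothetical receiver-side helper: track the per-target msgid
   * counter appended to netconsole userdata and count gaps as drops.
   * Duplicate or reordered messages are not handled in this sketch.
   */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct msgid_tracker {
          uint32_t last;          /* last msgid seen from this target */
          bool seen;              /* have we seen any msgid yet? */
          uint64_t dropped;       /* running total of missing messages */
  };

  /* Feed one "msgid=<n>" line; returns how many ids were skipped. */
  static uint32_t msgid_track(struct msgid_tracker *t, const char *line)
  {
          unsigned int id;
          uint32_t gap = 0;

          if (sscanf(line, "msgid=%u", &id) != 1)
                  return 0;

          /* unsigned arithmetic keeps the gap correct across wraparound */
          if (t->seen)
                  gap = (uint32_t)id - t->last - 1;

          t->last = (uint32_t)id;
          t->seen = true;
          t->dropped += gap;
          return gap;
  }

  int main(void)
  {
          struct msgid_tracker t = { 0 };

          msgid_track(&t, "msgid=1");
          msgid_track(&t, "msgid=2");
          msgid_track(&t, "msgid=5");     /* ids 3 and 4 never arrived */
          printf("dropped so far: %llu\n", (unsigned long long)t.dropped);
          return 0;
  }

Because the subtraction is done in 32-bit unsigned arithmetic, the gap
computation stays correct across the counter wraparound mentioned above.
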
diff --git a/MAINTAINERS b/MAINTAINERS
index efb51ee92683..bb9df569a3ff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12523,7 +12523,7 @@ M: Miri Korenblit <miriam.rachel.korenblit@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next.git/
F: drivers/net/wireless/intel/iwlwifi/
INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER
@@ -20339,7 +20339,7 @@ QUALCOMM ATHEROS QCA7K ETHERNET DRIVER
M: Stefan Wahren <wahrenst@gmx.net>
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/qca,qca7000.txt
+F: Documentation/devicetree/bindings/net/qca,qca7000.yaml
F: drivers/net/ethernet/qualcomm/qca*
QUALCOMM BAM-DMUX WWAN NETWORK DRIVER
@@ -26428,6 +26428,7 @@ F: include/uapi/linux/vm_sockets.h
F: include/uapi/linux/vm_sockets_diag.h
F: include/uapi/linux/vsockmon.h
F: net/vmw_vsock/
+F: tools/testing/selftests/vsock/
F: tools/testing/vsock/
VMALLOC
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index d529bcb03775..062def303dce 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -18,9 +18,8 @@
#define OTX2_CPT_MAX_VFS_NUM 128
#define OTX2_CPT_RVU_FUNC_ADDR_S(blk, slot, offs) \
(((blk) << 20) | ((slot) << 12) | (offs))
-#define OTX2_CPT_RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+#define OTX2_CPT_RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
#define OTX2_CPT_INVALID_CRYPTO_ENG_GRP 0xFF
#define OTX2_CPT_NAME_LENGTH 64
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 12c0e966fa65..b4b2d3d1cbc2 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -142,7 +142,7 @@ static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev *cptpf,
memset(req, 0, sizeof(*req));
req->hdr.id = MBOX_MSG_CPT_INLINE_IPSEC_CFG;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
req->dir = CPT_INLINE_INBOUND;
req->slot = slot;
req->sso_pf_func_ovrd = cptpf->sso_pf_func_ovrd;
@@ -184,7 +184,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
nix_req->gen_cfg.opcode = cpt_inline_rx_opcode(pdev);
nix_req->gen_cfg.param1 = req->param1;
nix_req->gen_cfg.param2 = req->param2;
- nix_req->inst_qsel.cpt_pf_func = OTX2_CPT_RVU_PFFUNC(cptpf->pf_id, 0);
+ nix_req->inst_qsel.cpt_pf_func =
+ OTX2_CPT_RVU_PFFUNC(cptpf->pdev, cptpf->pf_id, 0);
nix_req->inst_qsel.cpt_slot = 0;
ret = otx2_cpt_send_mbox_msg(&cptpf->afpf_mbox, pdev);
if (ret)
@@ -392,9 +393,8 @@ void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work)
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
/* Set which VF sent this message based on mbox IRQ */
- msg->pcifunc = ((u16)cptpf->pf_id << RVU_PFVF_PF_SHIFT) |
- ((vf->vf_id + 1) & RVU_PFVF_FUNC_MASK);
-
+ msg->pcifunc = rvu_make_pcifunc(cptpf->pdev, cptpf->pf_id,
+ (vf->vf_id + 1));
err = cptpf_handle_vf_req(cptpf, vf, msg,
msg->next_msgoff - offset);
/*
@@ -469,8 +469,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
switch (msg->id) {
case MBOX_MSG_READY:
- cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
- RVU_PFVF_PF_MASK;
+ cptpf->pf_id = rvu_get_pf(cptpf->pdev, msg->pcifunc);
break;
case MBOX_MSG_MSIX_OFFSET:
rsp_msix = (struct msix_offset_rsp *) msg;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 78367849c3d5..7180944ece50 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -176,7 +176,9 @@ static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
- cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
+ rvu_make_pcifunc(cptpf->pdev,
+ cptpf->pf_id, 0),
+ blkaddr);
if (ret)
return ret;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 931b72580fd9..92e49babd79a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -189,7 +189,7 @@ int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type)
}
req->hdr.id = MBOX_MSG_GET_ENG_GRP_NUM;
req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->hdr.pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
req->eng_type = eng_type;
return otx2_cpt_send_mbox_msg(mbox, pdev);
@@ -210,7 +210,7 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_KVF_LIMITS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
@@ -230,7 +230,7 @@ int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
}
req->id = MBOX_MSG_GET_CAPS;
req->sig = OTX2_MBOX_REQ_SIG;
- req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->pdev, cptvf->vf_id, 0);
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 2b6d8ef1cdf3..9b11e637397b 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -45,7 +45,6 @@ struct dpll_device {
* @dpll_refs: hold references to dplls pin was registered with
* @parent_refs: hold references to parent pins pin was registered with
* @prop: pin properties copied from the registerer
- * @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
* @rcu: rcu_head for kfree_rcu()
**/
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index c130f87147fa..4619aaa18b9c 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -127,6 +127,26 @@ dpll_msg_add_mode_supported(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_phase_offset_monitor(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state;
+ int ret;
+
+ if (ops->phase_offset_monitor_set && ops->phase_offset_monitor_get) {
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll),
+ &state, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_MONITOR, state))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
@@ -591,6 +611,9 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
return ret;
if (nla_put_u32(msg, DPLL_A_TYPE, dpll->type))
return -EMSGSIZE;
+ ret = dpll_msg_add_phase_offset_monitor(msg, dpll, extack);
+ if (ret)
+ return ret;
return 0;
}
@@ -747,6 +770,31 @@ int dpll_pin_change_ntf(struct dpll_pin *pin)
EXPORT_SYMBOL_GPL(dpll_pin_change_ntf);
static int
+dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_feature_state state = nla_get_u32(a), old_state;
+ int ret;
+
+ if (!(ops->phase_offset_monitor_set && ops->phase_offset_monitor_get)) {
+ NL_SET_ERR_MSG_ATTR(extack, a, "dpll device not capable of phase offset monitor");
+ return -EOPNOTSUPP;
+ }
+ ret = ops->phase_offset_monitor_get(dpll, dpll_priv(dpll), &old_state,
+ extack);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "unable to get current state of phase offset monitor");
+ return ret;
+ }
+ if (state == old_state)
+ return 0;
+
+ return ops->phase_offset_monitor_set(dpll, dpll_priv(dpll), state,
+ extack);
+}
+
+static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -1533,12 +1581,29 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
return genlmsg_reply(msg, info);
}
-int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+static int
+dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
{
- /* placeholder for set command */
+ int ret;
+
+ if (info->attrs[DPLL_A_PHASE_OFFSET_MONITOR]) {
+ struct nlattr *a = info->attrs[DPLL_A_PHASE_OFFSET_MONITOR];
+
+ ret = dpll_phase_offset_monitor_set(dpll, a, info->extack);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
+int dpll_nl_device_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct dpll_device *dpll = info->user_ptr[0];
+
+ return dpll_set_from_nlattr(dpll, info);
+}
+
int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
struct dpll_dump_ctx *ctx = dpll_dump_context(cb);
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index fe9b6893d261..8de90310c3be 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -37,8 +37,9 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
};
/* DPLL_CMD_DEVICE_SET - do */
-static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_ID + 1] = {
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_MONITOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
+ [DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
};
/* DPLL_CMD_PIN_ID_GET - do */
@@ -105,7 +106,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_device_set_doit,
.post_doit = dpll_post_doit,
.policy = dpll_device_set_nl_policy,
- .maxattr = DPLL_A_ID,
+ .maxattr = DPLL_A_PHASE_OFFSET_MONITOR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 076edf161048..1205a4432eb4 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -1920,7 +1920,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
}
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
- ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+ ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun");
#endif
for (i = 0; i < max_group_count; i++) {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8d9f4c410546..7ce3e6de0c19 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -59,7 +59,9 @@ static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
+ if (!rpm->debug.class)
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT,
+ "intel_runtime_pm");
}
static intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 51561b190b93..7fa194de5d35 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -114,7 +114,8 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
"wakeref.work", &key->work, 0);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
- ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+ if (!wf->debug.class)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, "intel_wakeref");
#endif
}
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 734a0b3242a9..ed86537b2f61 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -979,7 +979,7 @@ static void amt_event_send_request(struct amt_dev *amt)
amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
- mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+ mod_delayed_work(amt_wq, &amt->req_wq, secs_to_jiffies(exp));
}
static void amt_req_work(struct work_struct *work)
@@ -1046,7 +1046,8 @@ static bool amt_send_membership_update(struct amt_dev *amt,
amt->gw_port,
amt->relay_port,
false,
- false);
+ false,
+ 0);
amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
return false;
}
@@ -1103,7 +1104,8 @@ static void amt_send_multicast_data(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
}
static bool amt_send_membership_query(struct amt_dev *amt,
@@ -1161,7 +1163,8 @@ static bool amt_send_membership_query(struct amt_dev *amt,
amt->relay_port,
tunnel->source_port,
false,
- false);
+ false,
+ 0);
amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
return false;
}
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index a9dffdcac805..0df3208783ad 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -362,8 +362,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
- !test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+ 0);
return 0;
free_dst:
@@ -431,7 +431,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
!test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ info->key.tun_flags),
+ 0);
return 0;
free_dst:
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index 3809c148fb88..a94bd67c670c 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -179,7 +179,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
return;
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ *ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index a36842ace084..13826e8a707b 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -18,7 +18,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
- [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
+ [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
[IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
@@ -67,12 +67,12 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
+ u32 tdc_flags = cm->flags & CAN_CTRLMODE_FD_TDC_MASK;
is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
- if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
+ if (tdc_flags == CAN_CTRLMODE_FD_TDC_MASK)
return -EOPNOTSUPP;
/* If one of the CAN_CTRLMODE_TDC_* flag is set then
* TDC must be set and vice-versa
@@ -144,7 +144,7 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
int err;
- if (!tdc_const || !can_tdc_is_enabled(priv))
+ if (!tdc_const || !can_fd_tdc_is_enabled(priv))
return -EOPNOTSUPP;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
@@ -189,7 +189,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
- u32 tdc_mask = 0;
+ bool fd_tdc_flag_provided = false;
int err;
/* We need synchronization with dev->stop() */
@@ -230,16 +230,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = CAN_MTU;
memset(&priv->fd.data_bittiming, 0,
sizeof(priv->fd.data_bittiming));
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
- tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
+ fd_tdc_flag_provided = cm->mask & CAN_CTRLMODE_FD_TDC_MASK;
/* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
* exclusive: make sure to turn the other one off
*/
- if (tdc_mask)
- priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
+ if (fd_tdc_flag_provided)
+ priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_FD_TDC_MASK;
}
if (data[IFLA_CAN_BITTIMING]) {
@@ -339,10 +339,10 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
extack);
if (err) {
- priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
return err;
}
- } else if (!tdc_mask) {
+ } else if (!fd_tdc_flag_provided) {
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
@@ -409,7 +409,7 @@ static size_t can_tdc_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
- if (can_tdc_is_enabled(priv)) {
+ if (can_fd_tdc_is_enabled(priv)) {
if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
priv->fd.do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
@@ -490,7 +490,7 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
- if (can_tdc_is_enabled(priv)) {
+ if (can_fd_tdc_is_enabled(priv)) {
u32 tdcv;
int err = -EINVAL;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 7f10213738e5..417d9196f41e 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -191,9 +191,19 @@
/* RSCFDnCFDCmFDCFG */
#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
+#define RCANFD_FDCFG_TDCO GENMASK(23, 16)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
-#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
+
+/* RSCFDnCFDCmFDSTS */
+#define RCANFD_FDSTS_SOC GENMASK(31, 24)
+#define RCANFD_FDSTS_EOC GENMASK(23, 16)
+#define RCANFD_GEN4_FDSTS_TDCVF BIT(15)
+#define RCANFD_GEN4_FDSTS_PNSTS GENMASK(13, 12)
+#define RCANFD_FDSTS_SOCO BIT(9)
+#define RCANFD_FDSTS_EOCO BIT(8)
+#define RCANFD_FDSTS_TDCVF BIT(7)
+#define RCANFD_FDSTS_TDCR GENMASK(7, 0)
/* RSCFDnCFDRFCCx */
#define RCANFD_RFCC_RFIM BIT(12)
@@ -425,18 +435,19 @@
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
/* R-Car Gen4 Classical and CAN FD mode specific register map */
-#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
-
#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
-/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(gpriv, m) ((gpriv)->info->regs->f_dcfg + (0x20 * (m)))
-#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
-#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
-#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
-#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
+/* RSCFDnCFDCmXXX -> gpriv->fcbase[m].xxx */
+struct rcar_canfd_f_c {
+ u32 dcfg;
+ u32 cfdcfg;
+ u32 cfdctr;
+ u32 cfdsts;
+ u32 cfdcrc;
+ u32 pad[3];
+};
/* RSCFDnCFDGAFLXXXj offset */
#define RCANFD_F_GAFL_OFFSET (0x1000)
@@ -510,7 +521,7 @@ struct rcar_canfd_regs {
u16 cfcc; /* Common FIFO Configuration/Control Register */
u16 cfsts; /* Common FIFO Status Register */
u16 cfpctr; /* Common FIFO Pointer Control Register */
- u16 f_dcfg; /* Global FD Configuration Register */
+ u16 coffset; /* Channel Data Bitrate Configuration Register */
u16 rfoffset; /* Receive FIFO buffer access ID register */
u16 cfoffset; /* Transmit/receive FIFO buffer access ID register */
};
@@ -529,6 +540,7 @@ struct rcar_canfd_shift_data {
struct rcar_canfd_hw_info {
const struct can_bittiming_const *nom_bittiming;
const struct can_bittiming_const *data_bittiming;
+ const struct can_tdc_const *tdc_const;
const struct rcar_canfd_regs *regs;
const struct rcar_canfd_shift_data *sh;
u8 rnc_field_width;
@@ -562,6 +574,7 @@ struct rcar_canfd_channel {
struct rcar_canfd_global {
struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
void __iomem *base; /* Register base address */
+ struct rcar_canfd_f_c __iomem *fcbase;
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
@@ -636,12 +649,31 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+/* CAN FD Transmission Delay Compensation constants */
+static const struct can_tdc_const rcar_canfd_gen3_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 128,
+ .tdco_min = 1,
+ .tdco_max = 128,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+static const struct can_tdc_const rcar_canfd_gen4_tdc_const = {
+ .tdcv_min = 1,
+ .tdcv_max = 256,
+ .tdco_min = 1,
+ .tdco_max = 256,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
static const struct rcar_canfd_regs rcar_gen3_regs = {
.rfcc = 0x00b8,
.cfcc = 0x0118,
.cfsts = 0x0178,
.cfpctr = 0x01d8,
- .f_dcfg = 0x0500,
+ .coffset = 0x0500,
.rfoffset = 0x3000,
.cfoffset = 0x3400,
};
@@ -651,7 +683,7 @@ static const struct rcar_canfd_regs rcar_gen4_regs = {
.cfcc = 0x0120,
.cfsts = 0x01e0,
.cfpctr = 0x0240,
- .f_dcfg = 0x1400,
+ .coffset = 0x1400,
.rfoffset = 0x6000,
.cfoffset = 0x6400,
};
@@ -681,6 +713,7 @@ static const struct rcar_canfd_shift_data rcar_gen4_shift_data = {
static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
.nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
.regs = &rcar_gen3_regs,
.sh = &rcar_gen3_shift_data,
.rnc_field_width = 8,
@@ -697,6 +730,7 @@ static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
.nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
.regs = &rcar_gen4_regs,
.sh = &rcar_gen4_shift_data,
.rnc_field_width = 16,
@@ -713,6 +747,7 @@ static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
.nom_bittiming = &rcar_canfd_gen3_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen3_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen3_tdc_const,
.regs = &rcar_gen3_regs,
.sh = &rcar_gen3_shift_data,
.rnc_field_width = 8,
@@ -729,6 +764,7 @@ static const struct rcar_canfd_hw_info rzg2l_hw_info = {
static const struct rcar_canfd_hw_info r9a09g047_hw_info = {
.nom_bittiming = &rcar_canfd_gen4_nom_bittiming_const,
.data_bittiming = &rcar_canfd_gen4_data_bittiming_const,
+ .tdc_const = &rcar_canfd_gen4_tdc_const,
.regs = &rcar_gen4_regs,
.sh = &rcar_gen4_shift_data,
.rnc_field_width = 16,
@@ -778,26 +814,36 @@ static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
rcar_canfd_update(mask, val, base + reg);
}
+static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
+{
+ rcar_canfd_update(val, val, addr);
+}
+
+static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
+{
+ rcar_canfd_update(mask, val, addr);
+}
+
static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- *((u32 *)cf->data + i) =
- rcar_canfd_read(priv->base, off + i * sizeof(u32));
+ data[i] = rcar_canfd_read(priv->base, off + i * sizeof(u32));
}
static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
struct canfd_frame *cf, u32 off)
{
+ const u32 *data = (u32 *)cf->data;
u32 i, lwords;
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- rcar_canfd_write(priv->base, off + i * sizeof(u32),
- *((u32 *)cf->data + i));
+ rcar_canfd_write(priv->base, off + i * sizeof(u32), data[i]);
}
static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
@@ -808,8 +854,8 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
-static void rcar_canfd_setrnc(struct rcar_canfd_global *gpriv, unsigned int ch,
- unsigned int num_rules)
+static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
+ unsigned int num_rules)
{
unsigned int rnc_stride = 32 / gpriv->info->rnc_field_width;
unsigned int shift = 32 - (ch % rnc_stride + 1) * gpriv->info->rnc_field_width;
@@ -827,8 +873,7 @@ static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
for_each_set_bit(ch, &gpriv->channels_mask,
gpriv->info->max_channels)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
- val);
+ rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg, val);
} else {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
@@ -841,6 +886,7 @@ static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
+ struct device *dev = &gpriv->pdev->dev;
u32 sts, ch;
int err;
@@ -850,7 +896,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+ dev_dbg(dev, "global raminit failed\n");
return err;
}
@@ -863,7 +909,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
(sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+ dev_dbg(dev, "global reset failed\n");
return err;
}
@@ -887,8 +933,7 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
(sts & RCANFD_CSTS_CRSTSTS),
2, 500000);
if (err) {
- dev_dbg(&gpriv->pdev->dev,
- "channel %u reset failed\n", ch);
+ dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
}
@@ -938,7 +983,7 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_setrnc(gpriv, ch, num_rules);
+ rcar_canfd_set_rnc(gpriv, ch, num_rules);
if (gpriv->info->shared_can_regs)
offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
@@ -1436,14 +1481,17 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_canfd_set_bittiming(struct net_device *dev)
+static void rcar_canfd_set_bittiming(struct net_device *ndev)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.fd.data_bittiming;
+ const struct can_tdc_const *tdc_const = priv->can.fd.tdc_const;
+ const struct can_tdc *tdc = &priv->can.fd.tdc;
+ u32 cfg, tdcmode = 0, tdco = 0;
u16 brp, sjw, tseg1, tseg2;
- u32 cfg;
u32 ch = priv->channel;
/* Nominal bit timing settings */
@@ -1452,46 +1500,43 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- /* CAN FD only mode */
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+ } else {
+ cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+ }
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
-
- /* Data bit timing settings */
- brp = dbt->brp - 1;
- sjw = dbt->sjw - 1;
- tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
- tseg2 = dbt->phase_seg2 - 1;
-
- cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
- netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
- } else {
- /* Classical CAN only mode */
- if (gpriv->info->shared_can_regs) {
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
- RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) |
- RCANFD_NCFG_NTSEG2(gpriv, tseg2));
- } else {
- cfg = (RCANFD_CFG_TSEG1(tseg1) |
- RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) |
- RCANFD_CFG_TSEG2(tseg2));
- }
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return;
- rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
- netdev_dbg(priv->ndev,
- "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
- brp, sjw, tseg1, tseg2);
+ /* Data bit timing settings */
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+
+ cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+
+ writel(cfg, &gpriv->fcbase[ch].dcfg);
+
+ /* Transceiver Delay Compensation */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) {
+ /* TDC enabled, measured + offset */
+ tdcmode = RCANFD_FDCFG_TDCE;
+ tdco = tdc->tdco - 1;
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ /* TDC enabled, offset only */
+ tdcmode = RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
+ tdco = min(tdc->tdcv + tdc->tdco, tdc_const->tdco_max) - 1;
}
+
+ rcar_canfd_update_bit_reg(&gpriv->fcbase[ch].cfdcfg, mask,
+ tdcmode | FIELD_PREP(RCANFD_FDCFG_TDCO, tdco));
}
static int rcar_canfd_start(struct net_device *ndev)
@@ -1691,7 +1736,8 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
- struct net_device_stats *stats = &priv->ndev->stats;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
@@ -1707,14 +1753,13 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
sts & RCANFD_RFFDSTS_RFFDF)
- skb = alloc_canfd_skb(priv->ndev, &cf);
+ skb = alloc_canfd_skb(ndev, &cf);
else
- skb = alloc_can_skb(priv->ndev,
- (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
} else {
id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
}
if (!skb) {
@@ -1735,7 +1780,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFESI) {
cf->flags |= CANFD_ESI;
- netdev_dbg(priv->ndev, "ESI Error\n");
+ netdev_dbg(ndev, "ESI Error\n");
}
if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
@@ -1802,6 +1847,29 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
return num_pkts;
}
+static unsigned int rcar_canfd_get_tdcr(struct rcar_canfd_global *gpriv,
+ unsigned int ch)
+{
+ u32 sts = readl(&gpriv->fcbase[ch].cfdsts);
+ u32 tdcr = FIELD_GET(RCANFD_FDSTS_TDCR, sts);
+
+ return tdcr & (gpriv->info->tdc_const->tdcv_max - 1);
+}
+
+static int rcar_canfd_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ u32 tdco = priv->can.fd.tdc.tdco;
+ u32 tdcr;
+
+ /* Transceiver Delay Compensation Result */
+ tdcr = rcar_canfd_get_tdcr(priv->gpriv, priv->channel) + 1;
+
+ *tdcv = tdcr < tdco ? 0 : tdcr - tdco;
+
+ return 0;
+}
+
static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
{
int err;
@@ -1818,10 +1886,10 @@ static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+static int rcar_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
u32 val, ch = priv->channel;
/* Peripheral clock is already enabled in probe */
@@ -1924,12 +1992,17 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
if (gpriv->fdmode) {
priv->can.bittiming_const = gpriv->info->nom_bittiming;
priv->can.fd.data_bittiming_const = gpriv->info->data_bittiming;
+ priv->can.fd.tdc_const = gpriv->info->tdc_const;
/* Controller starts in CAN FD only mode */
err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
if (err)
goto fail;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_TDC_AUTO |
+ CAN_CTRLMODE_TDC_MANUAL;
+ priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
} else {
/* Controller starts in Classical CAN only mode */
priv->can.bittiming_const = &rcar_canfd_bittiming_const;
@@ -2086,6 +2159,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
goto fail_dev;
}
gpriv->base = addr;
+ gpriv->fcbase = addr + gpriv->info->regs->coffset;
/* Request IRQ that's common for both channels */
if (info->shared_global_irqs) {
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index ec5c64006a16..5a95877b7419 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -388,8 +388,8 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
mcp251x_spi_write(spi, 4);
}
-static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
- u8 mask, u8 val)
+static int mcp251x_write_bits(struct spi_device *spi, u8 reg,
+ u8 mask, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -398,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
- mcp251x_spi_write(spi, 4);
+ return mcp251x_spi_write(spi, 4);
}
static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -441,6 +441,7 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
unsigned int offset)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ int ret;
u8 val;
/* nothing to be done for inputs */
@@ -450,8 +451,10 @@ static int mcp251x_gpio_request(struct gpio_chip *chip,
val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl |= val;
@@ -530,29 +533,35 @@ static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
return 0;
}
-static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
val = value ? mask : 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
-static void
+static int
mcp251x_gpio_set_multiple(struct gpio_chip *chip,
unsigned long *maskp, unsigned long *bitsp)
{
struct mcp251x_priv *priv = gpiochip_get_data(chip);
u8 mask, val;
+ int ret;
mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
@@ -561,14 +570,18 @@ mcp251x_gpio_set_multiple(struct gpio_chip *chip,
val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
if (!mask)
- return;
+ return 0;
mutex_lock(&priv->mcp_lock);
- mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ ret = mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
mutex_unlock(&priv->mcp_lock);
+ if (ret)
+ return ret;
priv->reg_bfpctrl &= ~mask;
priv->reg_bfpctrl |= val;
+
+ return 0;
}
static void mcp251x_gpio_restore(struct spi_device *spi)
@@ -594,8 +607,8 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
gpio->get_direction = mcp251x_gpio_get_direction;
gpio->get = mcp251x_gpio_get;
gpio->get_multiple = mcp251x_gpio_get_multiple;
- gpio->set = mcp251x_gpio_set;
- gpio->set_multiple = mcp251x_gpio_set_multiple;
+ gpio->set_rv = mcp251x_gpio_set;
+ gpio->set_multiple_rv = mcp251x_gpio_set_multiple;
gpio->base = -1;
gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
gpio->names = mcp251x_gpio_names;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index d924b053677b..6476add1c105 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -429,7 +429,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
&priv->can.fd.data_bittiming);
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
tx_conf_msg.tdco = cpu_to_le16(priv->can.fd.tdc.tdco);
tx_conf_msg.tdcf = cpu_to_le16(priv->can.fd.tdc.tdcf);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3f2e378199ab..81baec8eb1e5 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -515,7 +515,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
priv->devtype.cantype == XAXI_CANFD_2_0) {
/* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
- if (can_tdc_is_enabled(&priv->can)) {
+ if (can_fd_tdc_is_enabled(&priv->can)) {
if (priv->devtype.cantype == XAXI_CANFD)
btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.fd.tdc.tdco) |
XCAN_BRPR_TDC_ENABLE;
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index ebaa4a80d544..915008e8eff5 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -5,6 +5,7 @@ menuconfig B53
select NET_DSA_TAG_NONE
select NET_DSA_TAG_BRCM
select NET_DSA_TAG_BRCM_LEGACY
+ select NET_DSA_TAG_BRCM_LEGACY_FCS
select NET_DSA_TAG_BRCM_PREPEND
help
This driver adds support for Broadcom managed switch chips. It supports
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index dc2f4adac9bc..46978757c972 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -361,18 +361,23 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
- /* Include IMP port in dumb forwarding mode
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
- mgmt |= B53_MII_DUMB_FWDG_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
-
- /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
- * frames should be flooded or not.
- */
- b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
- mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ if (!is5325(dev)) {
+ /* Include IMP port in dumb forwarding mode */
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+
+ /* Look at B53_UC_FWD_EN and B53_MC_FWD_EN to decide whether
+ * frames should be flooded or not.
+ */
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_UC_FWD_EN | B53_MC_FWD_EN | B53_IPMC_FWD_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ } else {
+ b53_read8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, &mgmt);
+ mgmt |= B53_IP_MCAST_25;
+ b53_write8(dev, B53_CTRL_PAGE, B53_IP_MULTICAST_CTRL, mgmt);
+ }
}
static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
@@ -487,6 +492,9 @@ static int b53_flush_arl(struct b53_device *dev, u8 mask)
{
unsigned int i;
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
@@ -511,6 +519,9 @@ out:
static int b53_fast_age_port(struct b53_device *dev, int port)
{
+ if (is5325(dev))
+ return 0;
+
b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
return b53_flush_arl(dev, FAST_AGE_PORT);
@@ -518,6 +529,9 @@ static int b53_fast_age_port(struct b53_device *dev, int port)
static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
{
+ if (is5325(dev))
+ return 0;
+
b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
return b53_flush_arl(dev, FAST_AGE_VLAN);
@@ -529,6 +543,10 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
unsigned int i;
u16 pvlan;
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
/* Enable the IMP port to be in the same VLAN as the other ports
* on a per-port basis such that we only have Port i and IMP in
* the same VLAN.
@@ -546,12 +564,24 @@ static void b53_port_set_ucast_flood(struct b53_device *dev, int port,
{
u16 uc;
- b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
- if (unicast)
- uc |= BIT(port);
- else
- uc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
+
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &uc);
+ if (unicast)
+ uc |= BIT(port) | B53_IEEE_UCAST_DROP_EN;
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, uc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, &uc);
+ if (unicast)
+ uc |= BIT(port);
+ else
+ uc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_UC_FLOOD_MASK, uc);
+ }
}
static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
@@ -559,19 +589,31 @@ static void b53_port_set_mcast_flood(struct b53_device *dev, int port,
{
u16 mc;
- b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+ if (is5325(dev)) {
+ if (port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
- b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
- if (multicast)
- mc |= BIT(port);
- else
- mc &= ~BIT(port);
- b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, &mc);
+ if (multicast)
+ mc |= BIT(port) | B53_IEEE_MCAST_DROP_EN;
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_MCAST_DLF, mc);
+ } else {
+ b53_read16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_MC_FLOOD_MASK, mc);
+
+ b53_read16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, &mc);
+ if (multicast)
+ mc |= BIT(port);
+ else
+ mc &= ~BIT(port);
+ b53_write16(dev, B53_CTRL_PAGE, B53_IPMC_FLOOD_MASK, mc);
+ }
}
static void b53_port_set_learning(struct b53_device *dev, int port,
@@ -579,6 +621,9 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
{
u16 reg;
+ if (is5325(dev))
+ return;
+
b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, &reg);
if (learning)
reg &= ~BIT(port);
@@ -615,6 +660,19 @@ int b53_setup_port(struct dsa_switch *ds, int port)
if (dsa_is_user_port(ds, port))
b53_set_eap_mode(dev, port, EAP_MODE_SIMPLIFIED);
+ if (is5325(dev) &&
+ in_range(port, 1, 4)) {
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, &reg);
+ reg &= ~PD_MODE_POWER_DOWN_PORT(0);
+ if (dsa_is_unused_port(ds, port))
+ reg |= PD_MODE_POWER_DOWN_PORT(port);
+ else
+ reg &= ~PD_MODE_POWER_DOWN_PORT(port);
+ b53_write8(dev, B53_CTRL_PAGE, B53_PD_MODE_CTRL_25, reg);
+ }
+
return 0;
}
EXPORT_SYMBOL(b53_setup_port);
@@ -713,6 +771,11 @@ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
hdr_ctl |= GC_FRM_MGMT_PORT_M;
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, hdr_ctl);
+ /* B53_BRCM_HDR not present on devices with legacy tags */
+ if (dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY ||
+ dev->tag_protocol == DSA_TAG_PROTO_BRCM_LEGACY_FCS)
+ return;
+
/* Enable Broadcom tags for IMP port */
b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
if (tag_en)
@@ -1257,6 +1320,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1281,6 +1346,8 @@ static void b53_force_port_config(struct b53_device *dev, int port,
if (port == dev->imp_port) {
off = B53_PORT_OVERRIDE_CTRL;
val = PORT_OVERRIDE_EN;
+ } else if (is5325(dev)) {
+ return;
} else {
off = B53_GMII_PORT_OVERRIDE_CTRL(port);
val = GMII_PO_EN;
@@ -1311,10 +1378,19 @@ static void b53_force_port_config(struct b53_device *dev, int port,
return;
}
- if (rx_pause)
- reg |= PORT_OVERRIDE_RX_FLOW;
- if (tx_pause)
- reg |= PORT_OVERRIDE_TX_FLOW;
+ if (rx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ if (tx_pause) {
+ if (is5325(dev))
+ reg |= PORT_OVERRIDE_LP_FLOW_25;
+ else
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ }
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
@@ -1764,6 +1840,45 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
+static int b53_arl_read_25(struct b53_device *dev, u64 mac,
+ u16 vid, struct b53_arl_entry *ent, u8 *idx)
+{
+ DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
+ unsigned int i;
+ int ret;
+
+ ret = b53_arl_op_wait(dev);
+ if (ret)
+ return ret;
+
+ bitmap_zero(free_bins, dev->num_arl_bins);
+
+ /* Read the bins */
+ for (i = 0; i < dev->num_arl_bins; i++) {
+ u64 mac_vid;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
+
+ b53_arl_to_entry_25(ent, mac_vid);
+
+ if (!(mac_vid & ARLTBL_VALID_25)) {
+ set_bit(i, free_bins);
+ continue;
+ }
+ if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ continue;
+ if (dev->vlan_enabled &&
+ ((mac_vid >> ARLTBL_VID_S_65) & ARLTBL_VID_MASK_25) != vid)
+ continue;
+ *idx = i;
+ return 0;
+ }
+
+ *idx = find_first_bit(free_bins, dev->num_arl_bins);
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
+}
+
static int b53_arl_op(struct b53_device *dev, int op, int port,
const unsigned char *addr, u16 vid, bool is_valid)
{
@@ -1778,14 +1893,18 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
/* Perform a read for the given MAC and VID */
b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
- b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+ if (!is5325m(dev))
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
/* Issue a read operation for this MAC */
ret = b53_arl_rw_op(dev, 1);
if (ret)
return ret;
- ret = b53_arl_read(dev, mac, vid, &ent, &idx);
+ if (is5325(dev) || is5365(dev))
+ ret = b53_arl_read_25(dev, mac, vid, &ent, &idx);
+ else
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx);
/* If this is a read, just finish now */
if (op)
@@ -1829,12 +1948,17 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
ent.is_static = true;
ent.is_age = false;
memcpy(ent.mac, addr, ETH_ALEN);
- b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+ if (is5325(dev) || is5365(dev))
+ b53_arl_from_entry_25(&mac_vid, &ent);
+ else
+ b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
b53_write64(dev, B53_ARLIO_PAGE,
B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
- b53_write32(dev, B53_ARLIO_PAGE,
- B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+
+ if (!is5325(dev) && !is5365(dev))
+ b53_write32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
return b53_arl_rw_op(dev, 0);
}
@@ -1846,12 +1970,6 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
int ret;
- /* 5325 and 5365 require some more massaging, but could
- * be supported eventually
- */
- if (is5325(priv) || is5365(priv))
- return -EOPNOTSUPP;
-
mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, addr, vid, true);
mutex_unlock(&priv->arl_mutex);
@@ -1878,10 +1996,15 @@ EXPORT_SYMBOL(b53_fdb_del);
static int b53_arl_search_wait(struct b53_device *dev)
{
unsigned int timeout = 1000;
- u8 reg;
+ u8 reg, offset;
+
+ if (is5325(dev) || is5365(dev))
+ offset = B53_ARL_SRCH_CTL_25;
+ else
+ offset = B53_ARL_SRCH_CTL;
do {
- b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ b53_read8(dev, B53_ARLIO_PAGE, offset, &reg);
if (!(reg & ARL_SRCH_STDN))
return 0;
@@ -1898,13 +2021,24 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
struct b53_arl_entry *ent)
{
u64 mac_vid;
- u32 fwd_entry;
- b53_read64(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
- b53_read32(dev, B53_ARLIO_PAGE,
- B53_ARL_SRCH_RSTL(idx), &fwd_entry);
- b53_arl_to_entry(ent, mac_vid, fwd_entry);
+ if (is5325(dev)) {
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_25,
+ &mac_vid);
+ b53_arl_to_entry_25(ent, mac_vid);
+ } else if (is5365(dev)) {
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_0_MACVID_65,
+ &mac_vid);
+ b53_arl_to_entry_25(ent, mac_vid);
+ } else {
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL_MACVID(idx),
+ &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_RSTL(idx),
+ &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+ }
}
static int b53_fdb_copy(int port, const struct b53_arl_entry *ent,
@@ -1925,14 +2059,20 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
struct b53_arl_entry results[2];
unsigned int count = 0;
+ u8 offset;
int ret;
u8 reg;
mutex_lock(&priv->arl_mutex);
+ if (is5325(priv) || is5365(priv))
+ offset = B53_ARL_SRCH_CTL_25;
+ else
+ offset = B53_ARL_SRCH_CTL;
+
/* Start search operation */
reg = ARL_SRCH_STDN;
- b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+ b53_write8(priv, B53_ARLIO_PAGE, offset, reg);
do {
ret = b53_arl_search_wait(priv);
@@ -2165,7 +2305,13 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_LEARNING))
+ struct b53_device *dev = ds->priv;
+ unsigned long mask = (BR_FLOOD | BR_MCAST_FLOOD);
+
+ if (!is5325(dev))
+ mask |= BR_LEARNING;
+
+ if (flags.mask & ~mask)
return -EINVAL;
return 0;
@@ -2241,8 +2387,11 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
goto out;
}
- /* Older models require a different 6 byte tag */
- if (is5325(dev) || is5365(dev) || is63xx(dev)) {
+ /* Older models require different 6 byte tags */
+ if (is5325(dev) || is5365(dev)) {
+ dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY_FCS;
+ goto out;
+ } else if (is63xx(dev)) {
dev->tag_protocol = DSA_TAG_PROTO_BRCM_LEGACY;
goto out;
}
@@ -2830,6 +2979,9 @@ static int b53_switch_init(struct b53_device *dev)
}
}
+ if (is5325e(dev))
+ dev->num_arl_buckets = 512;
+
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
@@ -2931,10 +3083,24 @@ int b53_switch_detect(struct b53_device *dev)
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
- if (tmp == 0xf)
+ if (tmp == 0xf) {
+ u32 phy_id;
+ int val;
+
dev->chip_id = BCM5325_DEVICE_ID;
- else
+
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID1);
+ phy_id = (val & 0xffff) << 16;
+ val = b53_phy_read16(dev->ds, 0, MII_PHYSID2);
+ phy_id |= (val & 0xfff0);
+
+ if (phy_id == 0x00406330)
+ dev->variant_id = B53_VARIANT_5325M;
+ else if (phy_id == 0x0143bc30)
+ dev->variant_id = B53_VARIANT_5325E;
+ } else {
dev->chip_id = BCM5365_DEVICE_ID;
+ }
break;
case BCM5389_DEVICE_ID:
case BCM5395_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index a5ef7071ba07..b1b9e8882ba4 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -84,6 +84,12 @@ enum {
BCM53134_DEVICE_ID = 0x5075,
};
+enum b53_variant_id {
+ B53_VARIANT_NONE = 0,
+ B53_VARIANT_5325E,
+ B53_VARIANT_5325M,
+};
+
struct b53_pcs {
struct phylink_pcs pcs;
struct b53_device *dev;
@@ -118,6 +124,7 @@ struct b53_device {
/* chip specific data */
u32 chip_id;
+ enum b53_variant_id variant_id;
u8 core_rev;
u8 vta_regs[3];
u8 duplex_reg;
@@ -165,6 +172,18 @@ static inline int is5325(struct b53_device *dev)
return dev->chip_id == BCM5325_DEVICE_ID;
}
+static inline int is5325e(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325E;
+}
+
+static inline int is5325m(struct b53_device *dev)
+{
+ return is5325(dev) &&
+ dev->variant_id == B53_VARIANT_5325M;
+}
+
static inline int is5365(struct b53_device *dev)
{
#ifdef CONFIG_BCM47XX
@@ -298,6 +317,19 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
ent->vid = mac_vid >> ARLTBL_VID_S;
}
+static inline void b53_arl_to_entry_25(struct b53_arl_entry *ent,
+ u64 mac_vid)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = (mac_vid >> ARLTBL_DATA_PORT_ID_S_25) &
+ ARLTBL_DATA_PORT_ID_MASK_25;
+ ent->is_valid = !!(mac_vid & ARLTBL_VALID_25);
+ ent->is_age = !!(mac_vid & ARLTBL_AGE_25);
+ ent->is_static = !!(mac_vid & ARLTBL_STATIC_25);
+ u64_to_ether_addr(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S_65;
+}
+
static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
const struct b53_arl_entry *ent)
{
@@ -312,6 +344,22 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
*fwd_entry |= ARLTBL_AGE;
}
+static inline void b53_arl_from_entry_25(u64 *mac_vid,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = ether_addr_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->port & ARLTBL_DATA_PORT_ID_MASK_25) <<
+ ARLTBL_DATA_PORT_ID_S_25;
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK_25) <<
+ ARLTBL_VID_S_65;
+ if (ent->is_valid)
+ *mac_vid |= ARLTBL_VALID_25;
+ if (ent->is_static)
+ *mac_vid |= ARLTBL_STATIC_25;
+ if (ent->is_age)
+ *mac_vid |= ARLTBL_AGE_25;
+}
+
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
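For reference, a minimal sketch of how the new BCM5325 ARL helpers round-trip an entry through the 64-bit mac_vid word. This is illustrative only and not part of the patch; the function name, MAC address and field values are arbitrary, and ether_addr_copy()/ETH_ALEN are assumed to come from the driver's usual ethernet headers:

static void b53_arl_25_roundtrip_sketch(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct b53_arl_entry ent = {
		.port = 3,
		.vid = 1,
		.is_valid = true,
		.is_static = true,
	}, decoded;
	u64 mac_vid;

	ether_addr_copy(ent.mac, mac);

	/* Pack: MAC in the low 48 bits, port, VID and the valid/static/age
	 * flags in the upper bits of the 5325 layout
	 */
	b53_arl_from_entry_25(&mac_vid, &ent);

	/* Unpack: decoded mirrors ent for the fields the 5325 format carries */
	b53_arl_to_entry_25(&decoded, mac_vid);
}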
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index 1fbc5a204bc7..309fe0e46dad 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -29,6 +29,7 @@
#define B53_ARLIO_PAGE 0x05 /* ARL Access */
#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+#define B53_IEEE_PAGE 0x0a /* IEEE 802.1X */
/* PHY Registers */
#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
@@ -95,17 +96,22 @@
#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_LP_FLOW_25 BIT(3) /* BCM5325 only */
#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
#define PORT_OVERRIDE_RX_FLOW BIT(4)
#define PORT_OVERRIDE_TX_FLOW BIT(5)
#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
-/* Power-down mode control */
+/* Power-down mode control (8 bit) */
#define B53_PD_MODE_CTRL_25 0x0f
+#define PD_MODE_PORT_MASK 0x1f
+/* Bit 0 also powers down the switch. */
+#define PD_MODE_POWER_DOWN_PORT(i) BIT(i)
/* IP Multicast control (8 bit) */
#define B53_IP_MULTICAST_CTRL 0x21
+#define B53_IP_MCAST_25 BIT(0)
#define B53_IPMC_FWD_EN BIT(1)
#define B53_UC_FWD_EN BIT(6)
#define B53_MC_FWD_EN BIT(7)
@@ -324,9 +330,10 @@
#define ARLTBL_VID_MASK 0xfff
#define ARLTBL_DATA_PORT_ID_S_25 48
#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
-#define ARLTBL_AGE_25 BIT(61)
-#define ARLTBL_STATIC_25 BIT(62)
-#define ARLTBL_VALID_25 BIT(63)
+#define ARLTBL_VID_S_65 53
+#define ARLTBL_AGE_25 BIT_ULL(61)
+#define ARLTBL_STATIC_25 BIT_ULL(62)
+#define ARLTBL_VALID_25 BIT_ULL(63)
/* ARL Table Data Entry N Registers (32 bit) */
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
@@ -366,6 +373,18 @@
#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
/*************************************************************************
+ * IEEE 802.1X Registers
+ *************************************************************************/
+
+/* Multicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_MCAST_DLF 0x94
+#define B53_IEEE_MCAST_DROP_EN BIT(11)
+
+/* Unicast DLF Drop Control register (16 bit) */
+#define B53_IEEE_UCAST_DLF 0x96
+#define B53_IEEE_UCAST_DROP_EN BIT(11)
+
+/*************************************************************************
* Port VLAN Registers
*************************************************************************/
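As a usage illustration for the new IEEE page defines above, a hedged sketch of how a caller might enable unicast DLF (destination-lookup-failure) dropping. It assumes the driver's existing 16-bit register accessors and is not part of the patch:

static void b53_example_drop_ucast_dlf(struct b53_device *dev)
{
	u16 reg;

	/* Set the drop-enable bit in the unicast DLF control register */
	b53_read16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, &reg);
	reg |= B53_IEEE_UCAST_DROP_EN;
	b53_write16(dev, B53_IEEE_PAGE, B53_IEEE_UCAST_DLF, reg);
}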
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7c142c17b3f6..6e1daf0018bc 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2786,8 +2786,7 @@ static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
kirq->dev = dev;
kirq->masked = ~0;
- kirq->domain = irq_domain_create_simple(of_fwnode_handle(dev->dev->of_node),
- kirq->nirqs, 0,
+ kirq->domain = irq_domain_create_simple(dev_fwnode(dev->dev), kirq->nirqs, 0,
&ksz_irq_domain_ops, kirq);
if (!kirq->domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 8ab664e85f13..35fc21b1ee48 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -1130,8 +1130,8 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
init_completion(&port->tstamp_msg_comp);
- ptpirq->domain = irq_domain_create_linear(of_fwnode_handle(dev->dev->of_node),
- ptpirq->nirqs, &ksz_ptp_irq_domain_ops, ptpirq);
+ ptpirq->domain = irq_domain_create_linear(dev_fwnode(dev->dev), ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
if (!ptpirq->domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index df213c37b4fe..e5bed4237ff4 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2112,7 +2112,7 @@ mt7530_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(mt7530_read(priv, MT7530_LED_GPIO_DATA) & bit);
}
-static void
+static int
mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct mt7530_priv *priv = gpiochip_get_data(gc);
@@ -2122,6 +2122,8 @@ mt7530_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
mt7530_set(priv, MT7530_LED_GPIO_DATA, bit);
else
mt7530_clear(priv, MT7530_LED_GPIO_DATA, bit);
+
+ return 0;
}
static int
@@ -2185,7 +2187,7 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
gc->direction_input = mt7530_gpio_direction_input;
gc->direction_output = mt7530_gpio_direction_output;
gc->get = mt7530_gpio_get;
- gc->set = mt7530_gpio_set;
+ gc->set_rv = mt7530_gpio_set;
gc->base = -1;
gc->ngpio = 15;
gc->can_sleep = true;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index aaf97c1e3167..30a6ffa7817b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -1154,10 +1154,8 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- chip->g2_irq.domain = irq_domain_create_simple(of_fwnode_handle(chip->dev->of_node),
- 16, 0,
- &mv88e6xxx_g2_irq_domain_ops,
- chip);
+ chip->g2_irq.domain = irq_domain_create_simple(dev_fwnode(chip->dev), 16, 0,
+ &mv88e6xxx_g2_irq_domain_ops, chip);
if (!chip->g2_irq.domain)
return -ENOMEM;
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 79a29676ca6f..0526aa96146e 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -821,8 +821,8 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return ret;
}
- priv->irqdomain = irq_domain_create_linear(of_fwnode_handle(np), 1,
- &ar9331_sw_irqdomain_ops, priv);
+ priv->irqdomain = irq_domain_create_linear(dev_fwnode(dev), 1, &ar9331_sw_irqdomain_ops,
+ priv);
if (!priv->irqdomain) {
dev_err(dev, "failed to create IRQ domain\n");
return -EINVAL;
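The ksz, mv88e6xxx and ar9331 hunks above all apply the same conversion: dev_fwnode() replaces of_fwnode_handle(dev->of_node), which keeps the code working for both OF- and ACPI-described devices. A generic sketch of the resulting pattern (names other than the irqdomain API are hypothetical):

static int example_create_irq_domain(struct device *dev, void *host_data)
{
	struct irq_domain *domain;

	/* dev_fwnode() resolves to the OF node or the ACPI companion */
	domain = irq_domain_create_linear(dev_fwnode(dev), 8,
					  &irq_domain_simple_ops, host_data);

	return domain ? 0 : -ENOMEM;
}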
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index f18aa321053d..4f9687ab3b2b 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -2258,14 +2258,14 @@ static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset));
}
-static void vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int vsc73xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct vsc73xx *vsc = gpiochip_get_data(chip);
u32 tmp = val ? BIT(offset) : 0;
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
- VSC73XX_GPIO, BIT(offset), tmp);
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_SYSTEM, 0,
+ VSC73XX_GPIO, BIT(offset), tmp);
}
static int vsc73xx_gpio_direction_output(struct gpio_chip *chip,
@@ -2317,7 +2317,7 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.parent = vsc->dev;
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
- vsc->gc.set = vsc73xx_gpio_set;
+ vsc->gc.set_rv = vsc73xx_gpio_set;
vsc->gc.direction_input = vsc73xx_gpio_direction_input;
vsc->gc.direction_output = vsc73xx_gpio_direction_output;
vsc->gc.get_direction = vsc73xx_gpio_get_direction;
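Both GPIO conversions above move from the void .set callback to the int-returning .set_rv so that bus errors reach gpiolib. A minimal sketch of the pattern, assuming the usual regmap/gpiolib headers (the foo_* names and FOO_GPIO_DATA register are hypothetical):

struct foo_priv {
	struct regmap *map;
	struct gpio_chip gc;
};

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_priv *priv = gpiochip_get_data(gc);

	/* Return the bus write status instead of swallowing it */
	return regmap_update_bits(priv->map, FOO_GPIO_DATA, BIT(offset),
				  value ? BIT(offset) : 0);
}

The registration side then assigns gc->set_rv = foo_gpio_set in place of the old void-returning gc->set, exactly as the two hunks above do.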
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 0e217acfc5ef..c354d536bc66 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -232,6 +232,7 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
AIROHA_FOE_IB1_BIND_TTL;
hwe->ib1 = val;
@@ -281,33 +282,42 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
hwe->ipv6.data = qdata;
hwe->ipv6.ib2 = val;
l2 = &hwe->ipv6.l2;
+ l2->etype = ETH_P_IPV6;
} else {
hwe->ipv4.data = qdata;
hwe->ipv4.ib2 = val;
l2 = &hwe->ipv4.l2.common;
+ l2->etype = ETH_P_IP;
}
l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ struct airoha_foe_mac_info *mac_info;
+
l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
hwe->ipv4.l2.src_mac_lo =
get_unaligned_be16(data->eth.h_source + 4);
+
+ mac_info = (struct airoha_foe_mac_info *)l2;
+ mac_info->pppoe_id = data->pppoe.sid;
} else {
- l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
+ FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
+ data->pppoe.sid);
}
if (data->vlan.num) {
- l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
l2->vlan1 = data->vlan.hdr[0].id;
if (data->vlan.num == 2)
l2->vlan2 = data->vlan.hdr[1].id;
- } else if (dsa_port >= 0) {
- l2->etype = BIT(15) | BIT(dsa_port);
- } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
- l2->etype = ETH_P_IPV6;
- } else {
- l2->etype = ETH_P_IP;
+ }
+
+ if (dsa_port >= 0) {
+ l2->etype = BIT(dsa_port);
+ l2->etype |= !data->vlan.num ? BIT(15) : 0;
+ } else if (data->pppoe.num) {
+ l2->etype = ETH_P_PPP_SES;
}
return 0;
@@ -959,6 +969,11 @@ static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1 || data.vlan.num == 2)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
index c37fa393b99e..95dcc3969f0c 100644
--- a/drivers/net/ethernet/amazon/Kconfig
+++ b/drivers/net/ethernet/amazon/Kconfig
@@ -19,7 +19,9 @@ if NET_VENDOR_AMAZON
config ENA_ETHERNET
tristate "Elastic Network Adapter (ENA) support"
depends on PCI_MSI && !CPU_BIG_ENDIAN
+ depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
+ select NET_DEVLINK
help
This driver supports Elastic Network Adapter (ENA)
diff --git a/drivers/net/ethernet/amazon/ena/Makefile b/drivers/net/ethernet/amazon/ena/Makefile
index 6ab615365172..6d8036bc1823 100644
--- a/drivers/net/ethernet/amazon/ena/Makefile
+++ b/drivers/net/ethernet/amazon/ena/Makefile
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
-ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
+ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o ena_phc.o ena_devlink.o ena_debugfs.o
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 9d9fa6559354..562869a0fdba 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -60,6 +60,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_AENQ_CONFIG = 26,
ENA_ADMIN_LINK_CONFIG = 27,
ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_PHC_CONFIG = 29,
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
@@ -127,6 +128,14 @@ enum ena_admin_get_stats_scope {
ENA_ADMIN_ETH_TRAFFIC = 1,
};
+enum ena_admin_phc_type {
+ ENA_ADMIN_PHC_TYPE_READLESS = 0,
+};
+
+enum ena_admin_phc_error_flags {
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP = BIT(0),
+};
+
/* ENA SRD configuration for ENI */
enum ena_admin_ena_srd_flags {
/* Feature enabled */
@@ -943,7 +952,9 @@ struct ena_admin_host_info {
* 4 : rss_configurable_function_key
* 5 : reserved
* 6 : rx_page_reuse
- * 31:7 : reserved
+ * 7 : reserved
+ * 8 : phc
+ * 31:9 : reserved
*/
u32 driver_supported_features;
};
@@ -1023,6 +1034,43 @@ struct ena_admin_queue_ext_feature_desc {
};
};
+struct ena_admin_feature_phc_desc {
+ /* PHC type as defined in enum ena_admin_phc_type,
+ * used only for GET command.
+ */
+ u8 type;
+
+ /* Reserved - MBZ */
+ u8 reserved1[3];
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR,
+ * used only for GET command.
+ */
+ u32 doorbell_offset;
+
+ /* Max time for a valid PHC retrieval; passing this threshold will
+ * fail the get-time request and block PHC requests for
+ * block_timeout_usec. Used only for GET command.
+ */
+ u32 expire_timeout_usec;
+
+ /* PHC request block period; blocking starts if a PHC request expires,
+ * in order to prevent floods on a busy device.
+ * Used only for GET command.
+ */
+ u32 block_timeout_usec;
+
+ /* Shared PHC physical address (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ struct ena_common_mem_addr output_address;
+
+ /* Shared PHC Size (ena_admin_phc_resp),
+ * used only for SET command.
+ */
+ u32 output_length;
+};
+
struct ena_admin_get_feat_resp {
struct ena_admin_acq_common_desc acq_common_desc;
@@ -1052,6 +1100,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
+
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1085,6 +1135,9 @@ struct ena_admin_set_feat_cmd {
/* LLQ configuration */
struct ena_admin_feature_llq_desc llq;
+
+ /* PHC configuration */
+ struct ena_admin_feature_phc_desc phc;
} u;
};
@@ -1162,6 +1215,23 @@ struct ena_admin_ena_mmio_req_read_less_resp {
u32 reg_val;
};
+struct ena_admin_phc_resp {
+ /* Request Id, received from DB register */
+ u16 req_id;
+
+ u8 reserved1[6];
+
+ /* PHC timestamp (nsec) */
+ u64 timestamp;
+
+ u8 reserved2[12];
+
+ /* Bit field of enum ena_admin_phc_error_flags */
+ u32 error_flags;
+
+ u8 reserved3[32];
+};
+
/* aq_common_desc */
#define ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -1260,6 +1330,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
+#define ENA_ADMIN_HOST_INFO_PHC_SHIFT 8
+#define ENA_ADMIN_HOST_INFO_PHC_MASK BIT(8)
/* aenq_common_desc */
#define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 66445617fbfb..e67b592e5697 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -41,6 +41,12 @@
#define ENA_MAX_ADMIN_POLL_US 5000
+/* PHC definitions */
+#define ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC 10
+#define ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC 1000
+#define ENA_PHC_REQ_ID_OFFSET 0xDEAD
+#define ENA_PHC_ERROR_FLAGS (ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP)
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -1641,6 +1647,267 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev)
+{
+ return ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_PHC_CONFIG);
+}
+
+int ena_com_phc_init(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+
+ memset(phc, 0x0, sizeof(*phc));
+
+ /* Allocate shared memory for the PHC timestamp retrieved from the device */
+ phc->virt_addr = dma_alloc_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ &phc->phys_addr,
+ GFP_KERNEL);
+ if (unlikely(!phc->virt_addr))
+ return -ENOMEM;
+
+ spin_lock_init(&phc->lock);
+
+ phc->virt_addr->req_id = 0;
+ phc->virt_addr->timestamp = 0;
+
+ return 0;
+}
+
+int ena_com_phc_config(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ struct ena_admin_get_feat_resp get_feat_resp;
+ struct ena_admin_set_feat_resp set_feat_resp;
+ struct ena_admin_set_feat_cmd set_feat_cmd;
+ int ret = 0;
+
+ /* Get device PHC default configuration */
+ ret = ena_com_get_feature(ena_dev,
+ &get_feat_resp,
+ ENA_ADMIN_PHC_CONFIG,
+ 0);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to get PHC feature configuration, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Supporting only readless PHC retrieval */
+ if (get_feat_resp.u.phc.type != ENA_ADMIN_PHC_TYPE_READLESS) {
+ netdev_err(ena_dev->net_device,
+ "Unsupported PHC type, error: %d\n",
+ -EOPNOTSUPP);
+ return -EOPNOTSUPP;
+ }
+
+ /* Update PHC doorbell offset according to device value,
+ * used to write req_id to PHC bar
+ */
+ phc->doorbell_offset = get_feat_resp.u.phc.doorbell_offset;
+
+ /* Update PHC expire timeout according to device
+ * or default driver value
+ */
+ phc->expire_timeout_usec = (get_feat_resp.u.phc.expire_timeout_usec) ?
+ get_feat_resp.u.phc.expire_timeout_usec :
+ ENA_PHC_DEFAULT_EXPIRE_TIMEOUT_USEC;
+
+ /* Update PHC block timeout according to device
+ * or default driver value
+ */
+ phc->block_timeout_usec = (get_feat_resp.u.phc.block_timeout_usec) ?
+ get_feat_resp.u.phc.block_timeout_usec :
+ ENA_PHC_DEFAULT_BLOCK_TIMEOUT_USEC;
+
+ /* Sanity check - expire timeout must not exceed block timeout */
+ if (phc->expire_timeout_usec > phc->block_timeout_usec)
+ phc->expire_timeout_usec = phc->block_timeout_usec;
+
+ /* Prepare PHC feature command */
+ memset(&set_feat_cmd, 0x0, sizeof(set_feat_cmd));
+ set_feat_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ set_feat_cmd.feat_common.feature_id = ENA_ADMIN_PHC_CONFIG;
+ set_feat_cmd.u.phc.output_length = sizeof(*phc->virt_addr);
+ ret = ena_com_mem_addr_set(ena_dev,
+ &set_feat_cmd.u.phc.output_address,
+ phc->phys_addr);
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed setting PHC output address, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Send PHC feature command to the device */
+ ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
+ (struct ena_admin_aq_entry *)&set_feat_cmd,
+ sizeof(set_feat_cmd),
+ (struct ena_admin_acq_entry *)&set_feat_resp,
+ sizeof(set_feat_resp));
+
+ if (unlikely(ret)) {
+ netdev_err(ena_dev->net_device,
+ "Failed to enable PHC, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ phc->active = true;
+ netdev_dbg(ena_dev->net_device, "PHC is active in the device\n");
+
+ return ret;
+}
+
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev)
+{
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ unsigned long flags = 0;
+
+ /* In case PHC is not supported by the device, exit silently */
+ if (!phc->virt_addr)
+ return;
+
+ spin_lock_irqsave(&phc->lock, flags);
+ phc->active = false;
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ dma_free_coherent(ena_dev->dmadev,
+ sizeof(*phc->virt_addr),
+ phc->virt_addr,
+ phc->phys_addr);
+ phc->virt_addr = NULL;
+}
+
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp)
+{
+ volatile struct ena_admin_phc_resp *resp = ena_dev->phc.virt_addr;
+ const ktime_t zero_system_time = ktime_set(0, 0);
+ struct ena_com_phc_info *phc = &ena_dev->phc;
+ ktime_t expire_time;
+ ktime_t block_time;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (!phc->active) {
+ netdev_err(ena_dev->net_device, "PHC feature is not active in the device\n");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&phc->lock, flags);
+
+ /* Check if PHC is in blocked state */
+ if (unlikely(ktime_compare(phc->system_time, zero_system_time))) {
+ /* Check if blocking time expired */
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ if (!ktime_after(ktime_get(), block_time)) {
+ /* PHC is still in blocked state, skip PHC request */
+ phc->stats.phc_skp++;
+ ret = -EBUSY;
+ goto skip;
+ }
+
+ /* Blocking time has passed, so PHC returns to the active state;
+ * update statistics according to req_id and error_flags
+ */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* Device didn't update req_id during the blocking time,
+ * which indicates a device error
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (device error)\n",
+ phc->req_id);
+ phc->stats.phc_err_dv++;
+ } else if (resp->error_flags & ENA_PHC_ERROR_FLAGS) {
+ /* Device updated req_id during the blocking time but got
+ * a PHC error; this occurs if the device:
+ * - exceeded the get-time request limit
+ * - received an invalid timestamp
+ */
+ netdev_err(ena_dev->net_device,
+ "PHC get time request 0x%x failed (error 0x%x)\n",
+ phc->req_id,
+ resp->error_flags);
+ phc->stats.phc_err_ts += !!(resp->error_flags &
+ ENA_ADMIN_PHC_ERROR_FLAG_TIMESTAMP);
+ } else {
+ /* Device updated req_id during blocking time
+ * with valid timestamp
+ */
+ phc->stats.phc_exp++;
+ }
+ }
+
+ /* Setting relative timeouts */
+ phc->system_time = ktime_get();
+ block_time = ktime_add_us(phc->system_time, phc->block_timeout_usec);
+ expire_time = ktime_add_us(phc->system_time, phc->expire_timeout_usec);
+
+ /* We expect the device to return this req_id once
+ * the new PHC timestamp is updated
+ */
+ phc->req_id++;
+
+ /* Initialize the PHC shared memory with a different req_id value
+ * so we can detect once the device updates it to req_id
+ */
+ resp->req_id = phc->req_id + ENA_PHC_REQ_ID_OFFSET;
+
+ /* Writing req_id to PHC bar */
+ writel(phc->req_id, ena_dev->reg_bar + phc->doorbell_offset);
+
+ /* Stalling until the device updates req_id */
+ while (1) {
+ if (unlikely(ktime_after(ktime_get(), expire_time))) {
+ /* Gave up waiting for an updated req_id; PHC enters a blocked
+ * state until the blocking time passes, and during this time
+ * any PHC get-time request will fail with a device busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Check if req_id was updated by the device */
+ if (READ_ONCE(resp->req_id) != phc->req_id) {
+ /* req_id was not updated by the device yet,
+ * check again on next loop
+ */
+ continue;
+ }
+
+ /* req_id was updated by the device, which indicates that the
+ * PHC timestamp and error_flags are updated too; check errors
+ * before retrieving the timestamp
+ */
+ if (unlikely(resp->error_flags & ENA_PHC_ERROR_FLAGS)) {
+ /* Retrieved an invalid PHC timestamp; PHC enters a blocked
+ * state until the blocking time passes, and during this time
+ * any PHC get-time request will fail with a device busy error
+ */
+ ret = -EBUSY;
+ break;
+ }
+
+ /* PHC timestamp value is returned to the caller */
+ *timestamp = resp->timestamp;
+
+ /* Update statistic on valid PHC timestamp retrieval */
+ phc->stats.phc_cnt++;
+
+ /* This indicates PHC state is active */
+ phc->system_time = zero_system_time;
+ break;
+ }
+
+skip:
+ spin_unlock_irqrestore(&phc->lock, flags);
+
+ return ret;
+}
+
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 9414e93d107b..64df2c48c9a6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -210,6 +210,14 @@ struct ena_com_stats_admin {
u64 no_completion;
};
+struct ena_com_stats_phc {
+ u64 phc_cnt;
+ u64 phc_exp;
+ u64 phc_skp;
+ u64 phc_err_dv;
+ u64 phc_err_ts;
+};
+
struct ena_com_admin_queue {
void *q_dmadev;
struct ena_com_dev *ena_dev;
@@ -258,6 +266,47 @@ struct ena_com_mmio_read {
spinlock_t lock;
};
+/* PTP hardware clock (PHC) MMIO read data info */
+struct ena_com_phc_info {
+ /* Internal PHC statistics */
+ struct ena_com_stats_phc stats;
+
+ /* PHC shared memory - virtual address */
+ struct ena_admin_phc_resp *virt_addr;
+
+ /* System time of last PHC request */
+ ktime_t system_time;
+
+ /* Spin lock to ensure a single outstanding PHC read */
+ spinlock_t lock;
+
+ /* PHC doorbell address as an offset to PCIe MMIO REG BAR */
+ u32 doorbell_offset;
+
+ /* Shared memory read expire timeout (usec)
+ * Max time for a valid PHC retrieval; passing this threshold fails
+ * the get-time request and blocks new PHC requests for
+ * block_timeout_usec in order to prevent floods on a busy device
+ */
+ u32 expire_timeout_usec;
+
+ /* Shared memory read abort timeout (usec)
+ * PHC request block period; blocking starts once a PHC request expires,
+ * in order to prevent floods on a busy device.
+ * Any PHC request during the block period is skipped
+ */
+ u32 block_timeout_usec;
+
+ /* PHC shared memory - physical address */
+ dma_addr_t phys_addr;
+
+ /* Request id sent to the device */
+ u16 req_id;
+
+ /* True if PHC is active in the device */
+ bool active;
+};
+
struct ena_rss {
/* Indirect table */
u16 *host_rss_ind_tbl;
@@ -317,6 +366,7 @@ struct ena_com_dev {
u32 ena_min_poll_delay_us;
struct ena_com_mmio_read mmio_read;
+ struct ena_com_phc_info phc;
struct ena_rss rss;
u32 supported_features;
@@ -382,6 +432,40 @@ struct ena_aenq_handlers {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
+/* ena_com_phc_init - Allocate and initialize PHC feature
+ * @ena_dev: ENA communication layer struct
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_init(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_supported - Return if PHC feature is supported by the device
+ * @ena_dev: ENA communication layer struct
+ * @note: This method must be called after getting supported features
+ * @return - supported or not
+ */
+bool ena_com_phc_supported(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_config - Configure PHC feature
+ * @ena_dev: ENA communication layer struct
+ * Configure PHC feature in driver and device
+ * @note: This method assumes PHC is supported by the device
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_config(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_destroy - Destroy PHC feature
+ * @ena_dev: ENA communication layer struct
+ */
+void ena_com_phc_destroy(struct ena_com_dev *ena_dev);
+
+/* ena_com_phc_get_timestamp - Retrieve PHC timestamp
+ * @ena_dev: ENA communication layer struct
+ * @timestamp: Retrieved PHC timestamp
+ * @return - 0 on success, negative value on failure
+ */
+int ena_com_phc_get_timestamp(struct ena_com_dev *ena_dev, u64 *timestamp);
+
/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
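Taken together, the kernel-doc above implies the following call order for the new com-layer PHC API. This is an illustrative sketch with error handling trimmed to the essentials, not code from the patch:

static int example_phc_bringup(struct ena_com_dev *ena_dev, u64 *timestamp)
{
	int rc;

	if (!ena_com_phc_supported(ena_dev))
		return -EOPNOTSUPP;

	rc = ena_com_phc_init(ena_dev);		/* allocate shared memory */
	if (rc)
		return rc;

	rc = ena_com_phc_config(ena_dev);	/* negotiate timeouts, enable */
	if (rc)
		goto destroy;

	rc = ena_com_phc_get_timestamp(ena_dev, timestamp);
	if (rc)
		goto destroy;

	return 0;

destroy:
	ena_com_phc_destroy(ena_dev);
	return rc;
}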
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.c b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
new file mode 100644
index 000000000000..46ed80986724
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include "ena_debugfs.h"
+#include "ena_phc.h"
+
+static int phc_stats_show(struct seq_file *file, void *priv)
+{
+ struct ena_adapter *adapter = file->private;
+
+ if (!ena_phc_is_active(adapter))
+ return 0;
+
+ seq_printf(file,
+ "phc_cnt: %llu\n",
+ adapter->ena_dev->phc.stats.phc_cnt);
+ seq_printf(file,
+ "phc_exp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_exp);
+ seq_printf(file,
+ "phc_skp: %llu\n",
+ adapter->ena_dev->phc.stats.phc_skp);
+ seq_printf(file,
+ "phc_err_dv: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_dv);
+ seq_printf(file,
+ "phc_err_ts: %llu\n",
+ adapter->ena_dev->phc.stats.phc_err_ts);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(phc_stats);
+
+void ena_debugfs_init(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ adapter->debugfs_base =
+ debugfs_create_dir(dev_name(&adapter->pdev->dev), NULL);
+
+ debugfs_create_file("phc_stats",
+ 0400,
+ adapter->debugfs_base,
+ adapter,
+ &phc_stats_fops);
+}
+
+void ena_debugfs_terminate(struct net_device *dev)
+{
+ struct ena_adapter *adapter = netdev_priv(dev);
+
+ debugfs_remove_recursive(adapter->debugfs_base);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/amazon/ena/ena_debugfs.h b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
new file mode 100644
index 000000000000..dc61dd998867
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_debugfs.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#ifndef __ENA_DEBUGFS_H__
+#define __ENA_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include "ena_netdev.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+void ena_debugfs_init(struct net_device *dev);
+
+void ena_debugfs_terminate(struct net_device *dev);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ena_debugfs_init(struct net_device *dev) {}
+
+static inline void ena_debugfs_terminate(struct net_device *dev) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __ENA_DEBUGFS_H__ */
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.c b/drivers/net/ethernet/amazon/ena/ena_devlink.c
new file mode 100644
index 000000000000..ac81c24016dd
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+
+#include "linux/pci.h"
+#include "ena_devlink.h"
+#include "ena_phc.h"
+
+static int ena_devlink_enable_phc_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (!val.vbool)
+ return 0;
+
+ if (!ena_com_phc_supported(adapter->ena_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support PHC");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ena_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_PHC,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL,
+ NULL,
+ ena_devlink_enable_phc_validate),
+};
+
+void ena_devlink_params_get(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ &val);
+ if (err) {
+ netdev_err(adapter->netdev, "Failed to query PHC param\n");
+ return;
+ }
+
+ ena_phc_enable(adapter, val.vbool);
+}
+
+void ena_devlink_disable_phc_param(struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vbool = false;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+}
+
+static void ena_devlink_port_register(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ struct devlink_port_attrs attrs = {};
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(&adapter->devlink_port, &attrs);
+ devl_port_register(devlink, &adapter->devlink_port, 0);
+}
+
+static void ena_devlink_port_unregister(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ devl_port_unregister(&adapter->devlink_port);
+}
+
+static int ena_devlink_reload_down(struct devlink *devlink,
+ bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+
+ if (netns_change) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Namespace change is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ ena_devlink_port_unregister(devlink);
+
+ rtnl_lock();
+ ena_destroy_device(adapter, false);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int ena_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ int err = 0;
+
+ rtnl_lock();
+ /* Check that no other routine initialized the device (e.g.
+ * ena_fw_reset_device()). Also we're under devlink_mutex here,
+ * so devlink isn't freed under our feet.
+ */
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ err = ena_restore_device(adapter);
+
+ rtnl_unlock();
+
+ ena_devlink_port_register(devlink);
+
+ if (!err)
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+
+ return err;
+}
+
+static const struct devlink_ops ena_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = ena_devlink_reload_down,
+ .reload_up = ena_devlink_reload_up,
+};
+
+static int ena_devlink_configure_params(struct devlink *devlink)
+{
+ struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+ union devlink_param_value value;
+ int rc;
+
+ rc = devlink_params_register(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+ if (rc) {
+ netdev_err(adapter->netdev, "Failed to register devlink params\n");
+ return rc;
+ }
+
+ value.vbool = ena_phc_is_enabled(adapter);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ value);
+
+ return 0;
+}
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&ena_devlink_ops,
+ sizeof(struct ena_adapter *),
+ dev);
+ if (!devlink) {
+ netdev_err(adapter->netdev,
+ "Failed to allocate devlink struct\n");
+ return NULL;
+ }
+
+ ENA_DEVLINK_PRIV(devlink) = adapter;
+ adapter->devlink = devlink;
+
+ if (ena_devlink_configure_params(devlink))
+ goto free_devlink;
+
+ return devlink;
+
+free_devlink:
+ devlink_free(devlink);
+ return NULL;
+}
+
+static void ena_devlink_configure_params_clean(struct devlink *devlink)
+{
+ devlink_params_unregister(devlink, ena_devlink_params,
+ ARRAY_SIZE(ena_devlink_params));
+}
+
+void ena_devlink_free(struct devlink *devlink)
+{
+ ena_devlink_configure_params_clean(devlink);
+
+ devlink_free(devlink);
+}
+
+void ena_devlink_register(struct devlink *devlink, struct device *dev)
+{
+ devl_lock(devlink);
+ ena_devlink_port_register(devlink);
+ devl_register(devlink);
+ devl_unlock(devlink);
+}
+
+void ena_devlink_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ ena_devlink_port_unregister(devlink);
+ devl_unregister(devlink);
+ devl_unlock(devlink);
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_devlink.h b/drivers/net/ethernet/amazon/ena/ena_devlink.h
new file mode 100644
index 000000000000..7a19ce4830d9
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) Amazon.com, Inc. or its affiliates.
+ * All rights reserved.
+ */
+#ifndef DEVLINK_H
+#define DEVLINK_H
+
+#include "ena_netdev.h"
+#include <net/devlink.h>
+
+#define ENA_DEVLINK_PRIV(devlink) \
+ (*(struct ena_adapter **)devlink_priv(devlink))
+
+struct devlink *ena_devlink_alloc(struct ena_adapter *adapter);
+void ena_devlink_free(struct devlink *devlink);
+void ena_devlink_register(struct devlink *devlink, struct device *dev);
+void ena_devlink_unregister(struct devlink *devlink);
+void ena_devlink_params_get(struct devlink *devlink);
+void ena_devlink_disable_phc_param(struct devlink *devlink);
+
+#endif /* DEVLINK_H */
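Because ena_devlink_alloc() passes sizeof(struct ena_adapter *) to devlink_alloc(), the devlink private area holds only a pointer to the adapter, hence the double indirection in ENA_DEVLINK_PRIV(). An equivalent open-coded helper, shown as a sketch:

static inline struct ena_adapter *ena_devlink_to_adapter(struct devlink *devlink)
{
	struct ena_adapter **priv = devlink_priv(devlink);

	return *priv;	/* same value as ENA_DEVLINK_PRIV(devlink) */
}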
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a3c934c3de71..a81d3a7a3bb9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -5,9 +5,11 @@
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "ena_netdev.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
struct ena_stats {
char name[ETH_GSTRING_LEN];
@@ -298,6 +300,18 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
ena_get_stats(adapter, data, true);
}
+static int ena_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ info->phc_index = ena_phc_get_index(adapter);
+
+ return 0;
+}
+
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
{
return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
@@ -721,9 +735,11 @@ static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
return data;
}
-static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
int rc;
@@ -772,9 +788,12 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
- struct ethtool_rxnfc *cmd)
+static int ena_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
enum ena_admin_flow_hash_proto proto;
u16 hash_fields;
@@ -816,26 +835,6 @@ static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
-static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = ena_set_rss_hash(adapter->ena_dev, info);
- break;
- case ETHTOOL_SRXCLSRLDEL:
- case ETHTOOL_SRXCLSRLINS:
- default:
- netif_err(adapter, drv, netdev,
- "Command parameter %d is not supported\n", info->cmd);
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -847,9 +846,6 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
info->data = adapter->num_io_queues;
rc = 0;
break;
- case ETHTOOL_GRXFH:
- rc = ena_get_rss_hash(adapter->ena_dev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
@@ -1098,16 +1094,17 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
- .set_rxnfc = ena_set_rxnfc,
.get_rxfh_indir_size = ena_get_rxfh_indir_size,
.get_rxfh_key_size = ena_get_rxfh_key_size,
.get_rxfh = ena_get_rxfh,
.set_rxfh = ena_set_rxfh,
+ .get_rxfh_fields = ena_get_rxfh_fields,
+ .set_rxfh_fields = ena_set_rxfh_fields,
.get_channels = ena_get_channels,
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ena_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
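The conversion above (repeated for bnx2x further down) moves RSS hash-field handling out of .get_rxnfc/.set_rxnfc and into the dedicated .get_rxfh_fields/.set_rxfh_fields callbacks. A minimal sketch of the new shape for a hypothetical driver (the foo_* names are assumptions):

static int foo_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *fields)
{
	/* Example: report 4-tuple hashing for TCP over IPv4 only */
	if (fields->flow_type == TCP_V4_FLOW)
		fields->data = RXH_IP_SRC | RXH_IP_DST |
			       RXH_L4_B_0_1 | RXH_L4_B_2_3;
	else
		fields->data = 0;

	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rxfh_fields	= foo_get_rxfh_fields,
};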
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 86fd08f375df..92d149d4f091 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -19,6 +19,12 @@
#include "ena_pci_id_tbl.h"
#include "ena_xdp.h"
+#include "ena_phc.h"
+
+#include "ena_devlink.h"
+
+#include "ena_debugfs.h"
+
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
@@ -39,8 +45,6 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
-static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
@@ -2743,7 +2747,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
- ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+ ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK |
+ ENA_ADMIN_HOST_INFO_PHC_MASK;
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
@@ -3135,6 +3140,8 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
goto err_mmio_read_less;
}
+ ena_devlink_params_get(adapter->devlink);
+
/* ENA admin level init */
rc = ena_com_admin_init(ena_dev, &aenq_handlers);
if (rc) {
@@ -3188,6 +3195,10 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
if (unlikely(rc))
goto err_admin_init;
+ rc = ena_phc_init(adapter);
+ if (unlikely(rc && (rc != -EOPNOTSUPP)))
+ netdev_err(netdev, "Failed initializing PHC, error: %d\n", rc);
+
return 0;
err_admin_init:
@@ -3233,7 +3244,7 @@ err_disable_msix:
return rc;
}
-static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3271,6 +3282,8 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
ena_com_admin_destroy(ena_dev);
+ ena_phc_destroy(adapter);
+
ena_com_mmio_reg_read_request_destroy(ena_dev);
/* return reset reason to default value */
@@ -3282,7 +3295,7 @@ static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
return rc;
}
-static int ena_restore_device(struct ena_adapter *adapter)
+int ena_restore_device(struct ena_adapter *adapter)
{
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -3344,6 +3357,7 @@ err_device_destroy:
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_phc_destroy(adapter);
ena_com_mmio_reg_read_request_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -3867,6 +3881,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
+ struct devlink *devlink;
u32 max_num_io_queues;
bool wd_state;
int bars, rc;
@@ -3932,10 +3947,16 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
+ rc = ena_phc_alloc(adapter);
+ if (rc) {
+ netdev_err(netdev, "ena_phc_alloc failed\n");
+ goto err_netdev_destroy;
+ }
+
rc = ena_com_allocate_customer_metrics_buffer(ena_dev);
if (rc) {
netdev_err(netdev, "ena_com_allocate_customer_metrics_buffer failed\n");
- goto err_netdev_destroy;
+ goto err_free_phc;
}
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
@@ -3944,12 +3965,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_metrics_destroy;
}
+ /* Need to do this before ena_device_init */
+ devlink = ena_devlink_alloc(adapter);
+ if (!devlink) {
+ netdev_err(netdev, "ena_devlink_alloc failed\n");
+ rc = -ENOMEM;
+ goto err_metrics_destroy;
+ }
+
rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
- goto err_metrics_destroy;
+ goto ena_devlink_destroy;
}
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
@@ -4033,6 +4062,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_rss;
}
+ ena_debugfs_init(netdev);
+
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
@@ -4054,6 +4085,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapters_found++;
+ /* From this point, the devlink device is visible to users.
+ * Perform the registration last to ensure that all the resources
+ * are available and that the netdevice is registered.
+ */
+ ena_devlink_register(devlink, &pdev->dev);
+
return 0;
err_rss:
@@ -4070,8 +4107,12 @@ err_worker_destroy:
err_device_destroy:
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
+ena_devlink_destroy:
+ ena_devlink_free(devlink);
err_metrics_destroy:
ena_com_delete_customer_metrics_buffer(ena_dev);
+err_free_phc:
+ ena_phc_free(adapter);
err_netdev_destroy:
free_netdev(netdev);
err_free_region:
@@ -4102,6 +4143,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
+ ena_debugfs_terminate(netdev);
+
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
@@ -4112,6 +4155,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
+ ena_phc_free(adapter);
+
+ ena_devlink_unregister(adapter->devlink);
+ ena_devlink_free(adapter->devlink);
+
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 6e12ae3b12e5..006f9a3acea6 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -16,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/xdp.h>
#include <uapi/linux/bpf.h>
+#include <net/devlink.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -110,6 +111,8 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+struct ena_phc_info;
+
struct ena_irq {
irq_handler_t handler;
void *data;
@@ -348,6 +351,8 @@ struct ena_adapter {
char name[ENA_NAME_MAX_LEN];
+ struct ena_phc_info *phc_info;
+
unsigned long flags;
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_IO_QUEUES]
@@ -383,6 +388,13 @@ struct ena_adapter {
struct bpf_prog *xdp_bpf_prog;
u32 xdp_first_ring;
u32 xdp_num_queues;
+
+ struct devlink *devlink;
+ struct devlink_port devlink_port;
+#ifdef CONFIG_DEBUG_FS
+
+ struct dentry *debugfs_base;
+#endif /* CONFIG_DEBUG_FS */
};
void ena_set_ethtool_ops(struct net_device *netdev);
@@ -412,6 +424,8 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
+int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+int ena_restore_device(struct ena_adapter *adapter);
int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
struct ena_tx_buffer *tx_info, bool is_xdp);
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.c b/drivers/net/ethernet/amazon/ena/ena_phc.c
new file mode 100644
index 000000000000..7867e893fd15
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include "ena_netdev.h"
+#include "ena_phc.h"
+#include "ena_devlink.h"
+
+static int ena_phc_adjtime(struct ptp_clock_info *clock_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_adjfine(struct ptp_clock_info *clock_info, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_feature_enable(struct ptp_clock_info *clock_info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ena_phc_gettimex64(struct ptp_clock_info *clock_info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ena_phc_info *phc_info =
+ container_of(clock_info, struct ena_phc_info, clock_info);
+ unsigned long flags;
+ u64 timestamp_nsec;
+ int rc;
+
+ spin_lock_irqsave(&phc_info->lock, flags);
+
+ ptp_read_system_prets(sts);
+
+ rc = ena_com_phc_get_timestamp(phc_info->adapter->ena_dev,
+ &timestamp_nsec);
+
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&phc_info->lock, flags);
+
+ *ts = ns_to_timespec64(timestamp_nsec);
+
+ return rc;
+}
+
+static int ena_phc_settime64(struct ptp_clock_info *clock_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ena_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjtime = ena_phc_adjtime,
+ .adjfine = ena_phc_adjfine,
+ .gettimex64 = ena_phc_gettimex64,
+ .settime64 = ena_phc_settime64,
+ .enable = ena_phc_feature_enable,
+};
+
+/* Enable/disable PHC by the kernel; takes effect on the next init flow */
+void ena_phc_enable(struct ena_adapter *adapter, bool enable)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ if (!phc_info) {
+ netdev_err(adapter->netdev, "phc_info is not allocated\n");
+ return;
+ }
+
+ phc_info->enabled = enable;
+}
+
+/* Check if PHC is enabled by the kernel */
+bool ena_phc_is_enabled(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->enabled);
+}
+
+/* PHC is active if the PTP clock is registered with the kernel */
+bool ena_phc_is_active(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ return (phc_info && phc_info->clock);
+}
+
+static int ena_phc_register(struct ena_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ptp_clock_info *clock_info;
+ struct ena_phc_info *phc_info;
+ int rc = 0;
+
+ phc_info = adapter->phc_info;
+ clock_info = &phc_info->clock_info;
+
+ /* PHC may already be registered in case of a reset */
+ if (ena_phc_is_active(adapter))
+ return 0;
+
+ phc_info->adapter = adapter;
+
+ spin_lock_init(&phc_info->lock);
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ *clock_info = ena_ptp_clock_info;
+ snprintf(clock_info->name,
+ sizeof(clock_info->name),
+ "ena-ptp-%02x",
+ PCI_SLOT(pdev->devfn));
+
+ phc_info->clock = ptp_clock_register(clock_info, &pdev->dev);
+ if (IS_ERR(phc_info->clock)) {
+ rc = PTR_ERR(phc_info->clock);
+ netdev_err(adapter->netdev, "Failed registering ptp clock, error: %d\n",
+ rc);
+ phc_info->clock = NULL;
+ }
+
+ return rc;
+}
+
+static void ena_phc_unregister(struct ena_adapter *adapter)
+{
+ struct ena_phc_info *phc_info = adapter->phc_info;
+
+ /* During the reset flow, PHC must stay registered
+ * to keep the kernel's PHC index
+ */
+ if (ena_phc_is_active(adapter) &&
+ !test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+ ptp_clock_unregister(phc_info->clock);
+ phc_info->clock = NULL;
+ }
+}
+
+int ena_phc_alloc(struct ena_adapter *adapter)
+{
+ /* Allocate driver specific PHC info */
+ adapter->phc_info = vzalloc(sizeof(*adapter->phc_info));
+ if (unlikely(!adapter->phc_info)) {
+ netdev_err(adapter->netdev, "Failed to alloc phc_info\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ena_phc_free(struct ena_adapter *adapter)
+{
+ if (adapter->phc_info) {
+ vfree(adapter->phc_info);
+ adapter->phc_info = NULL;
+ }
+}
+
+int ena_phc_init(struct ena_adapter *adapter)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
+ int rc = -EOPNOTSUPP;
+
+ /* Validate PHC feature is supported in the device */
+ if (!ena_com_phc_supported(ena_dev)) {
+ netdev_dbg(netdev, "PHC feature is not supported by the device\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Validate PHC feature is enabled by the kernel */
+ if (!ena_phc_is_enabled(adapter)) {
+ netdev_dbg(netdev, "PHC feature is not enabled by the kernel\n");
+ goto err_ena_com_phc_init;
+ }
+
+ /* Initialize device specific PHC info */
+ rc = ena_com_phc_init(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to init phc, error: %d\n", rc);
+ goto err_ena_com_phc_init;
+ }
+
+ /* Configure PHC feature in driver and device */
+ rc = ena_com_phc_config(ena_dev);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to config phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ /* Register to PTP class driver */
+ rc = ena_phc_register(adapter);
+ if (unlikely(rc)) {
+ netdev_err(netdev, "Failed to register phc, error: %d\n", rc);
+ goto err_ena_com_phc_config;
+ }
+
+ return 0;
+
+err_ena_com_phc_config:
+ ena_com_phc_destroy(ena_dev);
+err_ena_com_phc_init:
+ ena_phc_enable(adapter, false);
+ ena_devlink_disable_phc_param(adapter->devlink);
+ return rc;
+}
+
+void ena_phc_destroy(struct ena_adapter *adapter)
+{
+ ena_phc_unregister(adapter);
+ ena_com_phc_destroy(adapter->ena_dev);
+}
+
+int ena_phc_get_index(struct ena_adapter *adapter)
+{
+ if (ena_phc_is_active(adapter))
+ return ptp_clock_index(adapter->phc_info->clock);
+
+ return -1;
+}
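For context, the index returned by ena_phc_get_index() is what ethtool reports as phc_index, and userspace reads the clock through the matching /dev/ptpN character device. A hedged userspace sketch; the device index is hypothetical (it must be taken from ethtool -T) and the FD_TO_CLOCKID() idiom follows the kernel's testptp example:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY);	/* hypothetical index */
	struct timespec ts;

	if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts))
		return 1;

	printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}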
diff --git a/drivers/net/ethernet/amazon/ena/ena_phc.h b/drivers/net/ethernet/amazon/ena/ena_phc.h
new file mode 100644
index 000000000000..7364fe714e44
--- /dev/null
+++ b/drivers/net/ethernet/amazon/ena/ena_phc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright 2015-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef ENA_PHC_H
+#define ENA_PHC_H
+
+#include <linux/ptp_clock_kernel.h>
+
+struct ena_phc_info {
+ /* PTP hardware capabilities */
+ struct ptp_clock_info clock_info;
+
+ /* Registered PTP clock device */
+ struct ptp_clock *clock;
+
+ /* Adapter specific private data structure */
+ struct ena_adapter *adapter;
+
+ /* PHC lock */
+ spinlock_t lock;
+
+ /* Enabled by kernel */
+ bool enabled;
+};
+
+void ena_phc_enable(struct ena_adapter *adapter, bool enable);
+bool ena_phc_is_enabled(struct ena_adapter *adapter);
+bool ena_phc_is_active(struct ena_adapter *adapter);
+int ena_phc_get_index(struct ena_adapter *adapter);
+int ena_phc_init(struct ena_adapter *adapter);
+void ena_phc_destroy(struct ena_adapter *adapter);
+int ena_phc_alloc(struct ena_adapter *adapter);
+void ena_phc_free(struct ena_adapter *adapter);
+
+#endif /* ENA_PHC_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index a2efebafd686..51068dc1cc2a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -53,6 +53,11 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_MMIO_RESP_HI_OFF 0x64
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_OFF 0x68
+/* phc_registers offsets */
+
+/* 100 base */
+#define ENA_REGS_PHC_DB_OFF 0x100
+
/* version register */
#define ENA_REGS_VERSION_MINOR_VERSION_MASK 0xff
#define ENA_REGS_VERSION_MAJOR_VERSION_SHIFT 8
@@ -129,4 +134,7 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
+/* phc_db_req_id register */
+#define ENA_REGS_PHC_DB_REQ_ID_MASK 0xffff
+
#endif /* _ENA_REGS_H_ */
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 0d61b8580d72..a6ea477bce3c 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -605,10 +605,8 @@ next:
bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);
- if (processed < budget) {
- napi_complete_done(&intf->rx_napi, processed);
+ if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
bcmasp_enable_rx_irq(intf, 1);
- }
return processed;
}
@@ -1281,6 +1279,8 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
ndev->hw_features |= ndev->features;
ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);
+ netdev_sw_irq_coalesce_default_on(ndev);
+
return intf;
err_free_netdev:
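The bcmasp change above is the canonical NAPI completion pattern: re-enable device interrupts only when napi_complete_done() confirms the NAPI context actually left polling mode (it can stay scheduled when gro_flush_timeout / napi_defer_hard_irqs are in use, which the driver now opts into via netdev_sw_irq_coalesce_default_on()). A generic sketch, with hypothetical foo_* ring helpers:

struct foo_ring {
	struct napi_struct napi;
	/* ring state elided */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
	int work_done = foo_clean_rx(ring, budget);	/* hypothetical */

	if (work_done < budget && napi_complete_done(napi, work_done))
		foo_enable_rx_irq(ring);		/* hypothetical */

	return work_done;
}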
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 44199855ebfb..528ce9ca4f54 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3318,8 +3318,11 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
-static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct bnx2x *bp = netdev_priv(dev);
+
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -3361,20 +3364,21 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
- case ETHTOOL_GRXFH:
- return bnx2x_get_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
}
}
-static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+static int bnx2x_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct bnx2x *bp = netdev_priv(dev);
int udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -3460,19 +3464,6 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
}
}
-static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return bnx2x_set_rss_flags(bp, info);
- default:
- DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
return T_ETH_INDIRECTION_TABLE_SIZE;
@@ -3684,10 +3675,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3711,10 +3703,11 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
- .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh = bnx2x_get_rxfh,
.set_rxfh = bnx2x_set_rxfh,
+ .get_rxfh_fields = bnx2x_get_rxfh_fields,
+ .set_rxfh_fields = bnx2x_set_rxfh_fields,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_link_ksettings = bnx2x_get_vf_link_ksettings,
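bnx2x is the first of several drivers in this series (bnxt, nicvf, cxgb4, enic, be2net, dpaa, dpaa2 and enetc follow below) to move RSS hash-field configuration out of the ETHTOOL_GRXFH/ETHTOOL_SRXFH cases of .get_rxnfc/.set_rxnfc and into the dedicated .get_rxfh_fields/.set_rxfh_fields callbacks. A minimal sketch of the new callback shape, with the device-specific programming elided:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static int example_get_rxfh_fields(struct net_device *dev,
					   struct ethtool_rxfh_fields *fields)
	{
		/* Report which header fields feed the RSS hash per flow type */
		switch (fields->flow_type) {
		case TCP_V4_FLOW:
		case TCP_V6_FLOW:
			fields->data = RXH_IP_SRC | RXH_IP_DST |
				       RXH_L4_B_0_1 | RXH_L4_B_2_3;
			break;
		default:
			fields->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}

	static int example_set_rxfh_fields(struct net_device *dev,
					   const struct ethtool_rxfh_fields *fields,
					   struct netlink_ext_ack *extack)
	{
		/* Validate fields->data and program the hardware hash
		 * configuration; entirely device specific, so elided here.
		 */
		return 0;
	}

	static const struct ethtool_ops example_ethtool_ops = {
		.get_rxfh_fields = example_get_rxfh_fields,
		.set_rxfh_fields = example_set_rxfh_fields,
	};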
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9a1a1d504c0..3ee4b848ef53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10219,8 +10219,7 @@ static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
.sync_table = bnx2x_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ae89a981e052..f391e63aa79d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
+ skb_frag_t *frag;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
@@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
- !lflags) {
+ skb_frags_readable(skb) && !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
+ frag = &skb_shinfo(skb)->frags[j];
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
@@ -708,8 +709,7 @@ normal_tx:
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+ frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
@@ -721,7 +721,8 @@ normal_tx:
goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_addr_set(tx_buf, mapping, mapping);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -778,9 +779,11 @@ tx_dma_error:
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- DMA_TO_DEVICE);
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
tx_free:
@@ -809,6 +812,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 hw_cons = txr->tx_hw_cons;
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
+ skb_frag_t *frag;
int tx_pkts = 0;
bool rc = false;
@@ -848,13 +852,14 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
+ frag = &skb_shinfo(skb)->frags[j];
cons = NEXT_TX(cons);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[j]),
- DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
@@ -1810,7 +1815,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
- /* if vf-rep dev is NULL, the must belongs to the PF */
+ /* if vf-rep dev is NULL, it must belong to the PF */
return dev ? dev : bp->dev;
}
@@ -3425,9 +3430,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
dev_kfree_skb(skb);
}
@@ -7119,7 +7126,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
ring_type);
- return -1;
+ return -EINVAL;
}
resp = hwrm_req_hold(bp, req);
@@ -14124,28 +14131,13 @@ static void bnxt_unlock_sp(struct bnxt *bp)
netdev_unlock(bp->dev);
}
-/* Same as bnxt_lock_sp() with additional rtnl_lock */
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
-{
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
- netdev_lock(bp->dev);
-}
-
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
-{
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- netdev_unlock(bp->dev);
- rtnl_unlock();
-}
-
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -14153,9 +14145,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -14194,7 +14186,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -15086,17 +15078,15 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
- netdev_lock(bp->dev);
rc = bnxt_open(bp->dev);
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
netdev_unlock(bp->dev);
- rtnl_unlock();
goto ulp_start;
}
@@ -15116,7 +15106,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
netdev_unlock(bp->dev);
- rtnl_unlock();
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
netdev_lock(bp->dev);
@@ -15642,8 +15631,7 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -15651,8 +15639,7 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
}, bnxt_udp_tunnels_p7 = {
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -16062,7 +16049,7 @@ err_reset:
rc);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- netif_close(dev);
+ bnxt_reset_task(bp, true);
return rc;
}
@@ -16777,6 +16764,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
dev->request_ops_lock = true;
+ dev->netmem_tx = true;
rc = register_netdev(dev);
if (rc)
@@ -16878,7 +16866,6 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
@@ -16923,7 +16910,6 @@ static int bnxt_resume(struct device *device)
resume_exit:
netdev_unlock(bp->dev);
- rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -17089,7 +17075,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
@@ -17107,7 +17092,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
netdev_unlock(netdev);
- rtnl_unlock();
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
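Beyond the lock conversion, the bnxt changes above advertise netmem TX support (dev->netmem_tx = true) and switch the TX unmap paths to the netmem-aware DMA helpers, which skip the unmap when a fragment is backed by unreadable device memory. A small sketch of that unmap pattern, with the wrapper name chosen purely for illustration:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>
	#include <net/netmem.h>

	/* Unmap one TX fragment the netmem-aware way, mirroring the bnxt
	 * hunks above: netmem_dma_unmap_page_attrs() is a no-op for frags
	 * backed by unreadable netmem and otherwise behaves like
	 * dma_unmap_page_attrs().
	 */
	static void example_unmap_tx_frag(struct device *dev,
					  const skb_frag_t *frag,
					  dma_addr_t mapping)
	{
		netmem_dma_unmap_page_attrs(dev, mapping, skb_frag_size(frag),
					    DMA_TO_DEVICE, 0);
	}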
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index f5d490bf997e..4c10373abffd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1587,8 +1587,11 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
return 0;
}
-static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct bnxt *bp = netdev_priv(dev);
+
cmd->data = 0;
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1647,10 +1650,15 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
-static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+static int bnxt_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- u32 rss_hash_cfg = bp->rss_hash_cfg;
+ struct bnxt *bp = netdev_priv(dev);
int tuple, rc = 0;
+ u32 rss_hash_cfg;
+
+ rss_hash_cfg = bp->rss_hash_cfg;
if (cmd->data == RXH_4TUPLE)
tuple = 4;
@@ -1768,10 +1776,6 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
rc = bnxt_grxclsrule(bp, cmd);
break;
- case ETHTOOL_GRXFH:
- rc = bnxt_grxfh(bp, cmd);
- break;
-
default:
rc = -EOPNOTSUPP;
break;
@@ -1786,10 +1790,6 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int rc;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- rc = bnxt_srxfh(bp, cmd);
- break;
-
case ETHTOOL_SRXCLSRLINS:
rc = bnxt_srxclsrlins(bp, cmd);
break;
@@ -5521,6 +5521,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .get_rxfh_fields = bnxt_get_rxfh_fields,
+ .set_rxfh_fields = bnxt_set_rxfh_fields,
.create_rxfh_context = bnxt_create_rxfh_context,
.modify_rxfh_context = bnxt_modify_rxfh_context,
.remove_rxfh_context = bnxt_remove_rxfh_context,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 5ddddd89052f..bc0d80356568 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -823,7 +823,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
int avail_cp, avail_stat;
- /* Check if we can enable requested num of vf's. At a mininum
+ /* Check if we can enable requested num of vf's. At a minimum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
* features like TPA will not be available.
*/
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d2ca90407cce..0599d3016224 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1316,7 +1316,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel decap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds. Ignore src_port in the tunnel_key,
+ * tunnel header fields. Ignore src_port in the tunnel_key,
* since it is not required for decap filters.
*/
decap_key->tp_src = 0;
@@ -1410,7 +1410,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
/* Check if there's another flow using the same tunnel encap.
* If not, add this tunnel to the table and resolve the other
- * tunnel header fileds
+ * tunnel header fields
*/
encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
&tc_info->encap_ht_params,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fa0077bc67b7..4f40f6afe88f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2472,10 +2472,8 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done))
bcmgenet_rx_ring_int_enable(ring);
- }
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
@@ -3988,6 +3986,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ netdev_sw_irq_coalesce_default_on(dev);
+
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
if (priv->wol_irq > 0) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b6437ba7a2eb..573e8b279e52 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -169,10 +169,15 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
+ EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
+
reg |= EXT_CK25_DIS;
}
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d1f1ae5ea161..53aaf6b08e39 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5654,6 +5654,20 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
return 0;
}
+static void macb_shutdown(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ dev_close(netdev);
+
+ netif_device_detach(netdev);
+
+ rtnl_unlock();
+}
+
static const struct dev_pm_ops macb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
@@ -5667,6 +5681,7 @@ static struct platform_driver macb_driver = {
.of_match_table = of_match_ptr(macb_dt_ids),
.pm = &macb_pm_ops,
},
+ .shutdown = macb_shutdown,
};
module_platform_driver(macb_driver);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index ff8f2f9f9cae..75f22f74774c 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1208,45 +1208,6 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
}
EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx)
-{
- if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_IQ_MAX_Q(conf23xx),
- CN23XX_MAX_INPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
- dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
- __func__, CFG_GET_OQ_MAX_Q(conf23xx),
- CN23XX_MAX_OUTPUT_QUEUES);
- return 1;
- }
-
- if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
- CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
- __func__);
- return 1;
- }
-
- if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
- dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
- __func__);
- return 1;
- }
-
- return 0;
-}
-
int cn23xx_fw_loaded(struct octeon_device *oct)
{
u64 val;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
index 234b96b4f488..bbe9f3133b07 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -54,9 +54,6 @@ struct oct_vf_stats {
int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
-int validate_cn23xx_pf_config_info(struct octeon_device *oct,
- struct octeon_config *conf23xx);
-
u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
int cn23xx_sriov_config(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d0ff0c170b1a..fc6053414b7d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -516,8 +516,8 @@ static int nicvf_set_ringparam(struct net_device *netdev,
return 0;
}
-static int nicvf_get_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
info->data = 0;
@@ -552,25 +552,28 @@ static int nicvf_get_rxnfc(struct net_device *dev,
info->data = nic->rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- return nicvf_get_rss_hash_opts(nic, info);
default:
break;
}
return ret;
}
-static int nicvf_set_rss_hash_opts(struct nicvf *nic,
- struct ethtool_rxnfc *info)
+static int nicvf_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
- struct nicvf_rss_info *rss = &nic->rss_info;
- u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss;
+ u64 rss_cfg;
+
+ rss = &nic->rss_info;
+ rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
if (!rss->enable)
netdev_err(nic->netdev,
"RSS is disabled, hash cannot be set\n");
- netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %u\n",
info->flow_type, info->data);
if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
@@ -628,19 +631,6 @@ static int nicvf_set_rss_hash_opts(struct nicvf *nic,
return 0;
}
-static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct nicvf *nic = netdev_priv(dev);
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- return nicvf_set_rss_hash_opts(nic, info);
- default:
- break;
- }
- return -EOPNOTSUPP;
-}
-
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
return RSS_HASH_KEY_SIZE * sizeof(u64);
@@ -872,11 +862,12 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_ringparam = nicvf_get_ringparam,
.set_ringparam = nicvf_set_ringparam,
.get_rxnfc = nicvf_get_rxnfc,
- .set_rxnfc = nicvf_set_rxnfc,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
.get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
.get_rxfh = nicvf_get_rxfh,
.set_rxfh = nicvf_set_rxfh,
+ .get_rxfh_fields = nicvf_get_rxfh_fields,
+ .set_rxfh_fields = nicvf_set_rxfh_fields,
.get_channels = nicvf_get_channels,
.set_channels = nicvf_set_channels,
.get_pauseparam = nicvf_get_pauseparam,
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 9749d1239f58..5d5f3380ecca 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -176,43 +176,6 @@ again:
EXPORT_SYMBOL(t3_l2t_send_slow);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
-{
-again:
- switch (e->state) {
- case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
- neigh_event_send(e->neigh, NULL);
- spin_lock_bh(&e->lock);
- if (e->state == L2T_STATE_STALE) {
- e->state = L2T_STATE_VALID;
- }
- spin_unlock_bh(&e->lock);
- return;
- case L2T_STATE_VALID: /* fast-path, send the packet on */
- return;
- case L2T_STATE_RESOLVING:
- spin_lock_bh(&e->lock);
- if (e->state != L2T_STATE_RESOLVING) {
- /* ARP already completed */
- spin_unlock_bh(&e->lock);
- goto again;
- }
- spin_unlock_bh(&e->lock);
-
- /*
- * Only the first packet added to the arpq should kick off
- * resolution. However, because the alloc_skb below can fail,
- * we allow each packet added to the arpq to retry resolution
- * as a way of recovering from transient memory exhaustion.
- * A better way would be to use a work request to retry L2T
- * entries when there's no memory.
- */
- neigh_event_send(e->neigh, NULL);
- }
-}
-
-EXPORT_SYMBOL(t3_l2t_send_event);
-
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index 646ca0bc25bd..33558f177497 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -113,7 +113,6 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
-void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 1546c3db08f0..23326235d4ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1730,6 +1730,60 @@ static int cxgb4_ntuple_get_filter(struct net_device *dev,
return 0;
}
+static int cxgb4_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ unsigned int v = pi->rss_mode;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V4_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
+ (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ }
+ return 0;
+}
+
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
@@ -1739,56 +1793,6 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
int ret = 0;
switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- unsigned int v = pi->rss_mode;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V4_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case UDP_V6_FLOW:
- if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
- (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
- info->data = RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3;
- else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
- info->data = RXH_IP_SRC | RXH_IP_DST;
- break;
- }
- return 0;
- }
case ETHTOOL_GRXRINGS:
info->data = pi->nqsets;
return 0;
@@ -2199,6 +2203,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
+ .get_rxfh_fields = cxgb4_get_rxfh_fields,
.self_test = cxgb4_self_test,
.flash_device = set_flash,
.get_ts_info = get_ts_info,
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 529160926a96..a50f5dad34d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -528,8 +528,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
return 0;
}
-static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
+static int enic_get_rx_flow_hash(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct enic *enic = netdev_priv(dev);
u8 rss_hash_type = 0;
cmd->data = 0;
@@ -597,9 +599,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
- case ETHTOOL_GRXFH:
- ret = enic_get_rx_flow_hash(enic, cmd);
- break;
default:
ret = -EOPNOTSUPP;
break;
@@ -693,6 +692,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_rxfh_fields = enic_get_rx_flow_hash,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
.get_channels = enic_get_channels,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index da9b7715df05..cc60ee454bf9 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -99,6 +99,13 @@ static const struct net_device_ops netdev_ops = {
.ndo_tx_timeout = rio_tx_timeout,
};
+static bool is_support_rmon_mmio(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_DLINK &&
+ pdev->device == 0x4000 &&
+ pdev->revision == 0x0c;
+}
+
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -131,18 +138,22 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
+ if (is_support_rmon_mmio(pdev))
+ np->rmon_enable = true;
+
/* IO registers range. */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr)
goto err_out_dev;
np->eeprom_addr = ioaddr;
-#ifdef MEM_MAPPING
- /* MM registers range. */
- ioaddr = pci_iomap(pdev, 1, 0);
- if (!ioaddr)
- goto err_out_iounmap;
-#endif
+ if (np->rmon_enable) {
+ /* MM registers range. */
+ ioaddr = pci_iomap(pdev, 1, 0);
+ if (!ioaddr)
+ goto err_out_iounmap;
+ }
+
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
@@ -289,9 +300,8 @@ err_out_unmap_tx:
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
err_out_iounmap:
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
free_netdev (dev);
@@ -578,7 +588,8 @@ static void rio_hw_init(struct net_device *dev)
dw8(TxDMAPollPeriod, 0xff);
dw8(RxDMABurstThresh, 0x30);
dw8(RxDMAUrgentThresh, 0x30);
- dw32(RmonStatMask, 0x0007ffff);
+ if (!np->rmon_enable)
+ dw32(RmonStatMask, 0x0007ffff);
/* clear statistics */
clear_stats (dev);
@@ -1076,9 +1087,6 @@ get_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
unsigned int stat_reg;
unsigned long flags;
@@ -1123,10 +1131,10 @@ get_stats (struct net_device *dev)
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
+
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1143,9 +1151,6 @@ clear_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
-#ifdef MEM_MAPPING
- int i;
-#endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
@@ -1181,10 +1186,9 @@ clear_stats (struct net_device *dev)
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
-#ifdef MEM_MAPPING
- for (i = 0x100; i <= 0x150; i += 4)
- dr32(i);
-#endif
+ if (np->rmon_enable)
+ for (int i = 0x100; i <= 0x150; i += 4)
+ dr32(i);
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
@@ -1810,9 +1814,8 @@ rio_remove1 (struct pci_dev *pdev)
np->rx_ring_dma);
dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
-#ifdef MEM_MAPPING
- pci_iounmap(pdev, np->ioaddr);
-#endif
+ if (np->rmon_enable)
+ pci_iounmap(pdev, np->ioaddr);
pci_iounmap(pdev, np->eeprom_addr);
free_netdev (dev);
pci_release_regions (pdev);
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba679025e866..4788cc94639d 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -403,6 +403,8 @@ struct netdev_private {
u16 negotiate; /* Negotiated media */
int phy_addr; /* PHY addresses. */
u16 led_mode; /* LED mode read from EEPROM (IP1000A only) */
+
+ bool rmon_enable;
};
/* The station address location in the EEPROM. */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f001a649f58f..f9216326bdfe 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1073,10 +1073,19 @@ static void be_set_msg_level(struct net_device *netdev, u32 level)
adapter->msg_enable = level;
}
-static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
+static int be_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u64 flow_type = cmd->flow_type;
u64 data = 0;
+ if (!be_multi_rxq(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "ethtool::get_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
+
switch (flow_type) {
case TCP_V4_FLOW:
if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
@@ -1104,7 +1113,8 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
break;
}
- return data;
+ cmd->data = data;
+ return 0;
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
@@ -1119,9 +1129,6 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_qs;
break;
@@ -1132,11 +1139,19 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
return 0;
}
-static int be_set_rss_hash_opts(struct be_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int be_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- int status;
+ struct be_adapter *adapter = netdev_priv(netdev);
u32 rss_flags = adapter->rss_info.rss_flags;
+ int status;
+
+ if (!be_multi_rxq(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "ethtool::set_rxfh: RX flow hashing is disabled\n");
+ return -EINVAL;
+ }
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1195,28 +1210,6 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return be_cmd_status(status);
}
-static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
-
- if (!be_multi_rxq(adapter)) {
- dev_err(&adapter->pdev->dev,
- "ethtool::set_rxnfc: RX flow hashing is disabled\n");
- return -EINVAL;
- }
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- status = be_set_rss_hash_opts(adapter, cmd);
- break;
- default:
- return -EINVAL;
- }
-
- return status;
-}
-
static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -1449,7 +1442,8 @@ const struct ethtool_ops be_ethtool_ops = {
.flash_device = be_do_flash,
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
- .set_rxnfc = be_set_rxnfc,
+ .get_rxfh_fields = be_get_rxfh_fields,
+ .set_rxfh_fields = be_set_rxfh_fields,
.get_rxfh_indir_size = be_get_rxfh_indir_size,
.get_rxfh_key_size = be_get_rxfh_key_size,
.get_rxfh = be_get_rxfh,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3d2e21592119..f49400ba9729 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4031,8 +4031,7 @@ static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
static const struct udp_tunnel_nic_info be_udp_tunnels = {
.set_port = be_vxlan_set_port,
.unset_port = be_vxlan_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 23c23cca2620..3edc8d142dd5 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -28,7 +28,6 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
-#include <linux/phy_fixed.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
@@ -3150,7 +3149,6 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
- .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 9986f6e1f587..0c588e03b15e 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -263,8 +263,8 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
ethtool_puts(&data, dpaa_stats_global[i]);
}
-static int dpaa_get_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *cmd)
+static int dpaa_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
struct dpaa_priv *priv = netdev_priv(dev);
@@ -299,22 +299,6 @@ static int dpaa_get_hash_opts(struct net_device *dev,
return 0;
}
-static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *unused)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = dpaa_get_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
struct mac_device *mac_dev;
@@ -329,8 +313,9 @@ static void dpaa_set_hash(struct net_device *net_dev, bool enable)
priv->keygen_in_use = enable;
}
-static int dpaa_set_hash_opts(struct net_device *dev,
- struct ethtool_rxnfc *nfc)
+static int dpaa_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
int ret = -EINVAL;
@@ -364,21 +349,6 @@ static int dpaa_set_hash_opts(struct net_device *dev,
return ret;
}
-static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = dpaa_set_hash_opts(dev, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int dpaa_get_ts_info(struct net_device *net_dev,
struct kernel_ethtool_ts_info *info)
{
@@ -510,8 +480,8 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_strings = dpaa_get_strings,
.get_link_ksettings = dpaa_get_link_ksettings,
.set_link_ksettings = dpaa_set_link_ksettings,
- .get_rxnfc = dpaa_get_rxnfc,
- .set_rxnfc = dpaa_set_rxnfc,
+ .get_rxfh_fields = dpaa_get_rxfh_fields,
+ .set_rxfh_fields = dpaa_set_rxfh_fields,
.get_ts_info = dpaa_get_ts_info,
.get_coalesce = dpaa_get_coalesce,
.set_coalesce = dpaa_set_coalesce,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 74ef77cb7078..00474ed11d53 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -719,13 +719,6 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
int i, j = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- /* we purposely ignore cmd->flow_type for now, because the
- * classifier only supports a single set of fields for all
- * protocols
- */
- rxnfc->data = priv->rx_hash_fields;
- break;
case ETHTOOL_GRXRINGS:
rxnfc->data = dpaa2_eth_queue_count(priv);
break;
@@ -767,11 +760,6 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
int err = 0;
switch (rxnfc->cmd) {
- case ETHTOOL_SRXFH:
- if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
- return -EOPNOTSUPP;
- err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
- break;
case ETHTOOL_SRXCLSRLINS:
err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
@@ -785,6 +773,28 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *rxnfc)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+
+	/* we purposely ignore rxnfc->flow_type for now, because the
+ * classifier only supports a single set of fields for all
+ * protocols
+ */
+ rxnfc->data = priv->rx_hash_fields;
+ return 0;
+}
+
+static int dpaa2_eth_set_rxfh_fields(struct net_device *net_dev,
+ const struct ethtool_rxfh_fields *rxnfc,
+ struct netlink_ext_ack *extack)
+{
+ if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
+ return -EOPNOTSUPP;
+ return dpaa2_eth_set_hash(net_dev, rxnfc->data);
+}
+
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
@@ -939,6 +949,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_strings = dpaa2_eth_get_strings,
.get_rxnfc = dpaa2_eth_get_rxnfc,
.set_rxnfc = dpaa2_eth_set_rxnfc,
+ .get_rxfh_fields = dpaa2_eth_get_rxfh_fields,
+ .set_rxfh_fields = dpaa2_eth_set_rxfh_fields,
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index a466c2379146..4b0ae7d9af92 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -448,7 +448,5 @@ bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
- xsk_tx_release(ch->xsk_pool);
-
return total_enqueued == budget;
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index dcc3fbac3481..e4287725832e 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1375,6 +1375,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
}
if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
+ struct enetc_hw *hw = &priv->si->hw;
__be16 tpid = 0;
switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
@@ -1385,15 +1386,12 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
tpid = htons(ETH_P_8021AD);
break;
case 2:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR1));
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR1) &
+ SICVLANR_ETYPE);
break;
case 3:
- tpid = htons(enetc_port_rd(&priv->si->hw,
- ENETC_PCVLANR2));
- break;
- default:
- break;
+ tpid = htons(enetc_rd_hot(hw, ENETC_SICVLANR2) &
+ SICVLANR_ETYPE);
}
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index d38cd36be4a6..2e5cef646741 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -467,7 +467,8 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc)
+static int enetc_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *rxnfc)
{
static const u32 rsshash[] = {
[TCP_V4_FLOW] = ENETC_RSSHASH_L4,
@@ -584,9 +585,6 @@ static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc,
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->num_rx_rings;
break;
- case ETHTOOL_GRXFH:
- /* get RSS hash config */
- return enetc_get_rsshash(rxnfc);
case ETHTOOL_GRXCLSRLCNT:
/* total number of entries */
rxnfc->data = priv->si->num_fs_entries;
@@ -639,8 +637,6 @@ static int enetc4_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc
case ETHTOOL_GRXRINGS:
rxnfc->data = priv->num_rx_rings;
break;
- case ETHTOOL_GRXFH:
- return enetc_get_rsshash(rxnfc);
default:
return -EOPNOTSUPP;
}
@@ -1228,6 +1224,7 @@ const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1258,6 +1255,7 @@ const struct ethtool_ops enetc_vf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
@@ -1284,6 +1282,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.get_rxfh_indir_size = enetc_get_rxfh_indir_size,
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
+ .get_rxfh_fields = enetc_get_rxfh_fields,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 53e8d18c7a34..74082b98fdbb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -43,6 +43,9 @@
#define ENETC_SIPMAR0 0x80
#define ENETC_SIPMAR1 0x84
+#define ENETC_SICVLANR1 0x90
+#define ENETC_SICVLANR2 0x94
+#define SICVLANR_ETYPE GENMASK(15, 0)
/* VF-PF Message passing */
#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c81f2ea588f2..5c8fdcef759b 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -14,14 +14,14 @@
#define FEC_H
/****************************************************************************/
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/bpf.h>
#include <linux/clocksource.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/net_tstamp.h>
#include <linux/pm_qos.h>
-#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
-#include <dt-bindings/firmware/imx/rsrc.h>
-#include <linux/firmware/imx/sci.h>
#include <net/xdp.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
@@ -115,7 +115,7 @@
#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
-#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excessive collisions */
#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
@@ -342,7 +342,7 @@ struct bufdesc_ex {
#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
/* The number of Tx and Rx buffers. These are allocated from the page
- * pool. The code may assume these are power of two, so it it best
+ * pool. The code may assume these are power of two, so it is best
* to keep them that size.
* We don't need to allocate pages for the transmitter. We just use
* the skbuffer directly.
@@ -460,7 +460,7 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
-/* Controller supports interrupt coalesc */
+/* Controller supports interrupt coalesce */
#define FEC_QUIRK_HAS_COALESCE (1 << 13)
/* Interrupt doesn't wake CPU from deep idle */
#define FEC_QUIRK_ERR006687 (1 << 14)
@@ -495,7 +495,7 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_EEE (1 << 20)
-/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC
+/* i.MX8QM ENET IP version add new feature to generate delayed TXC/RXC
* as an alternative option to make sure it works well with various PHYs.
* For the implementation of delayed clock, ENET takes synchronized 250MHz
* clocks to generate 2ns delay.
@@ -614,7 +614,6 @@ struct fec_enet_private {
unsigned int num_tx_queues;
unsigned int num_rx_queues;
- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 17e9bddb9ddd..63dac4272045 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -22,56 +22,55 @@
* Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/pm_runtime.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/cacheflush.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <net/page_pool/helpers.h>
-#include <net/selftests.h>
-#include <net/tso.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
+#include <linux/fec.h>
+#include <linux/filter.h>
+#include <linux/gpio/consumer.h>
#include <linux/icmp.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/platform_device.h>
-#include <linux/property.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/regulator/consumer.h>
-#include <linux/if_vlan.h>
+#include <linux/phy.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
-#include <linux/mfd/syscon.h>
+#include <linux/property.h>
+#include <linux/ptrace.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/workqueue.h>
+#include <net/ip.h>
+#include <net/page_pool/helpers.h>
+#include <net/selftests.h>
+#include <net/tso.h>
#include <soc/imx/cpuidle.h>
-#include <linux/filter.h>
-#include <linux/bpf.h>
-#include <linux/bpf_trace.h>
-
-#include <asm/cacheflush.h>
#include "fec.h"
@@ -131,7 +130,7 @@ static const struct fec_devinfo fec_mvf600_info = {
FEC_QUIRK_HAS_MDIO_C45,
};
-static const struct fec_devinfo fec_imx6x_info = {
+static const struct fec_devinfo fec_imx6sx_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
@@ -196,7 +195,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6sx_info, },
{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
@@ -276,6 +275,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ECR_MAGICEN BIT(2)
#define FEC_ECR_SLEEP BIT(3)
#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_SPEED BIT(5)
#define FEC_ECR_BYTESWP BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP BIT(0)
@@ -1207,7 +1207,7 @@ fec_restart(struct net_device *ndev)
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
- ecntl |= (1 << 5);
+ ecntl |= FEC_ECR_SPEED;
else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~FEC_RCR_10BASET;
else
@@ -1706,13 +1706,29 @@ xdp_err:
return ret;
}
+static void fec_enet_rx_vlan(const struct net_device *ndev, struct sk_buff *skb)
+{
+ if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ const struct vlan_ethhdr *vlan_header = skb_vlan_eth_hdr(skb);
+ const u16 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
+
+ /* Push and remove the vlan tag */
+
+ memmove(skb->data + VLAN_HLEN, skb->data, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
static int
-fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_enet_priv_rx_q *rxq;
@@ -1720,11 +1736,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
- __u8 *data;
int pkt_received = 0;
struct bufdesc_ex *ebdp = NULL;
- bool vlan_packet_rcvd = false;
- u16 vlan_tag;
int index = 0;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
@@ -1843,10 +1856,11 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_mark_for_recycle(skb);
if (unlikely(need_swap)) {
+ u8 *data;
+
data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
}
- data = skb->data;
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1854,20 +1868,9 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ebdp = (struct bufdesc_ex *)bdp;
/* If this is a VLAN packet remove the VLAN Tag */
- vlan_packet_rcvd = false;
- if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex &&
- (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
- /* Push and remove the vlan tag */
- struct vlan_hdr *vlan_header =
- (struct vlan_hdr *) (data + ETH_HLEN);
- vlan_tag = ntohs(vlan_header->h_vlan_TCI);
-
- vlan_packet_rcvd = true;
-
- memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
- skb_pull(skb, VLAN_HLEN);
- }
+ if (fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN)))
+ fec_enet_rx_vlan(ndev, skb);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1886,12 +1889,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
}
}
- /* Handle received VLAN packets */
- if (vlan_packet_rcvd)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
-
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
@@ -1939,7 +1936,7 @@ static int fec_enet_rx(struct net_device *ndev, int budget)
/* Make sure that AVB queues are processed first. */
for (i = fep->num_rx_queues - 1; i >= 0; i--)
- done += fec_enet_rx_queue(ndev, budget - done, i);
+ done += fec_enet_rx_queue(ndev, i, budget - done);
return done;
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 2bfaf14f65c8..3fc29afc9854 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -619,7 +619,7 @@ static void mpc52xx_fec_hw_init(struct net_device *dev)
out_be32(&fec->rfifo_alarm, 0x0000030c);
out_be32(&fec->tfifo_alarm, 0x00000100);
- /* begin transmittion when 256 bytes are in FIFO (or EOF or FIFO full) */
+ /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
/* enable crc generation */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 876d90832596..afe162c9eed8 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -7,30 +7,30 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
+#include <linux/fec.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <linux/fec.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptrace.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include "fec.h"
@@ -117,7 +117,7 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
* @fep: the fec_enet_private structure handle
* @enable: enable the channel pps output
*
- * This function enble the PPS ouput on the timer channel.
+ * This function enables the PPS output on the timer channel.
*/
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
@@ -172,7 +172,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* very close to the second point, which means NSEC_PER_SEC
* - ts.tv_nsec is close to be zero(For example 20ns); Since the timer
* is still running when we calculate the first compare event, it is
- * possible that the remaining nanoseonds run out before the compare
+ * possible that the remaining nanoseconds run out before the compare
* counter is calculated and written into TCCR register. To avoid
* this possibility, we will set the compare event to be the next
* of next second. The current setting is 31-bit timer and wrap
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 3925441143fa..0291093f2e4e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1225,7 +1225,7 @@ int memac_initialization(struct mac_device *mac_dev,
* be careful and not enable this if we are using MII or RGMII, since
* those configurations modes don't use in-band autonegotiation.
*/
- if (!of_property_read_bool(mac_node, "managed") &&
+ if (!of_property_present(mac_node, "managed") &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
mac_dev->phylink_config.default_an_inband = true;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index bcbcad613512..7c0f049f0938 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -97,6 +97,7 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/property.h>
#include "gianfar.h"
@@ -571,18 +572,6 @@ static int gfar_parse_group(struct device_node *np,
return 0;
}
-static int gfar_of_group_count(struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- if (of_node_name_eq(child, "queue-group"))
- num++;
-
- return num;
-}
-
/* Reads the controller's registers to determine what interface
* connects it to the PHY.
*/
@@ -654,8 +643,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = 1;
} else { /* MQ_MG_MODE */
/* get the actual number of supported groups */
- unsigned int num_grps = gfar_of_group_count(np);
+ unsigned int num_grps;
+ num_grps = device_get_named_child_node_count(&ofdev->dev,
+ "queue-group");
if (num_grps == 0 || num_grps > MAXGROUPS) {
dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
num_grps);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 781d92e703cb..28f53cf2a174 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -781,14 +781,26 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd)
+static int gfar_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret;
+
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
+ mutex_lock(&priv->rx_queue_access);
+
+ ret = 0;
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
- return -EINVAL;
+ ret = -EINVAL;
- return 0;
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
}
static int gfar_check_filer_hardware(struct gfar_private *priv)
@@ -1398,9 +1410,6 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = gfar_set_hash_opts(priv, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
cmd->fs.ring_cookie >= priv->num_rx_queues) ||
@@ -1508,6 +1517,7 @@ const struct ethtool_ops gfar_ethtool_ops = {
#endif
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
+ .set_rxfh_fields = gfar_set_rxfh_fields,
.get_ts_info = gfar_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/google/Kconfig b/drivers/net/ethernet/google/Kconfig
index 564862a57124..14c9431e15e5 100644
--- a/drivers/net/ethernet/google/Kconfig
+++ b/drivers/net/ethernet/google/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_GOOGLE
config GVE
tristate "Google Virtual NIC (gVNIC) support"
depends on (PCI_MSI && (X86 || CPU_LITTLE_ENDIAN))
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
This driver supports Google Virtual NIC (gVNIC)"
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index 4520f1c07a63..e0ec227a50f7 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,5 +1,7 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
+gve-y := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o \
gve_buffer_mgmt_dqo.o
+
+gve-$(CONFIG_PTP_1588_CLOCK) += gve_ptp.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 2fab38c8ee78..cf91195d5f39 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -11,7 +11,9 @@
#include <linux/dmapool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/u64_stats_sync.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -400,8 +402,16 @@ enum gve_packet_state {
GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
+enum gve_tx_pending_packet_dqo_type {
+ GVE_TX_PENDING_PACKET_DQO_SKB,
+ GVE_TX_PENDING_PACKET_DQO_XDP_FRAME
+};
+
struct gve_tx_pending_packet_dqo {
- struct sk_buff *skb; /* skb for this packet */
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
/* 0th element corresponds to the linear portion of `skb`, should be
* unmapped with `dma_unmap_single`.
@@ -431,7 +441,10 @@ struct gve_tx_pending_packet_dqo {
/* Identifies the current state of the packet as defined in
* `enum gve_packet_state`.
*/
- u8 state;
+ u8 state : 2;
+
+ /* gve_tx_pending_packet_dqo_type */
+ u8 type : 1;
/* If packet is an outstanding miss completion, then the packet is
* freed if the corresponding re-injection completion is not received
@@ -453,6 +466,9 @@ struct gve_tx_ring {
/* DQO fields. */
struct {
+ /* Spinlock for XDP tx traffic */
+ spinlock_t xdp_lock;
+
/* Linked list of gve_tx_pending_packet_dqo. Index into
* pending_packets, or -1 if empty.
*
@@ -750,6 +766,12 @@ struct gve_rss_config {
u32 *hash_lut;
};
+struct gve_ptp {
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct gve_priv *priv;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -781,7 +803,7 @@ struct gve_priv {
struct gve_tx_queue_config tx_cfg;
struct gve_rx_queue_config rx_cfg;
- u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
+ u32 num_ntfy_blks; /* split between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
__be32 __iomem *db_bar2; /* "array" of doorbells */
@@ -813,6 +835,7 @@ struct gve_priv {
u32 adminq_set_driver_parameter_cnt;
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
+ u32 adminq_report_nic_timestamp_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
u32 adminq_query_flow_rules_cnt;
@@ -870,6 +893,14 @@ struct gve_priv {
u16 rss_lut_size;
bool cache_rss_config;
struct gve_rss_config rss_config;
+
+ /* True if the device supports reading the nic clock */
+ bool nic_timestamp_supported;
+ struct gve_ptp *ptp;
+ struct kernel_hwtstamp_config ts_config;
+ struct gve_nic_ts_report *nic_ts_report;
+ dma_addr_t nic_ts_report_bus;
+ u64 last_sync_nic_counter; /* Clock counter from last NIC TS report */
};
enum gve_service_task_flags_bit {
@@ -1138,6 +1169,7 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
{
switch (priv->queue_format) {
case GVE_GQI_QPL_FORMAT:
+ case GVE_DQO_RDA_FORMAT:
return true;
default:
return false;
@@ -1161,11 +1193,15 @@ void gve_free_queue_page_list(struct gve_priv *priv,
u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags);
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void *data, int len, void *frame_p);
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
@@ -1249,6 +1285,24 @@ int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
/* RSS config */
int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
+/* PTP and timestamping */
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int gve_clock_nic_ts_read(struct gve_priv *priv);
+int gve_init_clock(struct gve_priv *priv);
+void gve_teardown_clock(struct gve_priv *priv);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int gve_init_clock(struct gve_priv *priv)
+{
+ return 0;
+}
+
+static inline void gve_teardown_clock(struct gve_priv *priv) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 3e8fc33cc11f..4f33d094a2ef 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -46,6 +46,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -225,6 +226,23 @@ void gve_parse_device_option(struct gve_priv *priv,
"RSS config");
*dev_op_rss_config = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_NIC_TIMESTAMP:
+ if (option_length < sizeof(**dev_op_nic_timestamp) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Nic Timestamp",
+ (int)sizeof(**dev_op_nic_timestamp),
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_nic_timestamp))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Nic Timestamp");
+ *dev_op_nic_timestamp = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -246,6 +264,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_rss_config **dev_op_rss_config,
+ struct gve_device_option_nic_timestamp **dev_op_nic_timestamp,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -269,6 +288,7 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
dev_op_flow_steering, dev_op_rss_config,
+ dev_op_nic_timestamp,
dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -306,6 +326,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_set_driver_parameter_cnt = 0;
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
+ priv->adminq_report_nic_timestamp_cnt = 0;
priv->adminq_get_ptype_map_cnt = 0;
priv->adminq_query_flow_rules_cnt = 0;
priv->adminq_cfg_flow_rule_cnt = 0;
@@ -442,6 +463,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
int tail, head;
int i;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
@@ -467,9 +490,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
@@ -477,6 +497,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
u32 opcode;
u32 tail;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
@@ -544,6 +566,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_REPORT_LINK_SPEED:
priv->adminq_report_link_speed_cnt++;
break;
+ case GVE_ADMINQ_REPORT_NIC_TIMESTAMP:
+ priv->adminq_report_nic_timestamp_cnt++;
+ break;
case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++;
break;
@@ -564,6 +589,7 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
+ return -EINVAL;
}
return 0;
@@ -625,7 +651,7 @@ static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
/* The device specifies that the management vector can either be the first irq
* or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
- * the ntfy blks. It if is 0 then the management vector is last, if it is 1 then
+ * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
* the management vector is first.
*
* gve arranges the msix vectors so that the management vector is last.
@@ -709,13 +735,19 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
@@ -788,13 +820,19 @@ int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_create_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
@@ -820,13 +858,19 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
@@ -861,13 +905,19 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_destroy_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_set_default_desc_cnt(struct gve_priv *priv,
@@ -904,6 +954,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_flow_steering,
const struct gve_device_option_rss_config
*dev_op_rss_config,
+ const struct gve_device_option_nic_timestamp
+ *dev_op_nic_timestamp,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -980,10 +1032,15 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"RSS device option enabled with key size of %u, lut size of %u.\n",
priv->rss_key_size, priv->rss_lut_size);
}
+
+ if (dev_op_nic_timestamp &&
+ (supported_features_mask & GVE_SUP_NIC_TIMESTAMP_MASK))
+ priv->nic_timestamp_supported = true;
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_nic_timestamp *dev_op_nic_timestamp = NULL;
struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
@@ -1024,6 +1081,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_buffer_sizes,
&dev_op_flow_steering,
&dev_op_rss_config,
+ &dev_op_nic_timestamp,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -1088,7 +1146,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
dev_op_buffer_sizes, dev_op_flow_steering,
- dev_op_rss_config, dev_op_modify_ring);
+ dev_op_rss_config, dev_op_nic_timestamp,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1200,6 +1259,22 @@ int gve_adminq_report_link_speed(struct gve_priv *priv)
return err;
}
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_NIC_TIMESTAMP);
+ cmd.report_nic_ts = (struct gve_adminq_report_nic_ts) {
+ .nic_ts_report_len =
+ cpu_to_be64(sizeof(struct gve_nic_ts_report)),
+ .nic_ts_report_addr = cpu_to_be64(nic_ts_report_addr),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
struct gve_ptype_lut *ptype_lut)
{
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 228217458275..22a74b6aa17e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -27,6 +27,7 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+ GVE_ADMINQ_REPORT_NIC_TIMESTAMP = 0x11,
GVE_ADMINQ_QUERY_RSS = 0x12,
/* For commands that are larger than 56 bytes */
@@ -174,6 +175,12 @@ struct gve_device_option_rss_config {
static_assert(sizeof(struct gve_device_option_rss_config) == 8);
+struct gve_device_option_nic_timestamp {
+ __be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_nic_timestamp) == 4);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -192,6 +199,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
+ GVE_DEV_OPT_ID_NIC_TIMESTAMP = 0xd,
GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
@@ -206,6 +214,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_NIC_TIMESTAMP = 0x0,
};
enum gve_sup_feature_mask {
@@ -214,6 +223,7 @@ enum gve_sup_feature_mask {
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
+ GVE_SUP_NIC_TIMESTAMP_MASK = 1 << 8,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -392,6 +402,21 @@ struct gve_adminq_report_link_speed {
static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+struct gve_adminq_report_nic_ts {
+ __be64 nic_ts_report_len;
+ __be64 nic_ts_report_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_report_nic_ts) == 16);
+
+struct gve_nic_ts_report {
+ __be64 nic_timestamp; /* NIC clock in nanoseconds */
+ __be64 reserved1;
+ __be64 reserved2;
+ __be64 reserved3;
+ __be64 reserved4;
+};
+
struct stats {
__be32 stat_name;
__be32 queue_id;
@@ -451,7 +476,7 @@ struct gve_ptype_entry {
};
struct gve_ptype_map {
- struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
+ struct gve_ptype_entry ptypes[GVE_NUM_PTYPES]; /* PTYPES are always 10 bits. */
};
struct gve_adminq_get_ptype_map {
@@ -585,6 +610,7 @@ union gve_adminq_command {
struct gve_adminq_query_flow_rules query_flow_rules;
struct gve_adminq_configure_rss configure_rss;
struct gve_adminq_query_rss query_rss;
+ struct gve_adminq_report_nic_ts report_nic_ts;
struct gve_adminq_extended_command extended_command;
};
};
@@ -624,6 +650,8 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
+int gve_adminq_report_nic_ts(struct gve_priv *priv,
+ dma_addr_t nic_ts_report_addr);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index f79cd0591110..d17da841b5a0 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -247,7 +247,8 @@ struct gve_rx_compl_desc_dqo {
};
__le32 hash;
__le32 reserved6;
- __le64 reserved7;
+ __le32 reserved7;
+ __le32 ts; /* timestamp in nanosecs */
} __packed;
static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index e83773fb891f..bb278727f4d9 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -37,6 +37,7 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
+bool gve_xdp_poll_dqo(struct gve_notify_block *block);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
@@ -60,6 +61,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid);
static inline void
gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 3c1da0cf3f61..d0a223250845 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -76,7 +76,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] __nonstring_array
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
- "adminq_query_rss_cnt",
+ "adminq_query_rss_cnt", "adminq_report_nic_timestamp_cnt",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -456,6 +456,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_cfg_flow_rule_cnt;
data[i++] = priv->adminq_cfg_rss_cnt;
data[i++] = priv->adminq_query_rss_cnt;
+ data[i++] = priv->adminq_report_nic_timestamp_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -667,7 +668,7 @@ static u32 gve_get_priv_flags(struct net_device *netdev)
struct gve_priv *priv = netdev_priv(netdev);
u32 ret_flags = 0;
- /* Only 1 flag exists currently: report-stats (BIT(O)), so set that flag. */
+ /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
if (priv->ethtool_flags & BIT(0))
ret_flags |= BIT(0);
return ret_flags;
@@ -798,9 +799,6 @@ static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = gve_del_flow_rule(priv, cmd);
break;
- case ETHTOOL_SRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -835,9 +833,6 @@ static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u
case ETHTOOL_GRXCLSRLALL:
err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = -EOPNOTSUPP;
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -928,6 +923,27 @@ static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
return 0;
}
+static int gve_get_ts_info(struct net_device *netdev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (priv->nic_timestamp_supported) {
+ info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ if (priv->ptp)
+ info->phc_index = ptp_clock_index(priv->ptp->clock);
+ }
+
+ return 0;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -956,5 +972,5 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
.get_link_ksettings = gve_get_link_ksettings,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = gve_get_ts_info,
};
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index dc35a23ec47f..27f97a1d2957 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -414,8 +414,12 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- if (block->tx)
- reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
+ else
+ reschedule |= gve_xdp_poll_dqo(block);
+ }
if (!budget)
return 0;
@@ -619,9 +623,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_counter_array(priv);
if (err)
goto abort_with_rss_config_cache;
- err = gve_alloc_notify_blocks(priv);
+ err = gve_init_clock(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_notify_blocks(priv);
+ if (err)
+ goto abort_with_clock;
err = gve_alloc_stats_report(priv);
if (err)
goto abort_with_ntfy_blocks;
@@ -674,6 +681,8 @@ abort_with_stats_report:
gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
+abort_with_clock:
+ gve_teardown_clock(priv);
abort_with_counter:
gve_free_counter_array(priv);
abort_with_rss_config_cache:
@@ -722,6 +731,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
+ gve_teardown_clock(priv);
gve_clear_device_resources_ok(priv);
}
@@ -1510,6 +1520,19 @@ out:
return err;
}
+static int gve_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ return gve_xdp_xmit_gqi(dev, n, frames, flags);
+ else if (priv->queue_format == GVE_DQO_RDA_FORMAT)
+ return gve_xdp_xmit_dqo(dev, n, frames, flags);
+
+ return -EOPNOTSUPP;
+}
+
static int gve_xsk_pool_enable(struct net_device *dev,
struct xsk_buff_pool *pool,
u16 qid)
@@ -1645,9 +1668,8 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
- netdev_warn(dev, "XDP is not supported in mode %d.\n",
- priv->queue_format);
+ if (priv->header_split_enabled) {
+ netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n");
return -EOPNOTSUPP;
}
@@ -1727,7 +1749,7 @@ int gve_adjust_config(struct gve_priv *priv,
{
int err;
- /* Allocate resources for the new confiugration */
+ /* Allocate resources for the new configuration */
err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -1971,10 +1993,13 @@ u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
return GVE_DEFAULT_RX_BUFFER_SIZE;
}
-/* header-split is not supported on non-DQO_RDA yet even if device advertises it */
+/* Header split is only supported on DQ RDA queue format. If XDP is enabled,
+ * header split is not allowed.
+ */
bool gve_header_split_supported(const struct gve_priv *priv)
{
- return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+ return priv->header_buf_size &&
+ priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
}
int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
@@ -2023,6 +2048,12 @@ static int gve_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
+ if (priv->xdp_prog && (netdev->features & NETIF_F_LRO)) {
+ netdev_warn(netdev,
+ "XDP is not supported when LRO is on.\n");
+ err = -EOPNOTSUPP;
+ goto revert_features;
+ }
if (netif_running(netdev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
@@ -2042,6 +2073,46 @@ revert_features:
return err;
}
+static int gve_get_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ *kernel_config = priv->ts_config;
+ return 0;
+}
+
+static int gve_set_ts_config(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (kernel_config->tx_type != HWTSTAMP_TX_OFF) {
+ NL_SET_ERR_MSG_MOD(extack, "TX timestamping is not supported");
+ return -ERANGE;
+ }
+
+ if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (!priv->nic_ts_report) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "RX timestamping is not supported");
+ kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -EOPNOTSUPP;
+ }
+
+ kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
+ gve_clock_nic_ts_read(priv);
+ ptp_schedule_worker(priv->ptp->clock, 0);
+ } else {
+ ptp_cancel_worker_sync(priv->ptp->clock);
+ }
+
+ priv->ts_config.rx_filter = kernel_config->rx_filter;
+
+ return 0;
+}
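As a usage note, the new .ndo_hwtstamp_get/.ndo_hwtstamp_set handlers are driven by the standard SIOCSHWTSTAMP request from userspace. The sketch below is an illustration only ("eth0" is a placeholder interface name): on devices that report NIC timestamp support, requesting any RX filter is upgraded to HWTSTAMP_FILTER_ALL by gve_set_ts_config(), while any tx_type other than HWTSTAMP_TX_OFF is rejected.

/* Userspace sketch: enable RX hardware timestamping on an interface. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,	  /* gve rejects anything else */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* timestamp every RX packet */
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	if (ret)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d\n", cfg.rx_filter);

	close(fd);
	return ret ? 1 : 0;
}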
+
static const struct net_device_ops gve_netdev_ops = {
.ndo_start_xmit = gve_start_xmit,
.ndo_features_check = gve_features_check,
@@ -2053,6 +2124,8 @@ static const struct net_device_ops gve_netdev_ops = {
.ndo_bpf = gve_xdp,
.ndo_xdp_xmit = gve_xdp_xmit,
.ndo_xsk_wakeup = gve_xsk_wakeup,
+ .ndo_hwtstamp_get = gve_get_ts_config,
+ .ndo_hwtstamp_set = gve_set_ts_config,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -2182,6 +2255,9 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_features = NETDEV_XDP_ACT_BASIC;
xdp_features |= NETDEV_XDP_ACT_REDIRECT;
xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ } else if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ xdp_features = NETDEV_XDP_ACT_BASIC;
+ xdp_features |= NETDEV_XDP_ACT_REDIRECT;
} else {
xdp_features = 0;
}
@@ -2236,7 +2312,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
- /* Big TCP is only supported on DQ*/
+ /* Big TCP is only supported on DQO */
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
@@ -2272,6 +2348,9 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
}
+ priv->ts_config.tx_type = HWTSTAMP_TX_OFF;
+ priv->ts_config.rx_filter = HWTSTAMP_FILTER_NONE;
+
setup_device:
gve_set_netdev_xdp_features(priv);
err = gve_setup_device_resources(priv);
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
new file mode 100644
index 000000000000..e96247c9d68d
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2025 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+/* Interval to schedule a nic timestamp calibration, 250ms. */
+#define GVE_NIC_TS_SYNC_INTERVAL_MS 250
+
+/* Read the nic timestamp from hardware via the admin queue. */
+int gve_clock_nic_ts_read(struct gve_priv *priv)
+{
+ u64 nic_raw;
+ int err;
+
+ err = gve_adminq_report_nic_ts(priv, priv->nic_ts_report_bus);
+ if (err)
+ return err;
+
+ nic_raw = be64_to_cpu(priv->nic_ts_report->nic_timestamp);
+ WRITE_ONCE(priv->last_sync_nic_counter, nic_raw);
+
+ return 0;
+}
+
+static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
+ struct gve_priv *priv = ptp->priv;
+ int err;
+
+ if (gve_get_reset_in_progress(priv) || !gve_get_admin_queue_ok(priv))
+ goto out;
+
+ err = gve_clock_nic_ts_read(priv);
+ if (err && net_ratelimit())
+ dev_err(&priv->pdev->dev,
+ "%s read err %d\n", __func__, err);
+
+out:
+ return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS);
+}
+
+static const struct ptp_clock_info gve_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "gve clock",
+ .do_aux_work = gve_ptp_do_aux_work,
+};
+
+static int gve_ptp_init(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp;
+ int err;
+
+ if (!priv->nic_timestamp_supported) {
+ dev_dbg(&priv->pdev->dev, "Device does not support PTP\n");
+ return -EOPNOTSUPP;
+ }
+
+ priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL);
+ if (!priv->ptp)
+ return -ENOMEM;
+
+ ptp = priv->ptp;
+ ptp->info = gve_ptp_caps;
+ ptp->clock = ptp_clock_register(&ptp->info, &priv->pdev->dev);
+
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&priv->pdev->dev, "PTP clock registration failed\n");
+ err = PTR_ERR(ptp->clock);
+ goto free_ptp;
+ }
+
+ ptp->priv = priv;
+ return 0;
+
+free_ptp:
+ kfree(ptp);
+ priv->ptp = NULL;
+ return err;
+}
+
+static void gve_ptp_release(struct gve_priv *priv)
+{
+ struct gve_ptp *ptp = priv->ptp;
+
+ if (!ptp)
+ return;
+
+ if (ptp->clock)
+ ptp_clock_unregister(ptp->clock);
+
+ kfree(ptp);
+ priv->ptp = NULL;
+}
+
+int gve_init_clock(struct gve_priv *priv)
+{
+ int err;
+
+ if (!priv->nic_timestamp_supported)
+ return 0;
+
+ err = gve_ptp_init(priv);
+ if (err)
+ return err;
+
+ priv->nic_ts_report =
+ dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ &priv->nic_ts_report_bus,
+ GFP_KERNEL);
+ if (!priv->nic_ts_report) {
+ dev_err(&priv->pdev->dev, "%s dma alloc error\n", __func__);
+ err = -ENOMEM;
+ goto release_ptp;
+ }
+
+ return 0;
+
+release_ptp:
+ gve_ptp_release(priv);
+ return err;
+}
+
+void gve_teardown_clock(struct gve_priv *priv)
+{
+ gve_ptp_release(priv);
+
+ if (priv->nic_ts_report) {
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index dcb0545baa50..96743e1d80f3 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -8,6 +8,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_utils.h"
+#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
@@ -437,6 +438,29 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
+/* Expand the hardware timestamp to its full 64-bit width and add it to the
+ * skb.
+ *
+ * This algorithm works by using the passed hardware timestamp to generate a
+ * diff relative to the last read of the nic clock. This diff can be positive or
+ * negative, as it is possible that we have read the clock more recently than
+ * the hardware has received this packet. To detect this, we use the high bit of
+ * the diff, and assume that the read is more recent if the high bit is set. In
+ * this case we invert the process.
+ *
+ * Note that this means if the time delta between packet reception and the last
+ * clock read is greater than ~2 seconds, this will provide invalid results.
+ */
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts)
+{
+ u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
+ struct sk_buff *skb = rx->ctx.skb_head;
+ u32 low = (u32)last_read;
+ s32 diff = hwts - low;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+}
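The wrap-around arithmetic described in the comment above is easy to get wrong, so here is a small self-contained sketch of the same technique with two worked cases: a packet stamped just before the last clock read (across a 32-bit wrap) and one stamped just after. It is an illustration only, under the assumption that the 32-bit completion timestamp and the saved 64-bit counter share the same nanosecond timebase; expand_hwts() is a name made up for the example.

/* Illustration only: 32-bit -> 64-bit timestamp expansion via a signed diff. */
#include <stdint.h>
#include <stdio.h>

static uint64_t expand_hwts(uint64_t last_sync, uint32_t hwts)
{
	uint32_t low = (uint32_t)last_sync;
	int32_t diff = (int32_t)(hwts - low);	/* wraps correctly mod 2^32 */

	return last_sync + diff;		/* sign-extended adjustment */
}

int main(void)
{
	/* Stamped 32 ns before the last read; counter wrapped in between.
	 * Prints 4294967280 (0xfffffff0).
	 */
	printf("%llu\n",
	       (unsigned long long)expand_hwts(0x100000010ULL, 0xfffffff0u));

	/* Stamped 16 ns after the last read.  Prints 4294967328 (0x100000020). */
	printf("%llu\n",
	       (unsigned long long)expand_hwts(0x100000010ULL, 0x00000020u));
	return 0;
}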
+
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
@@ -547,27 +571,66 @@ static int gve_rx_append_frags(struct napi_struct *napi,
return 0;
}
+static int gve_xdp_tx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp)
+{
+ struct gve_tx_ring *tx;
+ struct xdp_frame *xdpf;
+ u32 tx_qid;
+ int err;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return -ENOSPC;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+ tx = &priv->tx[tx_qid];
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ err = gve_xdp_xmit_one_dqo(priv, tx, xdpf);
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ return err;
+}
+
static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
struct xdp_buff *xdp, struct bpf_prog *xprog,
int xdp_act,
struct gve_rx_buf_state_dqo *buf_state)
{
- u64_stats_update_begin(&rx->statss);
+ int err;
switch (xdp_act) {
case XDP_ABORTED:
case XDP_DROP:
default:
- rx->xdp_actions[xdp_act]++;
+ gve_free_buffer(rx, buf_state);
break;
case XDP_TX:
- rx->xdp_tx_errors++;
+ err = gve_xdp_tx_dqo(priv, rx, xdp);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
break;
case XDP_REDIRECT:
- rx->xdp_redirect_errors++;
+ err = xdp_do_redirect(priv->dev, xdp, xprog);
+ if (unlikely(err))
+ goto err;
+ gve_reuse_buffer(rx, buf_state);
break;
}
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+ return;
+err:
+ u64_stats_update_begin(&rx->statss);
+ if (xdp_act == XDP_TX)
+ rx->xdp_tx_errors++;
+ else if (xdp_act == XDP_REDIRECT)
+ rx->xdp_redirect_errors++;
u64_stats_update_end(&rx->statss);
gve_free_buffer(rx, buf_state);
+ return;
}
/* Returns 0 if descriptor is completed successfully.
@@ -767,6 +830,9 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
if (feat & NETIF_F_RXCSUM)
gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
+ if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
+ gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts));
+
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
*/
@@ -786,16 +852,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
{
- struct napi_struct *napi = &block->napi;
- netdev_features_t feat = napi->dev->features;
-
- struct gve_rx_ring *rx = block->rx;
- struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq;
-
+ struct gve_rx_compl_queue_dqo *complq;
+ struct napi_struct *napi;
+ netdev_features_t feat;
+ struct gve_rx_ring *rx;
+ struct gve_priv *priv;
+ u64 xdp_redirects;
u32 work_done = 0;
u64 bytes = 0;
+ u64 xdp_txs;
int err;
+ napi = &block->napi;
+ feat = napi->dev->features;
+
+ rx = block->rx;
+ priv = rx->gve;
+ complq = &rx->dqo.complq;
+
+ xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+ xdp_txs = rx->xdp_actions[XDP_TX];
+
while (work_done < budget) {
struct gve_rx_compl_desc_dqo *compl_desc =
&complq->desc_ring[complq->head];
@@ -869,6 +946,12 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
rx->ctx.skb_tail = NULL;
}
+ if (xdp_txs != rx->xdp_actions[XDP_TX])
+ gve_xdp_tx_flush_dqo(priv, rx->q_num);
+
+ if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+ xdp_do_flush();
+
gve_rx_post_buffers_dqo(rx);
u64_stats_update_begin(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 1b40bf0c811a..c6ff0968929d 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -823,8 +823,8 @@ static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
return ndescs;
}
-int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
- u32 flags)
+int gve_xdp_xmit_gqi(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
struct gve_tx_ring *tx;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 9d705d94b065..ce5370b741ec 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -9,6 +9,7 @@
#include "gve_utils.h"
#include "gve_dqo.h"
#include <net/ip.h>
+#include <linux/bpf.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
@@ -110,6 +111,14 @@ static bool gve_has_pending_packet(struct gve_tx_ring *tx)
return false;
}
+void gve_xdp_tx_flush_dqo(struct gve_priv *priv, u32 xdp_qid)
+{
+ u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+ struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+}
+
static struct gve_tx_pending_packet_dqo *
gve_alloc_pending_packet(struct gve_tx_ring *tx)
{
@@ -198,7 +207,8 @@ void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
+ if (tx->netdev_txq)
+ netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
}
@@ -276,7 +286,8 @@ void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_tx_add_to_block(priv, idx);
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ if (idx < priv->tx_cfg.num_queues)
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
@@ -295,6 +306,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
tx->dev = hdev;
+ spin_lock_init(&tx->dqo_tx.xdp_lock);
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
@@ -439,12 +451,28 @@ static u32 num_avail_tx_slots(const struct gve_tx_ring *tx)
return tx->mask - num_used;
}
+/* Checks if the requested number of slots are available in the ring */
+static bool gve_has_tx_slots_available(struct gve_tx_ring *tx, u32 slots_req)
+{
+ u32 num_avail = num_avail_tx_slots(tx);
+
+ slots_req += GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP;
+
+ if (num_avail >= slots_req)
+ return true;
+
+ /* Update cached TX head pointer */
+ tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
+
+ return num_avail_tx_slots(tx) >= slots_req;
+}
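The helper above follows a common lock-free ring idiom: check a locally cached consumer index first, and only on apparent exhaustion refresh it from the shared counter with acquire semantics before checking once more. Below is a minimal sketch of that idiom; it is an illustration only, omits the GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP padding the driver adds, and struct ring / has_slots() are names invented for the example.

/* Illustration only: cached-head ring availability check with a slow path. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint32_t mask;		  /* ring size - 1, size is a power of 2 */
	uint32_t tail;		  /* producer index, owned by this CPU   */
	uint32_t cached_head;	  /* last consumer index read from HW    */
	_Atomic uint32_t hw_head; /* consumer index advanced elsewhere    */
};

static uint32_t avail(const struct ring *r)
{
	return r->mask - ((r->tail - r->cached_head) & r->mask);
}

bool has_slots(struct ring *r, uint32_t want)
{
	if (avail(r) >= want)
		return true;

	/* Slow path: refresh the cached head and check one more time. */
	r->cached_head = atomic_load_explicit(&r->hw_head,
					      memory_order_acquire);
	return avail(r) >= want;
}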
+
static bool gve_has_avail_slots_tx_dqo(struct gve_tx_ring *tx,
int desc_count, int buf_count)
{
return gve_has_pending_packet(tx) &&
- num_avail_tx_slots(tx) >= desc_count &&
- gve_has_free_tx_qpl_bufs(tx, buf_count);
+ gve_has_tx_slots_available(tx, desc_count) &&
+ gve_has_free_tx_qpl_bufs(tx, buf_count);
}
/* Stops the queue if available descriptors is less than 'count'.
@@ -456,12 +484,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return 0;
- /* Update cached TX head pointer */
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
- if (likely(gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
- return 0;
-
/* No space, so stop the queue */
tx->stop_queue++;
netif_tx_stop_queue(tx->netdev_txq);
@@ -472,8 +494,6 @@ static int gve_maybe_stop_tx_dqo(struct gve_tx_ring *tx,
/* After stopping queue, check if we can transmit again in order to
* avoid TOCTOU bug.
*/
- tx->dqo_tx.head = atomic_read_acquire(&tx->dqo_compl.hw_tx_head);
-
if (likely(!gve_has_avail_slots_tx_dqo(tx, desc_count, buf_count)))
return -EBUSY;
@@ -500,11 +520,9 @@ static void gve_extract_tx_metadata_dqo(const struct sk_buff *skb,
}
static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
- struct sk_buff *skb, u32 len, u64 addr,
+ bool enable_csum, u32 len, u64 addr,
s16 compl_tag, bool eop, bool is_gso)
{
- const bool checksum_offload_en = skb->ip_summed == CHECKSUM_PARTIAL;
-
while (len > 0) {
struct gve_tx_pkt_desc_dqo *desc =
&tx->dqo.tx_ring[*desc_idx].pkt;
@@ -515,7 +533,7 @@ static void gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx, u32 *desc_idx,
.buf_addr = cpu_to_le64(addr),
.dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
.end_of_packet = cur_eop,
- .checksum_offload_enable = checksum_offload_en,
+ .checksum_offload_enable = enable_csum,
.compl_tag = cpu_to_le16(compl_tag),
.buf_size = cur_len,
};
@@ -612,6 +630,25 @@ gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
};
}
+static void gve_tx_update_tail(struct gve_tx_ring *tx, u32 desc_idx)
+{
+ u32 last_desc_idx = (desc_idx - 1) & tx->mask;
+ u32 last_report_event_interval =
+ (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
+
+ /* Commit the changes to our state */
+ tx->dqo_tx.tail = desc_idx;
+
+ /* Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+
+ if (unlikely(last_report_event_interval >= GVE_TX_MIN_RE_INTERVAL)) {
+ tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
+ tx->dqo_tx.last_re_idx = last_desc_idx;
+ }
+}
+
static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
struct sk_buff *skb,
struct gve_tx_pending_packet_dqo *pkt,
@@ -619,6 +656,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -644,7 +682,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag,
/*eop=*/shinfo->nr_frags == 0, is_gso);
}
@@ -664,7 +702,7 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
dma[pkt->num_bufs], addr);
++pkt->num_bufs;
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum, len, addr,
completion_tag, is_eop, is_gso);
}
@@ -709,6 +747,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
u32 *desc_idx,
bool is_gso)
{
+ bool enable_csum = skb->ip_summed == CHECKSUM_PARTIAL;
u32 copy_offset = 0;
dma_addr_t dma_addr;
u32 copy_len;
@@ -730,7 +769,7 @@ static int gve_tx_add_skb_copy_dqo(struct gve_tx_ring *tx,
copy_offset += copy_len;
dma_sync_single_for_device(tx->dev, dma_addr,
copy_len, DMA_TO_DEVICE);
- gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb,
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx, enable_csum,
copy_len,
dma_addr,
completion_tag,
@@ -768,6 +807,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
return -ENOMEM;
pkt->skb = skb;
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_SKB;
completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
@@ -800,24 +840,7 @@ static int gve_tx_add_skb_dqo(struct gve_tx_ring *tx,
tx->dqo_tx.posted_packet_desc_cnt += pkt->num_bufs;
- /* Commit the changes to our state */
- tx->dqo_tx.tail = desc_idx;
-
- /* Request a descriptor completion on the last descriptor of the
- * packet if we are allowed to by the HW enforced interval.
- */
- {
- u32 last_desc_idx = (desc_idx - 1) & tx->mask;
- u32 last_report_event_interval =
- (last_desc_idx - tx->dqo_tx.last_re_idx) & tx->mask;
-
- if (unlikely(last_report_event_interval >=
- GVE_TX_MIN_RE_INTERVAL)) {
- tx->dqo.tx_ring[last_desc_idx].pkt.report_event = true;
- tx->dqo_tx.last_re_idx = last_desc_idx;
- }
- }
-
+ gve_tx_update_tail(tx, desc_idx);
return 0;
err:
@@ -951,9 +974,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
/* Metadata + (optional TSO) + data descriptors. */
total_num_descs = 1 + skb_is_gso(skb) + num_buffer_descs;
- if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs +
- GVE_TX_MIN_DESC_PREVENT_CACHE_OVERLAP,
- num_buffer_descs))) {
+ if (unlikely(gve_maybe_stop_tx_dqo(tx, total_num_descs,
+ num_buffer_descs))) {
return -1;
}
@@ -1107,16 +1129,32 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
}
}
tx->dqo_tx.completed_packet_desc_cnt += pending_packet->num_bufs;
- if (tx->dqo.qpl)
- gve_free_tx_qpl_bufs(tx, pending_packet);
- else
+
+ switch (pending_packet->type) {
+ case GVE_TX_PENDING_PACKET_DQO_SKB:
+ if (tx->dqo.qpl)
+ gve_free_tx_qpl_bufs(tx, pending_packet);
+ else
+ gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->skb->len;
+
+ napi_consume_skb(pending_packet->skb, is_napi);
+ pending_packet->skb = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ case GVE_TX_PENDING_PACKET_DQO_XDP_FRAME:
gve_unmap_packet(tx->dev, pending_packet);
+ (*pkts)++;
+ *bytes += pending_packet->xdpf->len;
- *bytes += pending_packet->skb->len;
- (*pkts)++;
- napi_consume_skb(pending_packet->skb, is_napi);
- pending_packet->skb = NULL;
- gve_free_pending_packet(tx, pending_packet);
+ xdp_return_frame(pending_packet->xdpf);
+ pending_packet->xdpf = NULL;
+ gve_free_pending_packet(tx, pending_packet);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
}
static void gve_handle_miss_completion(struct gve_priv *priv,
@@ -1287,9 +1325,10 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
num_descs_cleaned++;
}
- netdev_tx_completed_queue(tx->netdev_txq,
- pkt_compl_pkts + miss_compl_pkts,
- pkt_compl_bytes + miss_compl_bytes);
+ if (tx->netdev_txq)
+ netdev_tx_completed_queue(tx->netdev_txq,
+ pkt_compl_pkts + miss_compl_pkts,
+ pkt_compl_bytes + miss_compl_bytes);
remove_miss_completions(priv, tx);
remove_timed_out_completions(priv, tx);
@@ -1325,3 +1364,98 @@ bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean)
compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
}
+
+bool gve_xdp_poll_dqo(struct gve_notify_block *block)
+{
+ struct gve_tx_compl_desc *compl_desc;
+ struct gve_tx_ring *tx = block->tx;
+ struct gve_priv *priv = block->priv;
+
+ gve_clean_tx_done_dqo(priv, tx, &block->napi);
+
+ /* Return true if we still have work. */
+ compl_desc = &tx->dqo.compl_ring[tx->dqo_compl.head];
+ return compl_desc->generation != tx->dqo_compl.cur_gen_bit;
+}
+
+int gve_xdp_xmit_one_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct xdp_frame *xdpf)
+{
+ struct gve_tx_pending_packet_dqo *pkt;
+ u32 desc_idx = tx->dqo_tx.tail;
+ s16 completion_tag;
+ int num_descs = 1;
+ dma_addr_t addr;
+ int err;
+
+ if (unlikely(!gve_has_tx_slots_available(tx, num_descs)))
+ return -EBUSY;
+
+ pkt = gve_alloc_pending_packet(tx);
+ if (unlikely(!pkt))
+ return -EBUSY;
+
+ pkt->type = GVE_TX_PENDING_PACKET_DQO_XDP_FRAME;
+ pkt->num_bufs = 0;
+ pkt->xdpf = xdpf;
+ completion_tag = pkt - tx->dqo.pending_packets;
+
+ /* Generate Packet Descriptor */
+ addr = dma_map_single(tx->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ err = dma_mapping_error(tx->dev, addr);
+ if (unlikely(err))
+ goto err;
+
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], xdpf->len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ pkt->num_bufs++;
+
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ false, xdpf->len,
+ addr, completion_tag, true,
+ false);
+
+ gve_tx_update_tail(tx, desc_idx);
+ return 0;
+
+err:
+ pkt->xdpf = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
+ return err;
+}
+
+int gve_xdp_xmit_dqo(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
+
+ tx = &priv->tx[qid];
+
+ spin_lock(&tx->dqo_tx.xdp_lock);
+ for (i = 0; i < n; i++) {
+ err = gve_xdp_xmit_one_dqo(priv, tx, frames[i]);
+ if (err)
+ break;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);
+
+ spin_unlock(&tx->dqo_tx.xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xmit += n;
+ tx->xdp_xmit_errors += n - i;
+ u64_stats_update_end(&tx->statss);
+
+ return i ? i : err;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 4e44f28288f9..8dc7d6fae224 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -690,9 +690,9 @@ struct hnae3_ae_ops {
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
int (*set_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ const struct ethtool_rxfh_fields *cmd);
int (*get_rss_tuple)(struct hnae3_handle *handle,
- struct ethtool_rxnfc *cmd);
+ struct ethtool_rxfh_fields *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 4ad4e8ab2f1f..37396ca4ecfc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -348,7 +348,7 @@ static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
+ return head == (u32)hw->cmq.csq.next_to_use;
}
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index 4e2bb6556b1c..1eca53aaf598 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
@@ -422,7 +422,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
}
EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
-static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+static u8 hclge_comm_get_rss_hash_bits(const struct ethtool_rxfh_fields *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
@@ -448,7 +448,7 @@ static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
}
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
index cdafa63fe38b..cbc02b50c6e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -108,7 +108,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
@@ -129,5 +129,5 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
- struct ethtool_rxnfc *nfc);
+ const struct ethtool_rxfh_fields *nfc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 4e5d8bc39a1b..35e57eebcf57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -684,7 +684,7 @@ static int hns3_dbg_rx_queue_info(struct hnae3_handle *h,
char *buf, int len)
{
char data_str[ARRAY_SIZE(rx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
char *result[ARRAY_SIZE(rx_queue_info_items)];
struct hns3_nic_priv *priv = h->priv;
char content[HNS3_DBG_INFO_LEN];
@@ -789,7 +789,7 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
char *buf, int len)
{
char data_str[ARRAY_SIZE(tx_queue_info_items)][HNS3_DBG_DATA_STR_LEN];
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
char *result[ARRAY_SIZE(tx_queue_info_items)];
struct hns3_nic_priv *priv = h->priv;
char content[HNS3_DBG_INFO_LEN];
@@ -1034,7 +1034,7 @@ static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
static void
hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
unsigned long *caps = ae_dev->caps;
u32 i, state;
@@ -1239,7 +1239,7 @@ static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
enum hnae3_dbg_cmd cmd, char *buf, int len)
{
- const struct hnae3_ae_ops *ops = dbg_data->handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(dbg_data->handle);
const struct hns3_dbg_func *cmd_func;
u32 i;
@@ -1364,7 +1364,7 @@ hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd)
int hns3_dbg_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
const char *name = pci_name(handle->pdev);
int ret;
u32 i;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b03b8758c777..49fcee7a6d0f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -547,9 +547,9 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
@@ -960,7 +960,7 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
{
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ops->request_update_promisc_mode)
ops->request_update_promisc_mode(handle);
@@ -1304,7 +1304,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(skb->dev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
union l4_hdr_info l4;
/* device version above V3(include V3), the hardware can
@@ -1504,7 +1504,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
* VLAN enabled, only one VLAN header is allowed in skb, otherwise it
* will cause RAS error.
*/
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (unlikely(skb_vlan_tagged_multi(skb) &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
handle->port_base_vlan_state ==
@@ -1690,8 +1690,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
#define HNS3_LIKELY_BD_NUM 1
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- unsigned int frag_buf_num;
- int k, sizeoflast;
+ unsigned int frag_buf_num, k;
+ int sizeoflast;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1863,7 +1863,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
- int i;
+ unsigned int i;
for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
@@ -1891,7 +1891,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -2207,9 +2207,9 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
struct sg_table *sgt;
- int i, bd_num = 0;
+ int bd_num = 0;
dma_addr_t dma;
- u32 cb_len;
+ u32 cb_len, i;
int nents;
if (skb_has_frag_list(skb))
@@ -2447,7 +2447,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
h->ae_algo->ops->cls_flower_active(h)) {
netdev_err(netdev,
- "there are offloaded TC filters active, cannot disable HW TC offload");
+ "there are offloaded TC filters active, cannot disable HW TC offload\n");
return -EINVAL;
}
@@ -2544,7 +2544,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
struct hnae3_handle *handle = priv->ae_handle;
struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- unsigned int idx;
+ int idx;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
@@ -2770,7 +2770,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static int hns3_get_timeout_queue(struct net_device *ndev)
{
- int i;
+ unsigned int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->num_tx_queues; i++) {
@@ -2851,7 +2851,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring;
- int timeout_queue;
+ u32 timeout_queue;
timeout_queue = hns3_get_timeout_queue(ndev);
if (timeout_queue >= ndev->num_tx_queues) {
@@ -3821,7 +3821,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
__be16 type = skb->protocol;
struct tcphdr *th;
- int depth = 0;
+ u32 depth = 0;
while (eth_type_vlan(type)) {
struct vlan_hdr *vh;
@@ -4747,7 +4747,7 @@ map_ring_fail:
static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
@@ -5226,7 +5226,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
enum dim_cq_period_mode mode, bool is_tx)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
struct hnae3_handle *handle = priv->ae_handle;
int i;
@@ -5264,7 +5264,7 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
static void hns3_state_init(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -5934,7 +5934,7 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
static void hns3_process_hw_error(struct hnae3_handle *handle,
enum hnae3_hw_error_type type)
{
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
if (hns3_hw_err[i].type == type) {
@@ -5961,8 +5961,8 @@ static int __init hns3_init_module(void)
{
int ret;
- pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
- pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+ pr_debug("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_debug("%s: %s\n", hns3_driver_name, hns3_copyright);
client.type = HNAE3_CLIENT_KNIC;
snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d36c4ed16d8d..d3bad5d1b888 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -621,7 +621,7 @@ struct hns3_reset_type_map {
enum hnae3_reset_type rst_type;
};
-static inline int ring_space(struct hns3_enet_ring *ring)
+static inline u32 ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
* hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
@@ -692,7 +692,7 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; (pos); pos = (pos)->next)
+ for ((pos) = (head).ring; (pos); (pos) = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6715222aeb66..d5454e126c85 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -86,7 +86,7 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset);
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -171,7 +171,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- ae_dev = pci_get_drvdata(handle->pdev);
+ ae_dev = hns3_get_ae_dev(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
@@ -436,7 +436,7 @@ static void hns3_self_test(struct net_device *ndev,
data[i] = HNS3_NIC_LB_TEST_UNEXECUTED;
if (hns3_nic_resetting(ndev)) {
- netdev_err(ndev, "dev resetting!");
+ netdev_err(ndev, "dev resetting!\n");
goto failure;
}
@@ -489,7 +489,7 @@ static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
if (!ops->get_sset_count)
return -EOPNOTSUPP;
@@ -540,8 +540,8 @@ static void hns3_get_strings_tqps(struct hnae3_handle *handle, u8 **data)
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
- int i;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
+ u32 i;
if (!ops->get_strings)
return;
@@ -569,7 +569,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
struct hns3_nic_priv *nic_priv = handle->priv;
struct hns3_enet_ring *ring;
u8 *stat;
- int i, j;
+ u32 i, j;
/* get stats for Tx */
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -692,7 +692,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return;
@@ -706,7 +706,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!test_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps))
return -EOPNOTSUPP;
@@ -725,7 +725,7 @@ static int hns3_set_pauseparam(struct net_device *netdev,
static void hns3_get_ksettings(struct hnae3_handle *h,
struct ethtool_link_ksettings *cmd)
{
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
/* 1.auto_neg & speed & duplex from cmd */
if (ops->get_ksettings_an_result)
@@ -751,7 +751,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
const struct hnae3_ae_ops *ops;
u8 module_type;
u8 media_type;
@@ -794,7 +794,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
default:
- netdev_warn(netdev, "Unknown media type");
+ netdev_warn(netdev, "Unknown media type\n");
return 0;
}
@@ -814,7 +814,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u32 lane_num;
@@ -842,7 +842,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
if (cmd->base.duplex == DUPLEX_HALF &&
media_type != HNAE3_MEDIA_TYPE_COPPER) {
netdev_err(netdev,
- "only copper port supports half duplex!");
+ "only copper port supports half duplex!\n");
return -EINVAL;
}
@@ -861,8 +861,8 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
int ret;
/* Chip don't support this mode. */
@@ -932,7 +932,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
return ae_dev->dev_specs.rss_ind_tbl_size;
}
@@ -954,7 +954,7 @@ static int hns3_set_rss(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -978,6 +978,16 @@ static int hns3_set_rss(struct net_device *netdev,
rxfh->hfunc);
}
+static int hns3_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -988,10 +998,6 @@ static int hns3_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = h->kinfo.num_tqps;
return 0;
- case ETHTOOL_GRXFH:
- if (h->ae_algo->ops->get_rss_tuple)
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_GRXCLSRLCNT:
if (h->ae_algo->ops->get_fd_rule_cnt)
return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
@@ -1024,8 +1030,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
{
enum hnae3_reset_type rst_type = HNAE3_NONE_RESET;
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(h);
const struct hns3_reset_type_map *rst_type_map;
enum ethtool_reset_flags rst_flags;
u32 i, size;
@@ -1189,7 +1195,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(h);
u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
@@ -1275,15 +1281,22 @@ static int hns3_set_ringparam(struct net_device *ndev,
return ret;
}
+static int hns3_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+}
+
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- if (h->ae_algo->ops->set_rss_tuple)
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
- return -EOPNOTSUPP;
case ETHTOOL_SRXCLSRLINS:
if (h->ae_algo->ops->add_fd_entry)
return h->ae_algo->ops->add_fd_entry(h, cmd);
@@ -1300,7 +1313,7 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct phy_device *phy = netdev->phydev;
int autoneg;
@@ -1308,7 +1321,7 @@ static int hns3_nway_reset(struct net_device *netdev)
return 0;
if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ netdev_err(netdev, "dev resetting!\n");
return -EBUSY;
}
@@ -1377,7 +1390,7 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
u32 rx_gl, tx_gl;
if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
@@ -1449,7 +1462,7 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
!ae_dev->dev_specs.int_ql_max) {
@@ -1473,7 +1486,7 @@ hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
@@ -1649,8 +1662,8 @@ static void hns3_get_fec_stats(struct net_device *netdev,
struct ethtool_fec_stats *fec_stats)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
return;
@@ -1700,8 +1713,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u8 fec_ability;
u8 fec_mode;
@@ -1725,8 +1738,8 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
u32 fec_mode;
if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
@@ -1747,8 +1760,8 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
struct hns3_sfp_type sfp_type;
int ret;
@@ -1797,8 +1810,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
- const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
!ops->get_module_eeprom)
@@ -1924,7 +1937,7 @@ static int hns3_set_tunable(struct net_device *netdev,
int i, ret = 0;
if (hns3_nic_resetting(netdev) || !priv->ring) {
- netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ netdev_err(netdev, "failed to set tunable value, dev resetting!\n");
return -EBUSY;
}
@@ -2105,6 +2118,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.get_channels = hns3_get_channels,
.set_channels = hns3_set_channels,
@@ -2142,6 +2157,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
+ .get_rxfh_fields = hns3_get_rxfh_fields,
+ .set_rxfh_fields = hns3_set_rxfh_fields,
.get_link_ksettings = hns3_get_link_ksettings,
.set_link_ksettings = hns3_set_link_ksettings,
.nway_reset = hns3_nway_reset,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index c46490693594..f130020a1227 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -830,10 +830,10 @@ hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
{
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ u32 index, entry, i, cnt, min_num;
struct hclge_desc *desc_src;
- u32 index, entry, i, cnt;
- int bd_num, min_num, ret;
struct hclge_desc *desc;
+ int bd_num, ret;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -885,9 +885,9 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
struct hclge_desc *desc_src;
- int bd_num, min_num, ret;
+ int bd_num, min_num, ret, i;
struct hclge_desc *desc;
- u32 entry, i;
+ u32 entry;
ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
if (ret)
@@ -1279,7 +1279,7 @@ static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
{
const struct hclge_dbg_reg_type_info *reg_info;
int pos = 0, ret = 0;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
reg_info = &hclge_dbg_reg_info[i];
@@ -2110,7 +2110,7 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
true);
- req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)desc.data;
req0->index = cpu_to_le16(i);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2648,9 +2648,8 @@ static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
struct hclge_mac_node *mac_node, *tmp;
struct hclge_vport *vport;
struct list_head *list;
- u32 func_id;
+ u32 func_id, i;
int pos = 0;
- int i;
for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
result[i] = &data_str[i][0];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a7de67699a01..35c984a256ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -582,7 +582,7 @@ static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
int size, u64 *data)
{
u64 *buf = data;
- u32 i;
+ int i;
for (i = 0; i < size; i++) {
if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
@@ -599,7 +599,7 @@ static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 **data)
{
- u32 i;
+ int i;
if (stringset != ETH_SS_STATS)
return;
@@ -2358,7 +2358,7 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
- req = (struct hclge_rx_com_thrd *)&desc[i].data;
+ req = (struct hclge_rx_com_thrd *)desc[i].data;
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
@@ -2624,7 +2624,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lan
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
- if (!mac->support_autoneg && mac->speed == speed &&
+ if (!mac->support_autoneg && mac->speed == (u32)speed &&
mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
@@ -2652,7 +2652,7 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
if (ret)
return ret;
- hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_speed = (u32)speed;
hdev->hw.mac.req_duplex = duplex;
return 0;
@@ -3446,7 +3446,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed;
+ u32 speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -4872,7 +4872,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -4890,7 +4890,7 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
u8 tuple_sets;
@@ -6989,7 +6989,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
struct hlist_node *node2;
- int cnt = 0;
+ u32 cnt = 0;
if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
@@ -8223,14 +8223,14 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[1].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ desc[2].data[word_num] &= cpu_to_le32(~(1U << bit_num));
else
desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
@@ -9292,7 +9292,7 @@ static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
static int init_mgr_tbl(struct hclge_dev *hdev)
{
int ret;
- int i;
+ u32 i;
for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
@@ -10713,7 +10713,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
mutex_lock(&hdev->vport_lock);
/* VF's mps must fit within hdev->mps */
- if (vport->vport_id && max_frm_size > hdev->mps) {
+ if (vport->vport_id && (u32)max_frm_size > hdev->mps) {
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
} else if (vport->vport_id) {
@@ -10724,7 +10724,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater then VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
- if (max_frm_size < hdev->vport[i].mps) {
+ if ((u32)max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
@@ -11214,7 +11214,7 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hnae3_client *client = vport->nic.client;
struct hclge_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.reset_cnt;
+ u32 rst_cnt = hdev->rst_stats.reset_cnt;
int ret;
ret = client->ops->init_instance(&vport->nic);
@@ -11258,7 +11258,7 @@ static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
{
struct hclge_dev *hdev = ae_dev->priv;
struct hnae3_client *client;
- int rst_cnt;
+ u32 rst_cnt;
int ret;
if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
@@ -11423,7 +11423,7 @@ static int hclge_pci_init(struct hclge_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev,
- "can't set consistent PCI DMA");
+ "can't set consistent PCI DMA\n");
goto err_disable_device;
}
dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
@@ -12088,7 +12088,7 @@ static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
- max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) {
dev_err(&hdev->pdev->dev,
"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
@@ -12113,7 +12113,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
if (!vport)
return -EINVAL;
- if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ if (!force && (u32)max_tx_rate == vport->vf_info.max_tx_rate)
return 0;
ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
@@ -12904,7 +12904,7 @@ static struct hnae3_ae_algo ae_algo = {
static int __init hclge_init(void)
{
- pr_info("%s is initializing\n", HCLGE_NAME);
+ pr_debug("%s is initializing\n", HCLGE_NAME);
hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 59c863306657..c7ff12a6c076 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -749,16 +749,17 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
+ int rss_hash_key_size;
u8 index;
index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
+ rss_hash_key_size = sizeof(rss_cfg->rss_hash_key);
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
- if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(rss_cfg->rss_hash_key)) {
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > rss_hash_key_size) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
@@ -800,7 +801,7 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
+ int tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
return tail == hw->hw.cmq.crq.next_to_use;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 9a456ebf9b7c..96553109f44c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -151,7 +151,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev)
mdio_bus->parent = &hdev->pdev->dev;
mdio_bus->priv = hdev;
- mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ mdio_bus->phy_mask = ~(1U << mac->phy_addr);
ret = mdiobus_register(mdio_bus);
if (ret) {
dev_err(mdio_bus->parent,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 63483636c074..61faddcc3dd0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -25,7 +25,7 @@ struct ifreq;
#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
#define HCLGE_PTP_TIME_SEC_L_REG 0x54
#define HCLGE_PTP_TIME_NSEC_REG 0x58
-#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_MASK 0x3fffffffLL
#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
#define HCLGE_PTP_TIME_SYNC_REG 0x5C
#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index c4f35e8e2177..33136a1e02cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -606,7 +606,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
@@ -624,7 +624,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+ struct ethtool_rxfh_fields *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
u8 tuple_sets;
@@ -2465,7 +2465,7 @@ static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
struct hnae3_client *client)
{
struct hclgevf_dev *hdev = ae_dev->priv;
- int rst_cnt = hdev->rst_stats.rst_cnt;
+ u32 rst_cnt = hdev->rst_stats.rst_cnt;
int ret;
ret = client->ops->init_instance(&hdev->nic);
@@ -2625,7 +2625,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
- dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
goto err_disable_device;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 85c2a634c8f9..f5c99ca54369 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -159,7 +159,7 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->hw.cmq.crq.next_to_use;
+ return tail == (u32)hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
index 7d9d9dbc7560..9de01e344e27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_regs.c
@@ -127,37 +127,38 @@ void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_queue *tqp;
- int i, j, reg_um;
+ int i, j, reg_num;
u32 *reg = data;
*version = hdev->fw_version;
reg += hclgevf_reg_get_header(reg);
/* fetching per-VF registers values from VF PCIe register space */
- reg_um = ARRAY_SIZE(cmdq_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_CMDQ, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(common_reg_addr_list);
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_COMMON, reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
- reg_um = ARRAY_SIZE(ring_reg_addr_list);
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
for (j = 0; j < hdev->num_tqps; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_um, reg);
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_RING, reg_num, reg);
tqp = &hdev->htqp[j].q;
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = readl_relaxed(tqp->io_base -
HCLGEVF_TQP_REG_OFFSET +
ring_reg_addr_list[i]);
}
- reg_um = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR, reg_um, reg);
- for (i = 0; i < reg_um; i++)
+ reg += hclgevf_reg_get_tlv(HCLGEVF_REG_TAG_TQP_INTR,
+ reg_num, reg);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
HCLGEVF_RING_INT_REG_OFFSET * j);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index c559dd4291d3..e9f338e9dbe7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -919,9 +919,10 @@ static int hinic_set_channels(struct net_device *netdev,
return 0;
}
-static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_rss_type rss_type = { 0 };
int err;
@@ -964,7 +965,7 @@ static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
return 0;
}
-static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
+static int set_l4_rss_hash_ops(const struct ethtool_rxfh_fields *cmd,
struct hinic_rss_type *rss_type)
{
u8 rss_l4_en = 0;
@@ -1000,16 +1001,18 @@ static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
return 0;
}
-static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
- struct ethtool_rxnfc *cmd)
+static int hinic_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
- struct hinic_rss_type *rss_type = &nic_dev->rss_type;
+ struct hinic_dev *nic_dev = netdev_priv(dev);
+ struct hinic_rss_type *rss_type;
int err;
- if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
- cmd->data = 0;
+ rss_type = &nic_dev->rss_type;
+
+ if (!(nic_dev->flags & HINIC_RSS_ENABLE))
return -EOPNOTSUPP;
- }
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -1108,26 +1111,6 @@ static int hinic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nic_dev->num_qps;
break;
- case ETHTOOL_GRXFH:
- err = hinic_get_rss_hash_opts(nic_dev, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int err = 0;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- err = hinic_set_rss_hash_opts(nic_dev, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1797,11 +1780,12 @@ static const struct ethtool_ops hinic_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
@@ -1829,11 +1813,12 @@ static const struct ethtool_ops hinicvf_ethtool_ops = {
.get_channels = hinic_get_channels,
.set_channels = hinic_set_channels,
.get_rxnfc = hinic_get_rxnfc,
- .set_rxnfc = hinic_set_rxnfc,
.get_rxfh_key_size = hinic_get_rxfh_key_size,
.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
.get_rxfh = hinic_get_rxfh,
.set_rxfh = hinic_set_rxfh,
+ .get_rxfh_fields = hinic_get_rxfh_fields,
+ .set_rxfh_fields = hinic_set_rxfh_fields,
.get_sset_count = hinic_get_sset_count,
.get_ethtool_stats = hinic_get_ethtool_stats,
.get_strings = hinic_get_strings,
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index ae08257dd1d2..3f7f73430be4 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -482,7 +482,6 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
{
struct hinic3_sq_wqe_combo wqe_combo = {};
struct hinic3_tx_info *tx_info;
- struct hinic3_txq *tx_q = txq;
u32 offload, queue_info = 0;
struct hinic3_sq_task task;
u16 wqebb_cnt, num_sge;
@@ -506,9 +505,9 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
if (likely(wqebb_cnt > txq->tx_stop_thrs))
txq->tx_stop_thrs = min(wqebb_cnt, txq->tx_start_thrs);
- netif_subqueue_try_stop(netdev, tx_q->sq->q_id,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_start_thrs);
+ netif_subqueue_try_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
return NETDEV_TX_BUSY;
}
@@ -542,12 +541,11 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
goto err_drop_pkt;
}
- netdev_tx_sent_queue(netdev_get_tx_queue(netdev, txq->sq->q_id),
- skb->len);
- netif_subqueue_maybe_stop(netdev, tx_q->sq->q_id,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_stop_thrs,
- tx_q->tx_start_thrs);
+ netif_subqueue_sent(netdev, txq->sq->q_id, skb->len);
+ netif_subqueue_maybe_stop(netdev, txq->sq->q_id,
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_stop_thrs,
+ txq->tx_start_thrs);
hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner);
hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ,
@@ -631,7 +629,6 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
struct net_device *netdev = txq->netdev;
u16 hw_ci, sw_ci, q_id = txq->sq->q_id;
struct hinic3_tx_info *tx_info;
- struct hinic3_txq *tx_q = txq;
unsigned int bytes_compl = 0;
unsigned int pkts = 0;
u16 wqebb_cnt = 0;
@@ -663,8 +660,8 @@ bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt);
netif_subqueue_completed_wake(netdev, q_id, pkts, bytes_compl,
- hinic3_wq_free_wqebbs(&tx_q->sq->wq),
- tx_q->tx_start_thrs);
+ hinic3_wq_free_wqebbs(&txq->sq->wq),
+ txq->tx_start_thrs);
return pkts == HINIC3_TX_POLL_WEIGHT;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 9364bc2b4eb1..c0bbb12eed2e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2096,54 +2096,47 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
}
}
-static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info,
- u32 __always_unused *rule_locs)
+static int e1000_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *info)
{
- info->data = 0;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc;
- switch (info->cmd) {
- case ETHTOOL_GRXFH: {
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 mrqc;
+ info->data = 0;
- mrqc = er32(MRQC);
+ mrqc = er32(MRQC);
- if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
- return 0;
-
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V6_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV6_FLOW:
- if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
+ if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
- }
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
default:
- return -EOPNOTSUPP;
+ break;
}
+ return 0;
}
static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
@@ -2352,7 +2345,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
- .get_rxnfc = e1000_get_rxnfc,
+ .get_rxfh_fields = e1000_get_rxfh_fields,
.get_ts_info = e1000e_get_ts_info,
.get_eee = e1000e_get_eee,
.set_eee = e1000e_set_eee,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 1bc5b6c0b897..1954a04460d1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -691,9 +691,11 @@ static int fm10k_set_coalesce(struct net_device *dev,
return 0;
}
-static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *cmd)
+static int fm10k_get_rssh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on fm10k */
@@ -743,9 +745,6 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
cmd->data = interface->num_rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = fm10k_get_rss_hash_opts(interface, cmd);
- break;
default:
break;
}
@@ -753,9 +752,11 @@ static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
- struct ethtool_rxnfc *nfc)
+static int fm10k_set_rssh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct fm10k_intfc *interface = netdev_priv(dev);
int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
interface->flags);
int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
@@ -871,22 +872,6 @@ static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
return 0;
}
-static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = fm10k_set_rss_hash_opt(interface, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
struct fm10k_hw *hw = &interface->hw;
@@ -1176,7 +1161,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_coalesce = fm10k_get_coalesce,
.set_coalesce = fm10k_set_coalesce,
.get_rxnfc = fm10k_get_rxnfc,
- .set_rxnfc = fm10k_set_rxnfc,
.get_regs = fm10k_get_regs,
.get_regs_len = fm10k_get_regs_len,
.self_test = fm10k_self_test,
@@ -1186,6 +1170,8 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.get_rxfh_key_size = fm10k_get_rssrk_size,
.get_rxfh = fm10k_get_rssh,
.set_rxfh = fm10k_set_rssh,
+ .get_rxfh_fields = fm10k_get_rssh_fields,
+ .set_rxfh_fields = fm10k_set_rssh_fields,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c67963bfe14e..54d5fdc303ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -548,6 +548,7 @@ struct i40e_pf {
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
u16 sw_int_count; /* SW interrupt count */
+ u32 link_down_events;
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 8a7a83f83ee5..2ff17d50135c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3,6 +3,7 @@
/* ethtool support for i40e */
+#include <linux/net/intel/libie/pctype.h>
#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_txrx_common.h"
@@ -2749,6 +2750,15 @@ skip_ol_tests:
netif_info(pf, drv, netdev, "testing failed\n");
}
+static void i40e_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
@@ -3129,15 +3139,12 @@ static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
return __i40e_set_coalesce(netdev, ec, queue);
}
-/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @pf: pointer to the physical function struct
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+static int i40e_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 flow_pctype = 0;
u64 i_set = 0;
@@ -3146,16 +3153,16 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
switch (cmd->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ flow_pctype = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
@@ -3412,28 +3419,28 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
switch (rule->flow_type) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
break;
default:
/* If we have stored a filter with a flow type not listed here
@@ -3535,9 +3542,6 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = vsi->rss_size;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = i40e_get_rss_hash_opts(pf, cmd);
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
/* report total rule count */
@@ -3566,7 +3570,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
* Returns value of bits to be set per user request
**/
static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
- struct ethtool_rxnfc *nfc,
+ const struct ethtool_rxfh_fields *nfc,
u64 i_setc)
{
u64 i_set = i_setc;
@@ -3611,15 +3615,13 @@ static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
}
#define FLOW_PCTYPES_SIZE 64
-/**
- * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @pf: pointer to the physical function struct
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+static int i40e_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
@@ -3643,40 +3645,40 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case TCP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps))
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
flow_pctypes);
break;
case UDP_V4_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
pf->hw.caps)) {
- set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
flow_pctypes);
- set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ set_bit(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
flow_pctypes);
}
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -3685,7 +3687,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER);
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -3694,15 +3696,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
if ((nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER);
break;
case IPV4_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4);
break;
case IPV6_FLOW:
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6);
break;
default:
return -EINVAL;
@@ -4312,36 +4314,36 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
switch (fsp->flow_type & ~FLOW_EXT) {
case SCTP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP;
fdir_filter_count = &pf->fd_sctp4_filter_cnt;
break;
case TCP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP;
fdir_filter_count = &pf->fd_tcp4_filter_cnt;
break;
case UDP_V4_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP;
fdir_filter_count = &pf->fd_udp4_filter_cnt;
break;
case SCTP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP;
fdir_filter_count = &pf->fd_sctp6_filter_cnt;
break;
case TCP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP;
fdir_filter_count = &pf->fd_tcp6_filter_cnt;
break;
case UDP_V6_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP;
fdir_filter_count = &pf->fd_udp6_filter_cnt;
break;
case IP_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
fdir_filter_count = &pf->fd_ip4_filter_cnt;
flex_l3 = true;
break;
case IPV6_USER_FLOW:
- index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ index = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
fdir_filter_count = &pf->fd_ip6_filter_cnt;
flex_l3 = true;
break;
@@ -4677,8 +4679,8 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
* separate support, we'll always assume and enforce that the two flow
* types must have matching input sets.
*/
- if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ if (index == LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER)
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
new_mask);
/* Add the new offset and update table, if necessary */
@@ -4954,13 +4956,9 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40e_set_rss_hash_opt(pf, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
@@ -5809,6 +5807,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_regs = i40e_get_regs,
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = i40e_get_link_ext_stats,
.get_wol = i40e_get_wol,
.set_wol = i40e_set_wol,
.set_eeprom = i40e_set_eeprom,
@@ -5835,6 +5834,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_rxfh_indir_size = i40e_get_rxfh_indir_size,
.get_rxfh = i40e_get_rxfh,
.set_rxfh = i40e_set_rxfh,
+ .get_rxfh_fields = i40e_get_rxfh_fields,
+ .set_rxfh_fields = i40e_set_rxfh_fields,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
.get_module_info = i40e_get_module_info,
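
For reference, a minimal sketch of the callback shape behind the new .get_rxfh_fields/.set_rxfh_fields ethtool ops wired up above; the helper names are placeholders, not i40e code, and the bodies only illustrate the contract implied by this conversion:

/* Sketch only: per-flow-type RSS hash-field callbacks replacing the old
 * ETHTOOL_GRXFH/SRXFH cases in get/set_rxnfc. my_* names are placeholders.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int my_get_rxfh_fields(struct net_device *netdev,
                              struct ethtool_rxfh_fields *cmd)
{
        /* Report the configured hash fields for cmd->flow_type */
        cmd->data = RXH_IP_SRC | RXH_IP_DST;
        return 0;
}

static int my_set_rxfh_fields(struct net_device *netdev,
                              const struct ethtool_rxfh_fields *cmd,
                              struct netlink_ext_ack *extack)
{
        /* Validate the requested fields, then program the hardware */
        if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
                          RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported hash fields");
                return -EINVAL;
        }
        return 0;
}

static const struct ethtool_ops my_ethtool_ops = {
        .get_rxfh_fields = my_get_rxfh_fields,
        .set_rxfh_fields = my_set_rxfh_fields,
};

Unlike the old rxnfc path, these callbacks receive only the hash-field request, which is why the ETHTOOL_GRXFH/SRXFH branches disappear from i40e_get_rxnfc()/i40e_set_rxnfc() above.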
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f1c9e575703e..3b4f59d978a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3,6 +3,7 @@
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <linux/module.h>
@@ -9188,47 +9189,47 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
i40e_reset_fdir_filter_cnt(pf);
/* Reprogram the default input set for TCP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for TCP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for UDP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for SCTP/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP,
I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
/* Reprogram the default input set for Other/IPv4 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
/* Reprogram the default input set for Other/IPv6 */
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_FRAG_IPV6,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
@@ -9656,7 +9657,7 @@ static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
* settings. It is safe to restore the default input set
* because there are no active TCPv4 filter rules.
*/
- i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+ i40e_write_fd_input_set(pf, LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
@@ -9959,6 +9960,9 @@ static void i40e_link_event(struct i40e_pf *pf)
new_link == netif_carrier_ok(vsi->netdev)))
return;
+ if (!new_link && old_link)
+ pf->link_down_events++;
+
i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
@@ -12507,7 +12511,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= i40e_pf_get_default_rss_hena(pf);
+ hena |= i40e_pf_get_default_rss_hashcfg(pf);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
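
The link_down_events counter incremented in i40e_link_event() above presumably feeds the .get_link_ext_stats callback registered in the ethtool ops earlier in this patch; the actual i40e_get_link_ext_stats() body is not part of this excerpt, so the following is only a hedged sketch of such a handler:

/* Sketch: exposing a PF-level link-down counter through ethtool's extended
 * link statistics. Assumes the counter lives in struct i40e_pf as added here.
 */
static void my_get_link_ext_stats(struct net_device *netdev,
                                  struct ethtool_link_ext_stats *stats)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;

        stats->link_down_events = pf->link_down_events;
}

User space can then read the counter via the ethtool netlink link-state request without the driver exporting a custom statistic.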
@@ -15891,7 +15895,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
- pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index c006f716a3bd..048c33039130 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/pctype.h>
#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
@@ -397,12 +398,12 @@ static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_UDPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP);
if (ret) {
kfree(raw_packet);
@@ -444,12 +445,12 @@ static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_TCPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP);
if (ret) {
kfree(raw_packet);
@@ -499,12 +500,12 @@ static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
else
ret = i40e_prepare_fdir_filter
(pf, fd_data, add, raw_packet,
I40E_SCTPIP6_DUMMY_PACKET_LEN,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
if (ret) {
kfree(raw_packet);
@@ -543,11 +544,11 @@ static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
int i;
if (ipv4) {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV4;
} else {
- iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
- iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ iter_start = LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = LIBIE_FILTER_PCTYPE_FRAG_IPV6;
}
for (i = iter_start; i <= iter_end; i++) {
@@ -2948,9 +2949,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
tx_ring->queue_index);
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
- (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
- (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ (LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 7c26c9a2bf65..1e5fd63d47f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -4,6 +4,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
#include <net/xdp.h>
#include "i40e_type.h"
@@ -71,30 +72,30 @@ enum i40e_dyn_idx {
#define I40E_SW_ITR I40E_IDX_ITR2
/* Supported RSS offloads */
-#define I40E_DEFAULT_RSS_HENA ( \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-
-#define i40e_pf_get_default_rss_hena(pf) \
+#define I40E_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HASHCFG_EXPANDED (I40E_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hashcfg(pf) \
(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
- I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+ I40E_DEFAULT_RSS_HASHCFG_EXPANDED : I40E_DEFAULT_RSS_HASHCFG)
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 28568e126850..a09ed83835ff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -929,38 +929,6 @@ struct i40e_filter_program_desc {
#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-/* Packet Classifier Types for filters */
-enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- I40E_FILTER_PCTYPE_FCOE_OX = 48,
- I40E_FILTER_PCTYPE_FCOE_RX = 49,
- I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
enum i40e_filter_program_desc_dest {
I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
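
The per-driver packet classifier enum removed above moves into the shared libie header included earlier (<linux/net/intel/libie/pctype.h>). The header itself is not shown in this diff; based on the LIBIE_FILTER_PCTYPE_* names used throughout and the hardware-defined values being deleted, the shared definition is expected to look roughly like this:

/* Expected shape of the shared enum; values taken from the enum removed
 * above (PCTYPEs are fixed by the hardware, so i40e and iavf share them).
 */
enum libie_filter_pctype {
        LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP      = 29,
        LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP    = 30,
        LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP              = 31,
        LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK   = 32,
        LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP              = 33,
        LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP             = 34,
        LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER            = 35,
        LIBIE_FILTER_PCTYPE_FRAG_IPV4                  = 36,
        LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP      = 39,
        LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP    = 40,
        LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP              = 41,
        LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK   = 42,
        LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP              = 43,
        LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP             = 44,
        LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER            = 45,
        LIBIE_FILTER_PCTYPE_FRAG_IPV6                  = 46,
        LIBIE_FILTER_PCTYPE_FCOE_OX                    = 48,
        LIBIE_FILTER_PCTYPE_FCOE_RX                    = 49,
        LIBIE_FILTER_PCTYPE_FCOE_OTHER                 = 50,
        LIBIE_FILTER_PCTYPE_L2_PAYLOAD                 = 63,
};

Keeping one definition in libie is what lets the identical enums be dropped from both i40e_type.h here and iavf_type.h later in this diff.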
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 88e6bef69342..b232edf68ab1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -812,7 +812,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
}
if (!idx) {
- u64 hena = i40e_pf_get_default_rss_hena(pf);
+ u64 hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
u8 broadcast[ETH_ALEN];
vf->lan_vsi_idx = vsi->idx;
@@ -841,8 +841,9 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
dev_info(&pf->pdev->dev,
"Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
- wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hashcfg);
+ wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hashcfg >> 32));
/* program mac filter only for VF VSI */
ret = i40e_sync_vsi_filters(vsi);
if (ret)
@@ -3447,15 +3448,15 @@ err:
}
/**
- * i40e_vc_get_rss_hena
+ * i40e_vc_get_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Return the RSS HENA bits allowed by the hardware
+ * Return the RSS Hash configuration bits allowed by the hardware
**/
-static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_get_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
struct i40e_pf *pf = vf->pf;
int aq_ret = 0;
int len = 0;
@@ -3464,7 +3465,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- len = sizeof(struct virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hashcfg);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
@@ -3472,26 +3473,26 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
len = 0;
goto err;
}
- vrh->hena = i40e_pf_get_default_rss_hena(pf);
+ vrh->hashcfg = i40e_pf_get_default_rss_hashcfg(pf);
err:
/* send the response back to the VF */
- aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS,
aq_ret, (u8 *)vrh, len);
kfree(vrh);
return aq_ret;
}
/**
- * i40e_vc_set_rss_hena
+ * i40e_vc_set_rss_hashcfg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Set the RSS HENA bits for the VF
+ * Set the RSS Hash configuration bits for the VF
**/
-static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+static int i40e_vc_set_rss_hashcfg(struct i40e_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh =
- (struct virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
int aq_ret = 0;
@@ -3500,13 +3501,14 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
aq_ret = -EINVAL;
goto err;
}
- i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)vrh->hashcfg);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
- (u32)(vrh->hena >> 32));
+ (u32)(vrh->hashcfg >> 32));
/* send the response to the VF */
err:
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, aq_ret);
}
/**
@@ -4253,11 +4255,11 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_LUT:
ret = i40e_vc_config_rss_lut(vf, msg);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- ret = i40e_vc_get_rss_hena(vf, msg);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ ret = i40e_vc_get_rss_hashcfg(vf, msg);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- ret = i40e_vc_set_rss_hena(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ ret = i40e_vc_set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
ret = i40e_vc_enable_vlan_stripping(vf, msg);
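
The virtchnl message payload is renamed along with the opcodes. Judging from the hashcfg member accessed on both the PF side here and the VF side later in this diff, the renamed structure is simply the following (a sketch of the expected <linux/avf/virtchnl.h> change, not the authoritative header):

/* Renamed from struct virtchnl_rss_hena { u64 hena; }. The 64-bit value is
 * a bitmap of LIBIE_FILTER_PCTYPE_* bits enabled for RSS hashing, exchanged
 * via VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS / VIRTCHNL_OP_SET_RSS_HASHCFG.
 */
struct virtchnl_rss_hashcfg {
        u64 hashcfg;    /* bitmap indexed by packet classifier type */
};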
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index f7a98ff43a57..a87e0c6d4017 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -114,8 +114,6 @@ struct iavf_q_vector {
u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
bool arm_wb_state;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.
@@ -315,8 +313,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_GET_RSS_HASHCFG BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_RSS_HASHCFG BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
#define IAVF_FLAG_AQ_SET_RSS_HFUNC BIT_ULL(15)
@@ -456,7 +454,7 @@ struct iavf_adapter {
u32 aq_wait_count;
/* RSS stuff */
enum virtchnl_rss_algorithm hfunc;
- u64 hena;
+ u64 rss_hashcfg;
u16 rss_key_size;
u16 rss_lut_size;
u8 *rss_key;
@@ -600,8 +598,8 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter);
bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
-void iavf_get_hena(struct iavf_adapter *adapter);
-void iavf_set_hena(struct iavf_adapter *adapter);
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter);
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
void iavf_set_rss_lut(struct iavf_adapter *adapter);
void iavf_set_rss_hfunc(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 2b2b315205b5..05d72be3fe80 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1307,14 +1307,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
return iavf_fdir_del_fltr(adapter, false, fsp->location);
}
-/**
- * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
- * @cmd: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
+static u32 iavf_adv_rss_parse_hdrs(const struct ethtool_rxfh_fields *cmd)
{
u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
@@ -1350,15 +1343,8 @@ static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
return hdrs;
}
-/**
- * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
- * @cmd: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended hash fields for
- * RSS configuration
- */
-static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
+static u64
+iavf_adv_rss_parse_hash_flds(const struct ethtool_rxfh_fields *cmd, bool symm)
{
u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
@@ -1416,17 +1402,12 @@ static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd, bool symm)
return hfld;
}
-/**
- * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss_old, *rss_new;
bool rss_new_add = false;
bool symm = false;
@@ -1493,17 +1474,10 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
return err;
}
-/**
- * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @adapter: pointer to the VF adapter structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+iavf_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *cmd)
{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_adv_rss *rss;
u64 hash_flds;
u32 hdrs;
@@ -1568,9 +1542,6 @@ static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = iavf_del_fdir_ethtool(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1612,9 +1583,6 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -1812,6 +1780,8 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
.set_rxfh = iavf_set_rxfh,
+ .get_rxfh_fields = iavf_get_rxfh_fields,
+ .set_rxfh_fields = iavf_set_rxfh_fields,
.get_channels = iavf_get_channels,
.set_channels = iavf_set_channels,
.get_rxfh_key_size = iavf_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 81d7249d1149..c859a096de9f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -528,33 +528,6 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
}
/**
- * iavf_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- **/
-static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct iavf_q_vector *q_vector =
- container_of(notify, struct iavf_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * iavf_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- **/
-static void iavf_irq_affinity_release(struct kref *ref) {}
-
-/**
* iavf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
* @basename: device basename
@@ -568,7 +541,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
unsigned int vector, q_vectors;
unsigned int rx_int_idx = 0, tx_int_idx = 0;
int irq_num, err;
- int cpu;
iavf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -603,17 +575,6 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
- /* register for affinity change notifications */
- q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
- q_vector->affinity_notify.release =
- iavf_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* Spread the IRQ affinity hints across online CPUs. Note that
- * get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_update_affinity_hint.
- */
- cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -622,8 +583,6 @@ free_queue_irqs:
while (vector) {
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -665,6 +624,7 @@ static int iavf_request_misc_irq(struct iavf_adapter *adapter)
**/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
+ struct iavf_q_vector *q_vector;
int vector, irq_num, q_vectors;
if (!adapter->msix_entries)
@@ -673,10 +633,10 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) {
+ q_vector = &adapter->q_vectors[vector];
+ netif_napi_set_irq_locked(&q_vector->napi, -1);
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &adapter->q_vectors[vector]);
+ free_irq(irq_num, q_vector);
}
}
@@ -1823,12 +1783,13 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
+ adapter->rss_hashcfg =
+ IAVF_DEFAULT_RSS_HASHCFG_EXPANDED;
else
- adapter->hena = IAVF_DEFAULT_RSS_HENA;
+ adapter->rss_hashcfg = IAVF_DEFAULT_RSS_HASHCFG;
- wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
- wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->rss_hashcfg);
+ wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->rss_hashcfg >> 32));
}
iavf_fill_rss_lut(adapter);
@@ -1846,7 +1807,7 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
**/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
- int q_idx = 0, num_q_vectors;
+ int q_idx = 0, num_q_vectors, irq_num;
struct iavf_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
@@ -1856,14 +1817,15 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;
q_vector = &adapter->q_vectors[q_idx];
q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
q_vector->reg_idx = q_idx;
- cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
- netif_napi_add_locked(adapter->netdev, &q_vector->napi,
- iavf_napi_poll);
+ netif_napi_add_config_locked(adapter->netdev, &q_vector->napi,
+ iavf_napi_poll, q_idx);
+ netif_napi_set_irq_locked(&q_vector->napi, irq_num);
}
return 0;
@@ -2195,12 +2157,12 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
- iavf_get_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_RSS_HASHCFG) {
+ iavf_get_rss_hashcfg(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
- iavf_set_hena(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_HASHCFG) {
+ iavf_set_rss_hashcfg(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
@@ -5387,6 +5349,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_alloc_etherdev;
}
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
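
The removal of the hand-rolled affinity notifier above relies on the core NAPI config API; a minimal sketch of the pattern now used, built from the calls introduced in this patch (my_* names and the surrounding queue bookkeeping are placeholders):

/* Sketch: core-managed IRQ affinity via persistent NAPI config slots,
 * replacing per-driver irq_set_affinity_notifier/irq_update_affinity_hint.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
        /* driver poll work would go here */
        napi_complete_done(napi, 0);
        return 0;
}

static void my_attach_vector(struct net_device *netdev,
                             struct napi_struct *napi, int q_idx, int irq_num)
{
        /* One persistent config slot per queue vector (index q_idx)... */
        netif_napi_add_config_locked(netdev, napi, my_napi_poll, q_idx);
        /* ...bound to its IRQ so the core tracks affinity for the driver */
        netif_napi_set_irq_locked(napi, irq_num);
}

/* At probe time, before registering the netdev:
 *      netif_set_affinity_auto(netdev);
 */

On teardown the binding is dropped with netif_napi_set_irq_locked(napi, -1) before free_irq(), as iavf_free_traffic_irqs() now does above.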
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 422312b8b54a..aaf70c625655 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -723,7 +723,7 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
- page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);
+ libeth_rx_recycle_slow(rx_fqes->netmem);
if (unlikely(++i == rx_ring->count))
i = 0;
@@ -1197,10 +1197,11 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ u32 hr = netmem_get_pp(rx_buffer->netmem)->p.offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->offset + hr, size, rx_buffer->truesize);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer->netmem, rx_buffer->offset + hr,
+ size, rx_buffer->truesize);
}
/**
@@ -1214,12 +1215,13 @@ static void iavf_add_rx_frag(struct sk_buff *skb,
static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- u32 hr = rx_buffer->page->pp->p.offset;
+ struct page *buf_page = __netmem_to_page(rx_buffer->netmem);
+ u32 hr = buf_page->pp->p.offset;
struct sk_buff *skb;
void *va;
/* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->offset;
+ va = page_address(buf_page) + rx_buffer->offset;
net_prefetch(va + hr);
/* build an skb around the page buffer */
@@ -1648,7 +1650,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
* continue to poll, otherwise we must stop polling so the
* interrupt can move to the correct cpu.
*/
- if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+ if (!cpumask_test_cpu(cpu_id,
+ &q_vector->napi.config->affinity_mask)) {
/* Tell napi that we are done polling */
napi_complete_done(napi, work_done);
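
The Rx buffer handling above moves from bare struct page to the netmem handles carried by libeth buffers; a condensed sketch of the new pattern using the calls introduced in this patch (placeholder function names):

/* Sketch: attach a netmem-backed libeth Rx buffer to an skb, and recycle an
 * unused buffer outside the fast path.
 */
static void my_add_rx_frag(struct sk_buff *skb,
                           const struct libeth_fqe *rx_fqe, unsigned int size)
{
        /* Headroom comes from the page_pool parameters behind the netmem */
        u32 hr = netmem_get_pp(rx_fqe->netmem)->p.offset;

        skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_fqe->netmem,
                               rx_fqe->offset + hr, size, rx_fqe->truesize);
}

static void my_drop_rx_buf(const struct libeth_fqe *rx_fqe)
{
        /* Return the buffer to its pool; slow path, not per-packet */
        libeth_rx_recycle_slow(rx_fqe->netmem);
}

Only iavf_build_skb() still needs a real page pointer (for page_address()), which is why it converts explicitly with __netmem_to_page() above.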
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 79ad554f2d53..df49b0b1d54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IAVF_TXRX_H_
#define _IAVF_TXRX_H_
+#include <linux/net/intel/libie/pctype.h>
+
/* Interrupt Throttling and Rate Limiting Goodies */
#define IAVF_DEFAULT_IRQ_WORK 256
@@ -59,26 +61,26 @@ enum iavf_dyn_idx_t {
#define IAVF_PE_ITR IAVF_IDX_ITR2
/* Supported RSS offloads */
-#define IAVF_DEFAULT_RSS_HENA ( \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
-
-#define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+#define IAVF_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define IAVF_DEFAULT_RSS_HASHCFG_EXPANDED (IAVF_DEFAULT_RSS_HASHCFG | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index f9e1319620f4..cb12e86ba4a6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -463,38 +463,6 @@ enum iavf_tx_ctx_desc_cmd_bits {
IAVF_TX_CTX_DESC_SWPE = 0x40
};
-/* Packet Classifier Types for filters */
-enum iavf_filter_pctype {
- /* Note: Values 0-28 are reserved for future use.
- * Value 29, 30, 32 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
- IAVF_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
- IAVF_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
- IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
- IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
- IAVF_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use.
- * Value 39, 40, 42 are not supported on XL710 and X710.
- */
- IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
- IAVF_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
- IAVF_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
- IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
- IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
- IAVF_FILTER_PCTYPE_FRAG_IPV6 = 46,
- /* Note: Value 47 is reserved for future use */
- IAVF_FILTER_PCTYPE_FCOE_OX = 48,
- IAVF_FILTER_PCTYPE_FCOE_RX = 49,
- IAVF_FILTER_PCTYPE_FCOE_OTHER = 50,
- /* Note: Values 51-62 are reserved for future use */
- IAVF_FILTER_PCTYPE_L2_PAYLOAD = 63,
-};
-
#define IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT 30
#define IAVF_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 07f0d0a0f1e2..31a4289fc0ee 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1145,12 +1145,12 @@ void iavf_request_stats(struct iavf_adapter *adapter)
}
/**
- * iavf_get_hena
+ * iavf_get_rss_hashcfg
* @adapter: adapter structure
*
- * Request hash enable capabilities from PF
+ * Request RSS Hash enable bits from PF
**/
-void iavf_get_hena(struct iavf_adapter *adapter)
+void iavf_get_rss_hashcfg(struct iavf_adapter *adapter)
{
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1158,20 +1158,20 @@ void iavf_get_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
- adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
+ adapter->current_op = VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, NULL, 0);
}
/**
- * iavf_set_hena
+ * iavf_set_rss_hashcfg
* @adapter: adapter structure
*
* Request the PF to set our RSS hash capabilities
**/
-void iavf_set_hena(struct iavf_adapter *adapter)
+void iavf_set_rss_hashcfg(struct iavf_adapter *adapter)
{
- struct virtchnl_rss_hena vrh;
+ struct virtchnl_rss_hashcfg vrh;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1179,10 +1179,10 @@ void iavf_set_hena(struct iavf_adapter *adapter)
adapter->current_op);
return;
}
- vrh.hena = adapter->hena;
- adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
- adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
+ vrh.hashcfg = adapter->rss_hashcfg;
+ adapter->current_op = VIRTCHNL_OP_SET_RSS_HASHCFG;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HASHCFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HASHCFG, (u8 *)&vrh,
sizeof(vrh));
}
@@ -2752,11 +2752,12 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS: {
+ struct virtchnl_rss_hashcfg *vrh =
+ (struct virtchnl_rss_hashcfg *)msg;
if (msglen == sizeof(*vrh))
- adapter->hena = vrh->hena;
+ adapter->rss_hashcfg = vrh->hashcfg;
else
dev_warn(&adapter->pdev->dev,
"Invalid message %d from PF\n", v_opcode);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9e0d9f710441..d0f9c9492363 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -53,7 +53,7 @@ ice-$(CONFIG_PCI_IOV) += \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o ice_tspll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index ddd0ad68185b..657e1f608f1a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -67,6 +67,7 @@
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
+#include "ice_tspll.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
@@ -614,6 +615,7 @@ struct ice_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u32 link_down_events;
u8 wol_ena : 1; /* software state of WoL */
u32 wakeup_reason; /* last wakeup reason */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index bdee499f991a..0ae7387e0599 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -2272,6 +2272,22 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_CGU_INPUT_PHASE_OFFSET_BYTES 6
+
+struct ice_cgu_input_measure {
+ u8 phase_offset[ICE_CGU_INPUT_PHASE_OFFSET_BYTES];
+ __le32 freq;
+} __packed __aligned(sizeof(__le16));
+
+#define ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M ICE_M(0xf, 0)
+
+/* Get CGU input measure command response data structure (indirect 0x0C59) */
+struct ice_aqc_get_cgu_input_measure {
+ u8 dpll_idx_opt;
+ u8 length;
+ u8 rsvd[6];
+};
+
#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
/* Get CGU abilities command response data structure (indirect 0x0C61) */
@@ -2721,6 +2737,7 @@ struct ice_aq_desc {
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_get_cgu_input_measure get_cgu_input_measure;
struct ice_aqc_set_cgu_input_config set_cgu_input_config;
struct ice_aqc_get_cgu_input_config get_cgu_input_config;
struct ice_aqc_set_cgu_output_config set_cgu_output_config;
@@ -2772,6 +2789,8 @@ enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* Success */
ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
ICE_AQ_RC_ENOENT = 2, /* No such element */
+ ICE_AQ_RC_ESRCH = 3, /* Bad opcode */
+ ICE_AQ_RC_EAGAIN = 8, /* Try again */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
@@ -2927,6 +2946,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_input_measure = 0x0C59,
ice_aqc_opc_get_cgu_abilities = 0x0C61,
ice_aqc_opc_set_cgu_input_config = 0x0C62,
ice_aqc_opc_get_cgu_input_config = 0x0C63,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 6db4ad8fc70b..270f936ce807 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -623,7 +623,10 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ice_alloc_rx_bufs(ring, num_bufs);
+ if (ring->vsi->type == ICE_VSI_CTRL)
+ ice_init_ctrl_rx_descs(ring, num_bufs);
+ else
+ ice_alloc_rx_bufs(ring, num_bufs);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
deleted file mode 100644
index 10d9d74f3545..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018-2021, Intel Corporation. */
-
-#ifndef _ICE_CGU_REGS_H_
-#define _ICE_CGU_REGS_H_
-
-#define NAC_CGU_DWORD9 0x24
-union nac_cgu_dword9 {
- struct {
- u32 time_ref_freq_sel : 3;
- u32 clk_eref1_en : 1;
- u32 clk_eref0_en : 1;
- u32 time_ref_en : 1;
- u32 time_sync_en : 1;
- u32 one_pps_out_en : 1;
- u32 clk_ref_synce_en : 1;
- u32 clk_synce1_en : 1;
- u32 clk_synce0_en : 1;
- u32 net_clk_ref1_en : 1;
- u32 net_clk_ref0_en : 1;
- u32 clk_synce1_amp : 2;
- u32 misc6 : 1;
- u32 clk_synce0_amp : 2;
- u32 one_pps_out_amp : 2;
- u32 misc24 : 12;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD16_E825C 0x40
-union nac_cgu_dword16_e825c {
- struct {
- u32 synce_remndr : 6;
- u32 synce_phlmt_en : 1;
- u32 misc13 : 17;
- u32 tspll_ck_refclkfreq : 8;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD19 0x4c
-union nac_cgu_dword19 {
- struct {
- u32 tspll_fbdiv_intgr : 8;
- u32 fdpll_ulck_thr : 5;
- u32 misc15 : 3;
- u32 tspll_ndivratio : 4;
- u32 tspll_iref_ndivratio : 3;
- u32 misc19 : 1;
- u32 japll_ndivratio : 4;
- u32 japll_iref_ndivratio : 3;
- u32 misc27 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD22 0x58
-union nac_cgu_dword22 {
- struct {
- u32 fdpll_frac_div_out_nc : 2;
- u32 fdpll_lock_int_for : 1;
- u32 synce_hdov_int_for : 1;
- u32 synce_lock_int_for : 1;
- u32 fdpll_phlead_slip_nc : 1;
- u32 fdpll_acc1_ovfl_nc : 1;
- u32 fdpll_acc2_ovfl_nc : 1;
- u32 synce_status_nc : 6;
- u32 fdpll_acc1f_ovfl : 1;
- u32 misc18 : 1;
- u32 fdpllclk_div : 4;
- u32 time1588clk_div : 4;
- u32 synceclk_div : 4;
- u32 synceclk_sel_div2 : 1;
- u32 fdpllclk_sel_div2 : 1;
- u32 time1588clk_sel_div2 : 1;
- u32 misc3 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD23_E825C 0x5C
-union nac_cgu_dword23_e825c {
- struct {
- u32 cgupll_fbdiv_intgr : 10;
- u32 ux56pll_fbdiv_intgr : 10;
- u32 misc20 : 4;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
-
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD24 0x60
-union nac_cgu_dword24 {
- struct {
- u32 tspll_fbdiv_frac : 22;
- u32 misc20 : 2;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
- };
- u32 val;
-};
-
-#define TSPLL_CNTR_BIST_SETTINGS 0x344
-union tspll_cntr_bist_settings {
- struct {
- u32 i_irefgen_settling_time_cntr_7_0 : 8;
- u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
- u32 reserved195 : 5;
- u32 i_plllock_sel_0 : 1;
- u32 i_plllock_sel_1 : 1;
- u32 i_plllock_cnt_6_0 : 7;
- u32 i_plllock_cnt_10_7 : 4;
- u32 reserved200 : 4;
- };
- u32 val;
-};
-
-#define TSPLL_RO_BWM_LF 0x370
-union tspll_ro_bwm_lf {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 biascaldone_cri : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 m2fbdivmod_cri_7_0 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_RO_LOCK_E825C 0x3f0
-union tspll_ro_lock_e825c {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 reserved455 : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 reserved462 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_BW_TDC_E825C 0x31c
-union tspll_bw_tdc_e825c {
- struct {
- u32 i_tdc_offset_lock_1_0 : 2;
- u32 i_bbthresh1_2_0 : 3;
- u32 i_bbthresh2_2_0 : 3;
- u32 i_tdcsel_1_0 : 2;
- u32 i_tdcovccorr_en_h : 1;
- u32 i_divretimeren : 1;
- u32 i_bw_ampmeas_window : 1;
- u32 i_bw_lowerbound_2_0 : 3;
- u32 i_bw_upperbound_2_0 : 3;
- u32 i_bw_mode_1_0 : 2;
- u32 i_ft_mode_sel_2_0 : 3;
- u32 i_bwphase_4_0 : 5;
- u32 i_plllock_sel_1_0 : 2;
- u32 i_afc_divratio : 1;
- };
- u32 val;
-};
-
-#endif /* _ICE_CGU_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4fedf0181c4e..bc292d61892c 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2301,12 +2301,12 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
- info->clk_freq = ICE_TIME_REF_FREQ_156_250;
+ info->clk_freq = ICE_TSPLL_FREQ_156_250;
info->clk_src = ICE_CLK_SRC_TCXO;
}
- if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
- info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ if (info->clk_freq < NUM_ICE_TSPLL_FREQ) {
+ info->time_ref = (enum ice_tspll_freq)info->clk_freq;
} else {
/* Unknown clock frequency, so assume a (probably incorrect)
* default to avoid out-of-bounds look ups of frequency
@@ -2314,7 +2314,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
info->clk_freq);
- info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ info->time_ref = ICE_TSPLL_FREQ_25_000;
}
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
@@ -4971,6 +4971,32 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
/**
+ * ice_aq_get_cgu_input_pin_measure - get input pin signal measurements
+ * @hw: pointer to the HW struct
+ * @dpll_idx: index of the DPLL to be measured
+ * @meas: array to be filled with results
+ * @meas_num: maximum number of results the array can hold
+ *
+ * Get CGU measurements (0x0C59) of phase and frequency offsets for input
+ * pins on the given DPLL.
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num)
+{
+ struct ice_aqc_get_cgu_input_measure *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_measure);
+ cmd = &desc.params.get_cgu_input_measure;
+ cmd->dpll_idx_opt = dpll_idx & ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M;
+
+ return ice_aq_send_cmd(hw, &desc, meas, meas_num * sizeof(*meas), NULL);
+}
+
+/**
* ice_aq_get_cgu_abilities - get cgu abilities
* @hw: pointer to the HW struct
* @abilities: CGU abilities
@@ -6106,3 +6132,64 @@ u32 ice_get_link_speed(u16 index)
return ice_aq_to_link_speed[index];
}
+
+/**
+ * ice_read_cgu_reg - Read a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to read
+ * @val: Storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to read from CGU.
+ */
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_rd,
+ .dest_dev = ice_sbq_dev_cgu,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg - Write a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to write
+ * @val: Value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to write to CGU.
+ */
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_wr,
+ .dest_dev = ice_sbq_dev_cgu,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+
+ return err;
+}
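
A hedged usage sketch of the new 0x0C59 admin-queue wrapper added above; the caller shown here is illustrative only (the real consumer is the DPLL code), and ICE_DPLL_INPUT_REF_NUM is borrowed from ice_dpll.c purely to size the buffer:

/* Sketch: read phase/frequency measurements for the input pins of DPLL 0
 * and dump the reported frequencies.
 */
static int my_dump_cgu_measurements(struct ice_hw *hw)
{
        struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM] = {};
        int err;
        int i;

        err = ice_aq_get_cgu_input_pin_measure(hw, 0, meas, ARRAY_SIZE(meas));
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(meas); i++)
                ice_debug(hw, ICE_DBG_PTP, "pin %d: freq %u\n", i,
                          le32_to_cpu(meas[i].freq));

        return 0;
}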
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 64c530b39191..ed375babcde3 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -39,6 +39,195 @@
#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+#define ICE_CGU_R9 0x24
+union ice_cgu_r9 {
+ struct {
+ u32 time_ref_freq_sel : 3;
+ u32 clk_eref1_en : 1;
+ u32 clk_eref0_en : 1;
+ u32 time_ref_en : 1;
+ u32 time_sync_en : 1;
+ u32 one_pps_out_en : 1;
+ u32 clk_ref_synce_en : 1;
+ u32 clk_synce1_en : 1;
+ u32 clk_synce0_en : 1;
+ u32 net_clk_ref1_en : 1;
+ u32 net_clk_ref0_en : 1;
+ u32 clk_synce1_amp : 2;
+ u32 misc6 : 1;
+ u32 clk_synce0_amp : 2;
+ u32 one_pps_out_amp : 2;
+ u32 misc24 : 12;
+ };
+ u32 val;
+};
+
+#define ICE_CGU_R16 0x40
+union ice_cgu_r16 {
+ struct {
+ u32 synce_remndr : 6;
+ u32 synce_phlmt_en : 1;
+ u32 misc13 : 17;
+ u32 ck_refclkfreq : 8;
+ };
+ u32 val;
+};
+
+#define ICE_CGU_R19 0x4c
+union ice_cgu_r19_e82x {
+ struct {
+ u32 fbdiv_intgr : 8;
+ u32 fdpll_ulck_thr : 5;
+ u32 misc15 : 3;
+ u32 ndivratio : 4;
+ u32 tspll_iref_ndivratio : 3;
+ u32 misc19 : 1;
+ u32 japll_ndivratio : 4;
+ u32 japll_iref_ndivratio : 3;
+ u32 misc27 : 1;
+ };
+ u32 val;
+};
+
+union ice_cgu_r19_e825 {
+ struct {
+ u32 tspll_fbdiv_intgr : 10;
+ u32 fdpll_ulck_thr : 5;
+ u32 misc15 : 1;
+ u32 tspll_ndivratio : 4;
+ u32 tspll_iref_ndivratio : 3;
+ u32 misc19 : 1;
+ u32 japll_ndivratio : 4;
+ u32 japll_postdiv_pdivratio : 3;
+ u32 misc27 : 1;
+ };
+ u32 val;
+};
+
+#define ICE_CGU_R22 0x58
+union ice_cgu_r22 {
+ struct {
+ u32 fdpll_frac_div_out_nc : 2;
+ u32 fdpll_lock_int_for : 1;
+ u32 synce_hdov_int_for : 1;
+ u32 synce_lock_int_for : 1;
+ u32 fdpll_phlead_slip_nc : 1;
+ u32 fdpll_acc1_ovfl_nc : 1;
+ u32 fdpll_acc2_ovfl_nc : 1;
+ u32 synce_status_nc : 6;
+ u32 fdpll_acc1f_ovfl : 1;
+ u32 misc18 : 1;
+ u32 fdpllclk_div : 4;
+ u32 time1588clk_div : 4;
+ u32 synceclk_div : 4;
+ u32 synceclk_sel_div2 : 1;
+ u32 fdpllclk_sel_div2 : 1;
+ u32 time1588clk_sel_div2 : 1;
+ u32 misc3 : 1;
+ };
+ u32 val;
+};
+
+#define ICE_CGU_R23 0x5C
+union ice_cgu_r23 {
+ struct {
+ u32 cgupll_fbdiv_intgr : 10;
+ u32 ux56pll_fbdiv_intgr : 10;
+ u32 misc20 : 4;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+
+ };
+ u32 val;
+};
+
+#define ICE_CGU_R24 0x60
+union ice_cgu_r24 {
+ struct {
+ u32 fbdiv_frac : 22;
+ u32 misc20 : 2;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+ };
+ u32 val;
+};
+
+#define TSPLL_CNTR_BIST_SETTINGS 0x344
+union tspll_cntr_bist_settings {
+ struct {
+ u32 i_irefgen_settling_time_cntr_7_0 : 8;
+ u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
+ u32 reserved195 : 5;
+ u32 i_plllock_sel_0 : 1;
+ u32 i_plllock_sel_1 : 1;
+ u32 i_plllock_cnt_6_0 : 7;
+ u32 i_plllock_cnt_10_7 : 4;
+ u32 reserved200 : 4;
+ };
+ u32 val;
+};
+
+#define TSPLL_RO_BWM_LF 0x370
+union tspll_ro_bwm_lf {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 biascaldone_cri : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 m2fbdivmod_cri_7_0 : 8;
+ };
+ u32 val;
+};
+
+#define TSPLL_RO_LOCK_E825C 0x3f0
+union tspll_ro_lock_e825c {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 reserved455 : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 reserved462 : 8;
+ };
+ u32 val;
+};
+
+#define TSPLL_BW_TDC_E825C 0x31c
+union tspll_bw_tdc_e825c {
+ struct {
+ u32 i_tdc_offset_lock_1_0 : 2;
+ u32 i_bbthresh1_2_0 : 3;
+ u32 i_bbthresh2_2_0 : 3;
+ u32 i_tdcsel_1_0 : 2;
+ u32 i_tdcovccorr_en_h : 1;
+ u32 i_divretimeren : 1;
+ u32 i_bw_ampmeas_window : 1;
+ u32 i_bw_lowerbound_2_0 : 3;
+ u32 i_bw_upperbound_2_0 : 3;
+ u32 i_bw_mode_1_0 : 2;
+ u32 i_ft_mode_sel_2_0 : 3;
+ u32 i_bwphase_4_0 : 5;
+ u32 i_plllock_sel_1_0 : 2;
+ u32 i_afc_divratio : 1;
+ };
+ u32 val;
+};
+
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
@@ -229,6 +418,9 @@ void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -303,4 +495,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val);
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index bce3ad6ca2a6..d6190d9e32ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -11,6 +11,30 @@
#define ICE_DPLL_RCLK_NUM_PER_PF 1
#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125
+#define ICE_DPLL_PIN_PRIO_OUTPUT 0xff
+#define ICE_DPLL_INPUT_REF_NUM 10
+#define ICE_DPLL_PHASE_OFFSET_PERIOD 2
+#define ICE_DPLL_SW_PIN_INPUT_BASE_SFP 4
+#define ICE_DPLL_SW_PIN_INPUT_BASE_QSFP 6
+#define ICE_DPLL_SW_PIN_OUTPUT_BASE 0
+
+#define ICE_DPLL_PIN_SW_INPUT_ABS(in_idx) \
+ (ICE_DPLL_SW_PIN_INPUT_BASE_SFP + (in_idx))
+
+#define ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_DPLL_PIN_SW_OUTPUT_ABS(out_idx) \
+ (ICE_DPLL_SW_PIN_OUTPUT_BASE + (out_idx))
+
+#define ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -18,25 +42,61 @@
* @ICE_DPLL_PIN_TYPE_INPUT: input pin
* @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
* @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ * @ICE_DPLL_PIN_TYPE_SOFTWARE: software controlled SMA/U.FL pins
*/
enum ice_dpll_pin_type {
ICE_DPLL_PIN_INVALID,
ICE_DPLL_PIN_TYPE_INPUT,
ICE_DPLL_PIN_TYPE_OUTPUT,
ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
};
static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_INPUT] = "input",
[ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+ [ICE_DPLL_PIN_TYPE_SOFTWARE] = "software",
};
+static const char * const ice_dpll_sw_pin_sma[] = { "SMA1", "SMA2" };
+static const char * const ice_dpll_sw_pin_ufl[] = { "U.FL1", "U.FL2" };
+
static const struct dpll_pin_frequency ice_esync_range[] = {
DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
};
/**
+ * ice_dpll_is_sw_pin - check if given pin shall be controlled by SW
+ * @pf: private board structure
+ * @index: index of a pin as understood by FW
+ * @input: true for input, false for output
+ *
+ * Check if the pin shall be controlled by SW instead of being exposed for raw
+ * pin control. On an E810 NIC with a dpll there is additional MUX-related
+ * logic between the SMA/U.FL pins/connectors and the dpll device, so it is
+ * best to give the user access through a series of wrapper functions, as from
+ * the user's perspective they convey a single functionality rather than
+ * separate pins.
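+ *
+ * For example, with the index constants above: on an SFP board the
+ * SW-controlled pins are FW input pins 4 and 5 (SMA1/SMA2) and FW output
+ * pins 0 and 1; on an E810C QSFP board the inputs sit two positions higher
+ * (6 and 7), hence the rebase by the QSFP/SFP base difference below.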
+ *
+ * Return:
+ * * true - pin controlled by SW
+ * * false - pin not controlled by SW
+ */
+static bool ice_dpll_is_sw_pin(struct ice_pf *pf, u8 index, bool input)
+{
+ if (input && pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ index -= ICE_DPLL_SW_PIN_INPUT_BASE_QSFP -
+ ICE_DPLL_SW_PIN_INPUT_BASE_SFP;
+
+ if ((input && (index == ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX)) ||
+ (!input && (index == ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX)))
+ return true;
+ return false;
+}
+
+/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
* @extack: error reporting
@@ -280,6 +340,87 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_pin_frequency_set - callback to set frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Calls the set frequency command for the corresponding active input/output pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error, pin not active or failed to set in hw
+ */
+static int
+ice_dpll_sw_pin_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT)
+ ret = ice_dpll_input_frequency_set(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ else
+ ret = ice_dpll_output_frequency_set(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_frequency_get - callback for get frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Calls the get frequency command for the corresponding active input/output pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error, couldn't get from hw
+ */
+static int
+ice_dpll_sw_pin_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ *frequency = 0;
+ return 0;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_input_frequency_get(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ } else {
+ ret = ice_dpll_output_frequency_get(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+ }
+
+ return ret;
+}
+
+/**
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
@@ -375,6 +516,67 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
/**
+ * ice_dpll_sw_pins_update - update status of all SW pins
+ * @pf: private board struct
+ *
+ * Determine the current values of the pin struct fields (direction/active)
+ * and update them for all the SW-controlled pins.
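+ *
+ * In short: SMA1 is an input unless ICE_SMA1_DIR_EN is set (and becomes an
+ * inactive output when ICE_SMA1_TX_EN is set as well); SMA2 is inactive when
+ * all bits of ICE_SMA2_INACTIVE_MASK are set, an output when ICE_SMA2_DIR_EN
+ * is set, and an input otherwise; U.FL1 is active only while both SMA1 bits
+ * are clear, and U.FL2 only while SMA2 is an output with ICE_SMA2_UFL2_RX_DIS
+ * clear.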
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pins_update(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *p;
+ u8 data = 0;
+ int ret;
+
+ ret = ice_read_sma_ctrl(&pf->hw, &data);
+ if (ret)
+ return ret;
+ /* no change since last check */
+ if (d->sma_data == data)
+ return 0;
+
+	/* SMA1/U.FL1 and SMA2/U.FL2 use different bit schemes to decide
+	 * their direction and whether they are active
+	 */
+ p = &d->sma[ICE_DPLL_PIN_SW_1_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if (data & ICE_SMA1_DIR_EN) {
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ if (data & ICE_SMA1_TX_EN)
+ p->active = false;
+ }
+
+ p = &d->sma[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if ((data & ICE_SMA2_INACTIVE_MASK) == ICE_SMA2_INACTIVE_MASK)
+ p->active = false;
+ else if (data & ICE_SMA2_DIR_EN)
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_1_IDX];
+ if (!(data & (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)))
+ p->active = true;
+ else
+ p->active = false;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = (data & ICE_SMA2_DIR_EN) && !(data & ICE_SMA2_UFL2_RX_DIS);
+ d->sma_data = data;
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_update - update pin's state
* @pf: private board struct
* @pin: structure with pin attributes to be updated
@@ -471,6 +673,11 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
DPLL_PIN_STATE_DISCONNECTED;
}
break;
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ ret = ice_dpll_sw_pins_update(pf);
+ if (ret)
+ goto err;
+ break;
default:
return -EINVAL;
}
@@ -588,6 +795,67 @@ static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
+ * ice_dpll_phase_offset_monitor_set - set phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: feature state to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Enable/disable phase offset monitor feature of dpll.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (state == DPLL_FEATURE_STATE_ENABLE)
+ d->phase_offset_monitor_period = ICE_DPLL_PHASE_OFFSET_PERIOD;
+ else
+ d->phase_offset_monitor_period = 0;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_phase_offset_monitor_get - get phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds current state of phase offset monitor
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides current state of the phase offset monitor
+ * feature on a dpll device.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (d->phase_offset_monitor_period)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_set - set pin's state on dpll
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -793,6 +1061,270 @@ ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sma_direction_set - set direction of SMA pin
+ * @p: pointer to a pin
+ * @direction: requested direction of the pin
+ * @extack: error reporting
+ *
+ * Wrapper for dpll subsystem callback. Set direction of an SMA pin.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ u8 data;
+ int ret;
+
+ if (p->direction == direction && p->active)
+ return 0;
+ ret = ice_read_sma_ctrl(&p->pf->hw, &data);
+ if (ret)
+ return ret;
+
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ data &= ~ICE_SMA1_MASK;
+ if (direction == DPLL_PIN_DIRECTION_OUTPUT)
+ data |= ICE_SMA1_DIR_EN;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (direction == DPLL_PIN_DIRECTION_INPUT) {
+ data &= ~ICE_SMA2_DIR_EN;
+ } else {
+ data &= ~ICE_SMA2_TX_EN;
+ data |= ICE_SMA2_DIR_EN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = ice_write_sma_ctrl(&p->pf->hw, data);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(p->pf, p,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_ufl_pin_state_set - set U.FL pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set the state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ enum ice_dpll_pin_type type;
+ struct ice_pf *pf = p->pf;
+ struct ice_hw *hw;
+ bool enable;
+ u8 data;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ hw = &pf->hw;
+ ret = ice_read_sma_ctrl(hw, &data);
+ if (ret)
+ goto unlock;
+
+ ret = -EINVAL;
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ data &= ~ICE_SMA1_MASK;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA1_TX_EN;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ data |= ICE_SMA2_DIR_EN;
+ data &= ~ICE_SMA2_UFL2_RX_DIS;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA2_UFL2_RX_DIS;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ break;
+ default:
+ goto unlock;
+ }
+
+ ret = ice_write_sma_ctrl(hw, data);
+ if (ret)
+ goto unlock;
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+ if (ret)
+ goto unlock;
+
+ if (enable)
+ ret = ice_dpll_pin_enable(hw, target, d->dpll_idx, type, extack);
+ else
+ ret = ice_dpll_pin_disable(hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_state_get - get SW pin state
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = 0;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!p->active) {
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ goto unlock;
+ }
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_pin_state_update(pf, p->input,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->input->state[d->dpll_idx];
+ } else {
+ ret = ice_dpll_pin_state_update(pf, p->output,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->output->state[d->dpll_idx];
+ }
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sma_pin_state_set - set SMA pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_sma_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = sma->pf;
+ enum ice_dpll_pin_type type;
+ bool enable;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ if (!sma->active) {
+ ret = ice_dpll_sma_direction_set(sma, sma->direction, extack);
+ if (ret)
+ goto unlock;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ enable = state == DPLL_PIN_STATE_SELECTABLE;
+ target = sma->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ } else {
+ enable = state == DPLL_PIN_STATE_CONNECTED;
+ target = sma->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ }
+
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, target, d->dpll_idx, type,
+ extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
* ice_dpll_input_prio_get - get dpll's input prio
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -860,6 +1392,47 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
return ret;
}
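+
+/**
+ * ice_dpll_sw_input_prio_get - get SW pin's input priority on a dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: on success holds priority of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Returns the cached priority of the underlying
+ * input pin, or ICE_DPLL_PIN_PRIO_OUTPUT when the SW pin currently works as
+ * an output.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */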
+static int
+ice_dpll_sw_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (p->input && p->direction == DPLL_PIN_DIRECTION_INPUT)
+ *prio = d->input_prio[p->input->idx];
+ else
+ *prio = ICE_DPLL_PIN_PRIO_OUTPUT;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
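+/**
+ * ice_dpll_sw_input_prio_set - set SW pin's input priority on a dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: priority to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Sets priority of the underlying input pin. Valid
+ * only when the SW pin currently works as an input.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */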
+static int
+ice_dpll_sw_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ if (!p->input || p->direction != DPLL_PIN_DIRECTION_INPUT)
+ return -EINVAL;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p->input, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
/**
* ice_dpll_input_direction - callback for get input pin direction
* @pin: pointer to a pin
@@ -911,6 +1484,76 @@ ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_pin_sma_direction_set - callback for set SMA pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: requested pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting direction of an SMA pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sma_direction_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_sma_direction_set(p, direction, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_sw_direction_get - callback for get SW pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: on success holds pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sw_direction_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ *direction = p->direction;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1024,7 +1667,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
* Dpll subsystem callback. Wraps a handler for setting phase adjust on input
* pin.
*
- * Context: Calls a function which acquires pf->dplls.lock
+ * Context: Calls a function which acquires and releases pf->dplls.lock
* Return:
* * 0 - success
* * negative - error
@@ -1068,6 +1711,82 @@ ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
ICE_DPLL_PIN_TYPE_OUTPUT);
}
+/**
+ * ice_dpll_sw_phase_adjust_get - callback for get SW pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: on success holds phase adjust value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for getting phase adjust on a SW
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_get(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+ else
+ return ice_dpll_pin_phase_adjust_get(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+}
+
+/**
+ * ice_dpll_sw_phase_adjust_set - callback for set SW pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on a SW
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_set(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_INPUT);
+ else
+ return ice_dpll_pin_phase_adjust_set(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100
#define ICE_DPLL_PHASE_OFFSET_FACTOR \
(DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER)
@@ -1093,12 +1812,16 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *phase_offset, struct netlink_ext_ack *extack)
{
+ struct ice_dpll_pin *p = pin_priv;
struct ice_dpll *d = dpll_priv;
struct ice_pf *pf = d->pf;
mutex_lock(&pf->dplls.lock);
- if (d->active_input == pin)
+ if (d->active_input == pin || (p->input &&
+ d->active_input == p->input->pin))
*phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
+ else if (d->phase_offset_monitor_period)
+ *phase_offset = p->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
else
*phase_offset = 0;
mutex_unlock(&pf->dplls.lock);
@@ -1315,6 +2038,76 @@ ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_esync_set - callback for setting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_set(p->input->pin, p->input, dpll,
+ dpll_priv, freq, extack);
+ else
+ return ice_dpll_output_esync_set(p->output->pin, p->output,
+ dpll, dpll_priv, freq, extack);
+}
+
+/**
+ * ice_dpll_sw_esync_get - callback for getting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync frequency and properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * of SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_get(p->input->pin, p->input, dpll,
+ dpll_priv, esync, extack);
+ else
+ return ice_dpll_output_esync_get(p->output->pin, p->output,
+ dpll, dpll_priv, esync,
+ extack);
+}
+
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1427,6 +2220,35 @@ static const struct dpll_pin_ops ice_dpll_rclk_ops = {
.direction_get = ice_dpll_input_direction,
};
+static const struct dpll_pin_ops ice_dpll_pin_sma_ops = {
+ .state_on_dpll_set = ice_dpll_sma_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .direction_set = ice_dpll_pin_sma_direction_set,
+ .prio_get = ice_dpll_sw_input_prio_get,
+ .prio_set = ice_dpll_sw_input_prio_set,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+};
+
+static const struct dpll_pin_ops ice_dpll_pin_ufl_ops = {
+ .state_on_dpll_set = ice_dpll_ufl_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+};
+
static const struct dpll_pin_ops ice_dpll_input_ops = {
.frequency_get = ice_dpll_input_frequency_get,
.frequency_set = ice_dpll_input_frequency_set,
@@ -1459,6 +2281,13 @@ static const struct dpll_device_ops ice_dpll_ops = {
.mode_get = ice_dpll_mode_get,
};
+static const struct dpll_device_ops ice_dpll_pom_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_get = ice_dpll_mode_get,
+ .phase_offset_monitor_set = ice_dpll_phase_offset_monitor_set,
+ .phase_offset_monitor_get = ice_dpll_phase_offset_monitor_get,
+};
+
/**
* ice_generate_clock_id - generates unique clock_id for registering dpll.
* @pf: board private structure
@@ -1504,6 +2333,110 @@ static void ice_dpll_notify_changes(struct ice_dpll *d)
}
/**
+ * ice_dpll_is_pps_phase_monitor - check if dpll capable of phase offset monitor
+ * @pf: pf private structure
+ *
+ * Check if the firmware supports the admin command that provides phase offset
+ * measurements for all the input pins of the PPS dpll.
+ *
+ * Return:
+ * * true - PPS dpll phase offset monitoring is supported
+ * * false - PPS dpll phase offset monitoring is not supported
+ */
+static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ int ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+
+ if (ret && pf->hw.adminq.sq_last_status == ICE_AQ_RC_ESRCH)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_dpll_pins_notify_mask - notify dpll subsystem about bulk pin changes
+ * @pins: array of ice_dpll_pin pointers registered within dpll subsystem
+ * @pin_num: number of pins
+ * @phase_offset_ntf_mask: bitmask of pin indexes to notify
+ *
+ * Iterate over the array of pins and call the dpll subsystem pin notification
+ * for each pin whose index is set in the bitmask.
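+ * For example, a mask of 0x5 notifies pins[0] and pins[2] only.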
+ *
+ * Context: Must be called without pf->dplls.lock held.
+ */
+static void ice_dpll_pins_notify_mask(struct ice_dpll_pin *pins,
+ u8 pin_num,
+ u32 phase_offset_ntf_mask)
+{
+ int i = 0;
+
+ for (i = 0; i < pin_num; i++)
+ if (phase_offset_ntf_mask & (1 << i))
+ dpll_pin_change_ntf(pins[i].pin);
+}
+
+/**
+ * ice_dpll_pps_update_phase_offsets - update phase offset measurements
+ * @pf: pf private structure
+ * @phase_offset_pins_updated: returns mask of updated input pin indexes
+ *
+ * Read phase offset measurements for the PPS dpll device and store the values
+ * in the input pins array. On success, phase_offset_pins_updated holds a
+ * bitmask of updated input pin indexes; those pins shall be notified.
+ *
+ * Context: Shall be called with pf->dplls.lock held.
+ * Return:
+ * * 0 - success or no data available
+ * * negative - AQ failure
+ */
+static int ice_dpll_pps_update_phase_offsets(struct ice_pf *pf,
+ u32 *phase_offset_pins_updated)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ struct ice_dpll_pin *p;
+ s64 phase_offset, tmp;
+ int i, j, ret;
+
+ *phase_offset_pins_updated = 0;
+ ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+ if (ret && pf->hw.adminq.sq_last_status == ICE_AQ_RC_EAGAIN) {
+ return 0;
+ } else if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed to get input pin measurements dpll=%d, ret=%d %s\n",
+ DPLL_TYPE_PPS, ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ for (i = 0; i < pf->dplls.num_inputs; i++) {
+ p = &pf->dplls.inputs[i];
+ phase_offset = 0;
+ for (j = 0; j < ICE_CGU_INPUT_PHASE_OFFSET_BYTES; j++) {
+ tmp = meas[i].phase_offset[j];
+#ifdef __LITTLE_ENDIAN
+ phase_offset += tmp << 8 * j;
+#else
+ phase_offset += tmp << 8 *
+ (ICE_CGU_INPUT_PHASE_OFFSET_BYTES - 1 - j);
+#endif
+ }
+ phase_offset = sign_extend64(phase_offset, 47);
+ if (p->phase_offset != phase_offset) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "phase offset changed for pin:%d old:%llx, new:%llx\n",
+ p->idx, p->phase_offset, phase_offset);
+ p->phase_offset = phase_offset;
+ *phase_offset_pins_updated |= (1 << i);
+ }
+ }
+
+ return 0;
+}
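
For reference, a minimal self-contained sketch of the little-endian branch above (assuming ICE_CGU_INPUT_PHASE_OFFSET_BYTES is the six bytes implied by the 48-bit sign extension; the helper name is made up):

	#include <stdint.h>

	/* Assemble a 48-bit little-endian two's-complement measurement and
	 * sign-extend it to 64 bits, mirroring sign_extend64(val, 47) above.
	 */
	static int64_t phase_offset_from_bytes(const uint8_t b[6])
	{
		uint64_t raw = 0;
		int j;

		for (j = 0; j < 6; j++)
			raw |= (uint64_t)b[j] << (8 * j);
		if (raw & (1ULL << 47))	/* negative in 48-bit space */
			raw |= ~0ULL << 48;

		return (int64_t)raw;
	}
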
+
+/**
* ice_dpll_update_state - update dpll state
* @pf: pf private structure
* @d: pointer to queried dpll device
@@ -1589,14 +2522,19 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
+ u32 phase_offset_ntf = 0;
int ret = 0;
if (ice_is_reset_in_progress(pf->state))
goto resched;
mutex_lock(&pf->dplls.lock);
+ d->periodic_counter++;
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
ret = ice_dpll_update_state(pf, dp, false);
+ if (!ret && dp->phase_offset_monitor_period &&
+ d->periodic_counter % dp->phase_offset_monitor_period == 0)
+ ret = ice_dpll_pps_update_phase_offsets(pf, &phase_offset_ntf);
if (ret) {
d->cgu_state_acq_err_num++;
/* stop rescheduling this worker */
@@ -1611,6 +2549,9 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
mutex_unlock(&pf->dplls.lock);
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+ if (phase_offset_ntf)
+ ice_dpll_pins_notify_mask(d->inputs, d->num_inputs,
+ phase_offset_ntf);
resched:
/* Run twice a second or reschedule if update failed */
@@ -1689,7 +2630,8 @@ ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int i;
for (i = 0; i < count; i++)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
}
/**
@@ -1712,16 +2654,19 @@ ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int ret, i;
for (i = 0; i < count; i++) {
- ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
- if (ret)
- goto unregister_pins;
+ if (!pins[i].hidden) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
}
return 0;
unregister_pins:
while (--i >= 0)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
return ret;
}
@@ -1909,6 +2854,18 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
ice_dpll_unregister_pins(de->dpll, outputs,
&ice_dpll_output_ops, num_outputs);
ice_dpll_release_pins(outputs, num_outputs);
+ if (!pf->dplls.generic) {
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ }
}
}
@@ -1926,8 +2883,7 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
*/
static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
{
- u32 rclk_idx;
- int ret;
+ int ret, count;
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
pf->dplls.num_inputs,
@@ -1935,23 +2891,56 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
pf->dplls.eec.dpll, pf->dplls.pps.dpll);
if (ret)
return ret;
+ count = pf->dplls.num_inputs;
if (cgu) {
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
- pf->dplls.num_inputs,
+ count,
pf->dplls.num_outputs,
&ice_dpll_output_ops,
pf->dplls.eec.dpll,
pf->dplls.pps.dpll);
if (ret)
goto deinit_inputs;
+ count += pf->dplls.num_outputs;
+ if (!pf->dplls.generic) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.sma,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_outputs;
+ count += ICE_DPLL_PIN_SW_NUM;
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.ufl,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_sma;
+ count += ICE_DPLL_PIN_SW_NUM;
+ }
+ } else {
+ count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM;
}
- rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
- ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id,
&ice_dpll_rclk_ops);
if (ret)
- goto deinit_outputs;
+ goto deinit_ufl;
return 0;
+deinit_ufl:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+deinit_sma:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
deinit_outputs:
ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
pf->dplls.num_outputs,
@@ -1977,7 +2966,7 @@ static void
ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
{
if (cgu)
- dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_unregister(d->dpll, d->ops, d);
dpll_device_put(d->dpll);
}
@@ -2011,12 +3000,17 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
+ const struct dpll_device_ops *ops = &ice_dpll_ops;
+
+ if (type == DPLL_TYPE_PPS && ice_dpll_is_pps_phase_monitor(pf))
+ ops = &ice_dpll_pom_ops;
ice_dpll_update_state(pf, d, true);
- ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ ret = dpll_device_register(d->dpll, type, ops, d);
if (ret) {
dpll_device_put(d->dpll);
return ret;
}
+ d->ops = ops;
}
return 0;
@@ -2184,8 +3178,10 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
default:
return -EINVAL;
}
- if (num_pins != ice_cgu_get_num_pins(hw, input))
+ if (num_pins != ice_cgu_get_num_pins(hw, input)) {
+ pf->dplls.generic = true;
return ice_dpll_init_info_pins_generic(pf, input);
+ }
for (i = 0; i < num_pins; i++) {
caps = 0;
@@ -2203,10 +3199,14 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
+ if (ice_dpll_is_sw_pin(pf, i, true))
+ pins[i].hidden = true;
} else {
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
+ if (ice_dpll_is_sw_pin(pf, i, false))
+ pins[i].hidden = true;
}
ice_dpll_phase_range_set(&pins[i].prop.phase_range,
phase_adj_max);
@@ -2246,6 +3246,89 @@ static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_sw_pins - initializes software controlled pin information
+ * @pf: board private structure
+ *
+ * Initialize information for the software controlled pins and cache it in
+ * pf->dplls.sma and pf->dplls.ufl.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_sw_pins(struct ice_pf *pf)
+{
+ u8 freq_supp_num, pin_abs_idx, input_idx_offset = 0;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *pin;
+ u32 phase_adj_max, caps;
+ int i, ret;
+
+ if (pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ input_idx_offset = ICE_E810_RCLK_PINS_NUM;
+ phase_adj_max = max(d->input_phase_adj_max, d->output_phase_adj_max);
+ caps = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->sma[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) + input_idx_offset;
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_sma[i];
+ pin->input = &d->inputs[pin_abs_idx];
+ pin->output = &d->outputs[ICE_DPLL_PIN_SW_OUTPUT_ABS(i)];
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->ufl[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin->prop.capabilities = caps;
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_ufl[i];
+ if (i == ICE_DPLL_PIN_SW_1_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_OUTPUT_ABS(i);
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ false,
+ &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->input = NULL;
+ pin->output = &d->outputs[pin_abs_idx];
+ } else if (i == ICE_DPLL_PIN_SW_2_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_INPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) +
+ input_idx_offset;
+ pin->output = NULL;
+ pin->input = &d->inputs[pin_abs_idx];
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ }
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ ret = ice_dpll_pin_state_update(pf, pin, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
* ice_dpll_init_pins_info - init pins info wrapper
* @pf: board private structure
* @pin_type: type of pins being initialized
@@ -2265,6 +3348,8 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
return ice_dpll_init_info_direct_pins(pf, pin_type);
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
return ice_dpll_init_info_rclk_pin(pf);
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ return ice_dpll_init_info_sw_pins(pf);
default:
return -EINVAL;
}
@@ -2351,6 +3436,9 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
if (ret)
goto deinit_info;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_SOFTWARE);
+ if (ret)
+ goto deinit_info;
}
ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index c320f1bf7d6d..a5a5b61c5115 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -8,6 +8,18 @@
#define ICE_DPLL_RCLK_NUM_MAX 4
+/**
+ * enum ice_dpll_pin_sw - enumerate ice software pin indices:
+ * @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin
+ * @ICE_DPLL_PIN_SW_2_IDX: index of second SW pin
+ * @ICE_DPLL_PIN_SW_NUM: number of SW pins in pair
+ */
+enum ice_dpll_pin_sw {
+ ICE_DPLL_PIN_SW_1_IDX,
+ ICE_DPLL_PIN_SW_2_IDX,
+ ICE_DPLL_PIN_SW_NUM
+};
+
/** ice_dpll_pin - store info about pins
* @pin: dpll pin structure
* @pf: pointer to pf, which has registered the dpll_pin
@@ -19,6 +31,7 @@
* @prop: pin properties
* @freq: current frequency of a pin
* @phase_adjust: current phase adjust value
+ * @phase_offset: monitored phase offset value
*/
struct ice_dpll_pin {
struct dpll_pin *pin;
@@ -31,7 +44,13 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ struct ice_dpll_pin *input;
+ struct ice_dpll_pin *output;
+ enum dpll_pin_direction direction;
+ s64 phase_offset;
u8 status;
+ bool active;
+ bool hidden;
};
/** ice_dpll - store info required for DPLL control
@@ -47,8 +66,10 @@ struct ice_dpll_pin {
* @input_prio: priorities of each input
* @dpll_state: current dpll sync state
* @prev_dpll_state: last dpll sync state
+ * @phase_offset_monitor_period: phase offset monitor read period, in periodic work executions
* @active_input: pointer to active input pin
* @prev_input: pointer to previous active input pin
+ * @ops: holds the registered ops
*/
struct ice_dpll {
struct dpll_device *dpll;
@@ -64,8 +85,10 @@ struct ice_dpll {
enum dpll_lock_status dpll_state;
enum dpll_lock_status prev_dpll_state;
enum dpll_mode mode;
+ u32 phase_offset_monitor_period;
struct dpll_pin *active_input;
struct dpll_pin *prev_input;
+ const struct dpll_device_ops *ops;
};
/** ice_dplls - store info required for CCU (clock controlling unit)
@@ -84,6 +107,7 @@ struct ice_dpll {
* @clock_id: clock_id of dplls
* @input_phase_adj_max: max phase adjust value for an input pins
* @output_phase_adj_max: max phase adjust value for an output pins
+ * @periodic_counter: counter of periodic work executions
*/
struct ice_dplls {
struct kthread_worker *kworker;
@@ -93,14 +117,19 @@ struct ice_dplls {
struct ice_dpll pps;
struct ice_dpll_pin *inputs;
struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin sma[ICE_DPLL_PIN_SW_NUM];
+ struct ice_dpll_pin ufl[ICE_DPLL_PIN_SW_NUM];
struct ice_dpll_pin rclk;
u8 num_inputs;
u8 num_outputs;
- int cgu_state_acq_err_num;
+ u8 sma_data;
u8 base_rclk_idx;
+ int cgu_state_acq_err_num;
u64 clock_id;
s32 input_phase_adj_max;
s32 output_phase_adj_max;
+ u32 periodic_counter;
+ bool generic;
};
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index bbf9e6fd315b..ea7e8b879b48 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -836,6 +836,15 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
+static void ice_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static int ice_get_eeprom_len(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -2788,14 +2797,7 @@ done:
return err;
}
-/**
- * ice_parse_hdrs - parses headers from RSS hash input
- * @nfc: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
{
u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
@@ -2860,15 +2862,7 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-/**
- * ice_parse_hash_flds - parses hash fields from RSS hash input
- * @nfc: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended
- * hash fields for RSS configuration
- */
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
+static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2965,16 +2959,13 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
return hfld;
}
-/**
- * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+ice_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_rss_hash_cfg cfg;
struct device *dev;
@@ -3020,14 +3011,11 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return 0;
}
-/**
- * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- */
-static void
-ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+static int
+ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
@@ -3040,21 +3028,21 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hdrs = ice_parse_hdrs(nfc);
if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
@@ -3081,6 +3069,8 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
nfc->data |= (u64)RXH_GTP_TEID;
+
+ return 0;
}
/**
@@ -3100,8 +3090,6 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ice_add_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXCLSRLDEL:
return ice_del_fdir_ethtool(vsi, cmd);
- case ETHTOOL_SRXFH:
- return ice_set_rss_hash_opt(vsi, cmd);
default:
break;
}
@@ -3144,10 +3132,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ice_get_rss_hash_opt(vsi, cmd);
- ret = 0;
- break;
default:
break;
}
@@ -4784,6 +4768,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ice_get_link_ext_stats,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_coalesce = ice_get_coalesce,
@@ -4806,6 +4791,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_rxfh_fields = ice_get_rxfh_fields,
+ .set_rxfh_fields = ice_set_rxfh_fields,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ice_get_ts_info,
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d97b751052f2..278e57686274 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -2573,38 +2573,38 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
* convert its values to their appropriate flow L3, L4 values.
*/
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
- ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP))
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
- ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP))
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
* @vsi: VF's VSI
- * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ * @avf_hash: hash bit fields (LIBIE_FILTER_PCTYPE_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
@@ -2621,8 +2621,7 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
return -EINVAL;
vsi_handle = vsi->idx;
- if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!avf_hash || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
/* Make sure no unsupported bits are specified */
@@ -2658,11 +2657,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV4 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
}
} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
@@ -2679,11 +2678,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV6 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 6cb7bb879c98..52f906d89eca 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include <linux/net/intel/libie/pctype.h>
+
#include "ice_flex_type.h"
#include "ice_parser.h"
@@ -264,57 +266,27 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
-/* Flow headers and fields for AVF support */
-enum ice_flow_avf_hdr_field {
- /* Values 0 - 28 are reserved for future use */
- ICE_AVF_FLOW_FIELD_INVALID = 0,
- ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV4_TCP,
- ICE_AVF_FLOW_FIELD_IPV4_SCTP,
- ICE_AVF_FLOW_FIELD_IPV4_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV4,
- /* Values 37-38 are reserved */
- ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV6_TCP,
- ICE_AVF_FLOW_FIELD_IPV6_SCTP,
- ICE_AVF_FLOW_FIELD_IPV6_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV6,
- ICE_AVF_FLOW_FIELD_RSVD47,
- ICE_AVF_FLOW_FIELD_FCOE_OX,
- ICE_AVF_FLOW_FIELD_FCOE_RX,
- ICE_AVF_FLOW_FIELD_FCOE_OTHER,
- /* Values 51-62 are reserved */
- ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
- ICE_AVF_FLOW_FIELD_MAX
-};
-
/* Supported RSS offloads. This macro is defined to support
- * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware
+ * VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS ops. PF driver sends the RSS hardware
 * capabilities to the caller of these ops.
*/
-#define ICE_DEFAULT_RSS_HENA ( \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+#define ICE_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
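
ICE_DEFAULT_RSS_HASHCFG above is the capability bitmap the PF advertises, and ice_add_avf_rss_cfg() rejects any request that carries bits outside such a set ("Make sure no unsupported bits are specified"). A stand-alone sketch of that validation, with a made-up three-bit capability mask standing in for the real macro:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for ICE_DEFAULT_RSS_HASHCFG: bits 31/33/34 are the IPv4 UDP/TCP/SCTP
 * packet classifier types from the removed enum, used here as examples only.
 */
#define SUPPORTED_HASHCFG ((1ULL << 31) | (1ULL << 33) | (1ULL << 34))

/* Reject any request that sets a bit outside the advertised capabilities. */
static int validate_hashcfg(uint64_t requested)
{
        return (requested & ~SUPPORTED_HASHCFG) ? -1 : 0;
}

int main(void)
{
        printf("TCP only:          %d\n", validate_hashcfg(1ULL << 33));  /* 0  */
        printf("TCP + unsupported: %d\n",
               validate_hashcfg((1ULL << 33) | (1ULL << 63)));            /* -1 */
        return 0;
}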
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 03bb16191237..2f1782e9357f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -484,8 +484,7 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
-#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
@@ -1579,7 +1578,7 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0a11b4281092..f8ef80069e3d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1144,6 +1144,9 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
+ if (!link_up && old_link)
+ pf->link_down_events++;
+
ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
@@ -4764,7 +4767,6 @@ int ice_init_dev(struct ice_pf *pf)
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
- pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
pf->hw.udp_tunnel_nic.tables[0].n_entries =
@@ -7933,6 +7935,10 @@ const char *ice_aq_str(enum ice_aq_err aq_err)
return "ICE_AQ_RC_EPERM";
case ICE_AQ_RC_ENOENT:
return "ICE_AQ_RC_ENOENT";
+ case ICE_AQ_RC_ESRCH:
+ return "ICE_AQ_RC_ESRCH";
+ case ICE_AQ_RC_EAGAIN:
+ return "ICE_AQ_RC_EAGAIN";
case ICE_AQ_RC_ENOMEM:
return "ICE_AQ_RC_ENOMEM";
case ICE_AQ_RC_EBUSY:
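
The new pf->link_down_events counter is bumped only on a genuine up-to-down transition: the early return above it filters out events where neither link state nor speed changed, so repeated "down" notifications are not double counted. A tiny stand-alone sketch of that falling-edge count (link speed omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

static unsigned int link_down_events;

/* Count only up -> down transitions, in the spirit of ice_link_event(). */
static void link_event(bool old_link, bool link_up)
{
        if (link_up == old_link)
                return;         /* no state change, nothing to count */

        if (!link_up && old_link)
                link_down_events++;
}

int main(void)
{
        link_event(true, false);   /* counted   */
        link_event(false, false);  /* no change */
        link_event(false, true);   /* link up   */
        link_event(true, false);   /* counted   */
        printf("link_down_events = %u\n", link_down_events);  /* prints 2 */
        return 0;
}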
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 55cad824c5b9..b8cf8d64aaaa 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -4,7 +4,6 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
-#include "ice_cgu_regs.h"
static const char ice_pin_names[][64] = {
"SDP0",
@@ -40,21 +39,19 @@ static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
{ ONE_PPS, { -1, 5 }, { 0, 1 }},
};
-static const char ice_pin_names_nvm[][64] = {
- "GNSS",
- "SMA1",
- "U.FL1",
- "SMA2",
- "U.FL2",
+static const char ice_pin_names_dpll[][64] = {
+ "SDP20",
+ "SDP21",
+ "SDP22",
+ "SDP23",
};
-static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
+static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
/* name, gpio, delay */
- { GNSS, { 1, -1 }, { 0, 0 }},
- { SMA1, { 1, 0 }, { 0, 1 }},
- { UFL1, { -1, 0 }, { 0, 1 }},
- { SMA2, { 3, 2 }, { 0, 1 }},
- { UFL2, { 3, -1 }, { 0, 0 }},
+ { SDP0, { -1, 0 }, { 0, 1 }},
+ { SDP1, { 1, -1 }, { 0, 0 }},
+ { SDP2, { -1, 2 }, { 0, 1 }},
+ { SDP3, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -93,101 +90,6 @@ static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
}
/**
- * ice_ptp_update_sma_data - update SMA pins data according to pins setup
- * @pf: Board private structure
- * @sma_pins: parsed SMA pins status
- * @data: SMA data to update
- */
-static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
- u8 *data)
-{
- const char *state1, *state2;
-
- /* Set the right state based on the desired configuration.
- * When bit is set, functionality is disabled.
- */
- *data &= ~ICE_ALL_SMA_MASK;
- if (!sma_pins[UFL1 - 1]) {
- if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
- state1 = "SMA1 Rx, U.FL1 disabled";
- *data |= ICE_SMA1_TX_EN;
- } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
- state1 = "SMA1 Tx U.FL1 disabled";
- *data |= ICE_SMA1_DIR_EN;
- } else {
- state1 = "SMA1 disabled, U.FL1 disabled";
- *data |= ICE_SMA1_MASK;
- }
- } else {
- /* U.FL1 Tx will always enable SMA1 Rx */
- state1 = "SMA1 Rx, U.FL1 Tx";
- }
-
- if (!sma_pins[UFL2 - 1]) {
- if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
- state2 = "SMA2 Rx, U.FL2 disabled";
- *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
- } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
- state2 = "SMA2 Tx, U.FL2 disabled";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
- } else {
- state2 = "SMA2 disabled, U.FL2 disabled";
- *data |= ICE_SMA2_MASK;
- }
- } else {
- if (!sma_pins[SMA2 - 1]) {
- state2 = "SMA2 disabled, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
- } else {
- state2 = "SMA2 Tx, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN;
- }
- }
-
- dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
-}
-
-/**
- * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
- * @pf: Board private structure
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
-{
- const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
- struct ptp_pin_desc *pins = pf->ptp.pin_desc;
- unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
- int err;
- u8 data;
-
- /* Read initial pin state value */
- err = ice_read_sma_ctrl(&pf->hw, &data);
- if (err)
- return err;
-
- /* Get SMA/U.FL pins states */
- for (int i = 0; i < pf->ptp.info.n_pins; i++)
- if (pins[i].func) {
- int name_idx = ice_pins[i].name_idx;
-
- switch (name_idx) {
- case SMA1:
- case UFL1:
- case SMA2:
- case UFL2:
- sma_pins[name_idx - 1] = pins[i].func;
- break;
- default:
- continue;
- }
- }
-
- ice_ptp_update_sma_data(pf, sma_pins, &data);
- return ice_write_sma_ctrl(&pf->hw, data);
-}
-
-/**
* ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
* @pf: Board private structure
*
@@ -1734,7 +1636,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
int err;
/* Enable/disable CGU 1PPS output for E825C */
- err = ice_cgu_cfg_pps_out(hw, !!period);
+ err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
if (err)
return err;
}
@@ -1879,63 +1781,6 @@ static void ice_ptp_enable_all_perout(struct ice_pf *pf)
}
/**
- * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
- * @pf: Board private structure
- * @pin: Pin index
- * @func: Assigned function
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
- enum ptp_pin_function func)
-{
- unsigned int gpio_pin;
-
- switch (func) {
- case PTP_PF_PEROUT:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
- break;
- case PTP_PF_EXTTS:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
- struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
- unsigned int chan = pin_desc->chan;
-
- /* Skip pin idx from the request */
- if (i == pin)
- continue;
-
- if (pin_desc->func == PTP_PF_PEROUT &&
- pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
- pf->ptp.perout_rqs[chan].period.sec = 0;
- pf->ptp.perout_rqs[chan].period.nsec = 0;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
- false);
- } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS &&
- pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
- pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
- false);
- }
- }
-
- return 0;
-}
-
-/**
* ice_verify_pin - verify if pin supports requested pin function
* @info: the driver's PTP info structure
* @pin: Pin index
@@ -1969,14 +1814,6 @@ static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
return -EOPNOTSUPP;
}
- /* On adapters with SMA_CTRL disable other pins that share same GPIO */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ice_ptp_disable_shared_pin(pf, pin, func);
- pf->ptp.pin_desc[pin].func = func;
- pf->ptp.pin_desc[pin].chan = chan;
- return ice_ptp_set_sma_cfg(pf);
- }
-
return 0;
}
@@ -2500,14 +2337,14 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
- const char *name = NULL;
+ const char *name;
if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
name = ice_pin_names[desc->name_idx];
- else if (desc->name_idx != GPIO_NA)
- name = ice_pin_names_nvm[desc->name_idx];
- if (name)
- strscpy(pin->name, name, sizeof(pin->name));
+ else
+ name = ice_pin_names_dpll[desc->name_idx];
+
+ strscpy(pin->name, name, sizeof(pin->name));
pin->index = i;
}
@@ -2519,8 +2356,8 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
* ice_ptp_disable_pins - Disable PTP pins
* @pf: pointer to the PF structure
*
- * Disable the OS access to the SMA pins. Called to clear out the OS
- * indications of pin support when we fail to setup the SMA control register.
+ * Disable the OS access to the pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the pin array.
*/
static void ice_ptp_disable_pins(struct ice_pf *pf)
{
@@ -2561,40 +2398,30 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
for (i = 0; i < num_entries; i++) {
u16 entry = le16_to_cpu(entries[i]);
DECLARE_BITMAP(bitmap, GPIO_NA);
- unsigned int bitmap_idx;
+ unsigned int idx;
bool dir;
u16 gpio;
*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
+
+ /* Check if entry's pin bitmap is valid. */
+ if (bitmap_empty(bitmap, GPIO_NA))
+ continue;
+
dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
- for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
- unsigned int idx;
-
- /* Check if entry's pin bit is valid */
- if (bitmap_idx >= NUM_PTP_PINS_NVM &&
- bitmap_idx != GPIO_NA)
- continue;
- /* Check if pin already exists */
- for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
- if (pins[idx].name_idx == bitmap_idx)
- break;
-
- if (idx == ICE_N_PINS_MAX) {
- /* Pin not found, setup its entry and name */
- idx = n_pins++;
- pins[idx].name_idx = bitmap_idx;
- if (bitmap_idx == GPIO_NA)
- strscpy(pf->ptp.pin_desc[idx].name,
- ice_pin_names[gpio],
- sizeof(pf->ptp.pin_desc[idx]
- .name));
- }
+ for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
+ if (pins[idx].name_idx == gpio)
+ break;
+ }
- /* Setup in/out GPIO number */
- pins[idx].gpio[dir] = gpio;
+ if (idx == ICE_N_PINS_MAX) {
+ /* Pin not found, setup its entry and name */
+ idx = n_pins++;
+ pins[idx].name_idx = gpio;
}
+ pins[idx].gpio[dir] = gpio;
}
for (i = 0; i < n_pins; i++) {
@@ -2622,10 +2449,10 @@ static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
}
ice_ptp_setup_pin_cfg(pf);
}
@@ -2651,15 +2478,13 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
if (err) {
/* SDP section does not exist in NVM or is corrupted */
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ptp->ice_pin_desc = ice_pin_desc_e810_sma;
- ptp->info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
+ ptp->ice_pin_desc = ice_pin_desc_dpll;
+ ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
- pf->ptp.info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
- err = 0;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
}
+ err = 0;
} else {
desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
sizeof(struct ice_ptp_pin_desc),
@@ -2677,8 +2502,6 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
ptp->info.pin_config = ptp->pin_desc;
ice_ptp_setup_pin_cfg(pf);
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
- err = ice_ptp_set_sma_cfg(pf);
err:
if (err) {
devm_kfree(ice_pf_to_dev(pf), desc);
@@ -2704,7 +2527,7 @@ static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
#endif /* CONFIG_ICE_HWTS */
/* Rest of the config is the same as base E810 */
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
ice_ptp_setup_pin_cfg(pf);
}
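
The reworked ice_ptp_parse_sdp_entries() loop above keys each NVM SDP entry directly on its SDP number: it looks for a pin already created for that number, appends one if none exists, and then stores the GPIO under the entry's direction. A stand-alone sketch of that search-or-append pattern (the entry layout is simplified to a pair of integers here, not the real NVM encoding):

#include <stdio.h>

#define N_PINS_MAX 6

struct pin_desc {
        int name_idx;   /* SDP number the pin was created for */
        int gpio[2];    /* [0] = input GPIO, [1] = output GPIO, -1 if unset */
};

int main(void)
{
        struct pin_desc pins[N_PINS_MAX];
        /* Hypothetical parsed entries: { SDP number, direction (1 = output) } */
        const int entries[][2] = { { 20, 0 }, { 20, 1 }, { 21, 0 } };
        int n_pins = 0;

        for (int i = 0; i < N_PINS_MAX; i++)
                pins[i] = (struct pin_desc){ .name_idx = -1, .gpio = { -1, -1 } };

        for (unsigned int e = 0; e < sizeof(entries) / sizeof(entries[0]); e++) {
                int gpio = entries[e][0], dir = entries[e][1], idx;

                /* Look for a pin already created for this SDP number... */
                for (idx = 0; idx < n_pins; idx++)
                        if (pins[idx].name_idx == gpio)
                                break;

                /* ...and append a new one if it was not found. */
                if (idx == n_pins) {
                        if (n_pins == N_PINS_MAX)
                                continue;
                        pins[n_pins++].name_idx = gpio;
                }

                pins[idx].gpio[dir] = gpio;
        }

        for (int i = 0; i < n_pins; i++)
                printf("pin %d: SDP%d, in GPIO %d, out GPIO %d\n", i,
                       pins[i].name_idx, pins[i].gpio[0], pins[i].gpio[1]);
        return 0;
}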
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 3b769a0cad00..c8dac5a5bcd9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -202,9 +202,6 @@ enum ice_ptp_pin_nvm {
/* Pin definitions for PTP */
#define ICE_N_PINS_MAX 6
-#define ICE_SMA_PINS_NUM 4
-#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \
- sizeof(struct ice_ptp_pin_desc))
/**
* struct ice_ptp_pin_desc - hardware pin description data
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 003cdfada3ca..19dddd9b53dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -281,7 +281,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
/* struct ice_time_ref_info_e82x
*
- * E822 hardware can use different sources as the reference for the PTP
+ * E82X hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
@@ -289,8 +289,8 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
* reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ] = {
+ /* ICE_TSPLL_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
823437500, /* 823.4375 MHz PLL */
@@ -298,7 +298,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x136e44fabULL,
},
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ /* ICE_TSPLL_FREQ_122_880 -> 122.88 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
@@ -306,7 +306,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x146cc2177ULL,
},
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ /* ICE_TSPLL_FREQ_125_000 -> 125 MHz */
{
/* pll_freq */
796875000, /* 796.875 MHz */
@@ -314,7 +314,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x141414141ULL,
},
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ /* ICE_TSPLL_FREQ_153_600 -> 153.6 MHz */
{
/* pll_freq */
816000000, /* 816 MHz */
@@ -322,7 +322,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x139b9b9baULL,
},
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ /* ICE_TSPLL_FREQ_156_250 -> 156.25 MHz */
{
/* pll_freq */
830078125, /* 830.078125 MHz */
@@ -330,7 +330,7 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
0x134679aceULL,
},
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ /* ICE_TSPLL_FREQ_245_760 -> 245.76 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
@@ -339,167 +339,6 @@ const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
},
};
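
The two columns of e82x_time_ref[] are tied together: nominal_incval looks like the PTP clock period per PLL cycle expressed in 32.32 fixed-point nanoseconds, i.e. (10^9 << 32) / pll_freq rounded to the nearest integer, which reproduces the constants above. A quick stand-alone check under that assumption:

#include <stdint.h>
#include <stdio.h>

/* Recompute nominal_incval = round((1e9 ns << 32) / pll_freq) and compare it
 * against two rows of e82x_time_ref[].
 */
static uint64_t nominal_incval(uint64_t pll_freq_hz)
{
        /* 1e9 << 32 is about 4.3e18 and still fits in a u64. */
        return ((1000000000ULL << 32) + pll_freq_hz / 2) / pll_freq_hz;
}

int main(void)
{
        printf("25 MHz TIME_REF:  computed %#llx, table 0x136e44fab\n",
               (unsigned long long)nominal_incval(823437500ULL));
        printf("125 MHz TIME_REF: computed %#llx, table 0x141414141\n",
               (unsigned long long)nominal_incval(796875000ULL));
        return 0;
}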
-const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* refclk_pre_div */
- 1,
- /* feedback_div */
- 197,
- /* frac_n_div */
- 2621440,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* refclk_pre_div */
- 10,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-};
-
-const
-struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x19,
- /* tspll_ndivratio */
- 1,
- /* tspll_fbdiv_intgr */
- 320,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x29,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 195,
- /* tspll_fbdiv_frac */
- 1342177280UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x3E,
- /* tspll_ndivratio */
- 2,
- /* tspll_fbdiv_intgr */
- 128,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x33,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 156,
- /* tspll_fbdiv_frac */
- 1073741824UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x1F,
- /* tspll_ndivratio */
- 5,
- /* tspll_fbdiv_intgr */
- 256,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x52,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 97,
- /* tspll_fbdiv_frac */
- 2818572288UL,
- /* ref1588_ck_div */
- 0,
- },
-};
-
/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ccac84eb34c9..278231443546 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -6,7 +6,6 @@
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
-#include "ice_cgu_regs.h"
static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
DPLL_PIN_FREQUENCY_1PPS,
@@ -226,547 +225,6 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to read from CGU
- */
-static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_rd,
- .dest_dev = ice_sbq_dev_cgu,
- .msg_addr_low = addr
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return 0;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to write to CGU
- */
-static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_wr,
- .dest_dev = ice_sbq_dev_cgu,
- .msg_addr_low = addr,
- .data = val
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Return: specified TIME_REF clock frequency converted to a string
- */
-static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
-{
- switch (clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Return: specified clock source converted to its string name
- */
-static const char *ice_clk_src_str(enum ice_clk_src clk_src)
-{
- switch (clk_src) {
- case ICE_CLK_SRC_TCXO:
- return "TCXO";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.ts_pll_enable) {
- dw24.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_lock_e825c ro_lock;
- union nac_cgu_dword16_e825c dw16;
- union nac_cgu_dword23_e825c dw23;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_156_250) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 156.25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw23.ts_pll_enable) {
- dw23.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
- dw23.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
-
- /* Enable the correct receiver */
- if (clk_src == ICE_CLK_SRC_TCXO) {
- dw9.time_ref_en = 0;
- dw9.clk_eref0_en = 1;
- } else {
- dw9.time_ref_en = 1;
- dw9.clk_eref0_en = 0;
- }
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Choose the referenced frequency */
- dw16.tspll_ck_refclkfreq =
- e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr =
- e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
- dw19.tspll_ndivratio =
- e825c_cgu_params[clk_freq].tspll_ndivratio;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- /* These two are constant for E825C */
- dw22.time1588clk_div = 5;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- dw23.ref1588_ck_div =
- e825c_cgu_params[clk_freq].ref1588_ck_div;
- dw23.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- dw24.tspll_fbdiv_frac =
- e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw23.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- if (!ro_lock.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- str_enabled_disabled(dw24.ts_pll_enable),
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-#define ICE_ONE_PPS_OUT_AMP_MAX 3
-
-/**
- * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU
- * @hw: pointer to the HW struct
- * @enable: true to enable 1PPS output, false to disable it
- *
- * Return: 0 on success, other negative error code when CGU read/write failed
- */
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable)
-{
- union nac_cgu_dword9 dw9;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- dw9.one_pps_out_en = enable;
- dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX;
- return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
-{
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.i_plllock_sel_0 = 0;
- cntr_bist.i_plllock_sel_1 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
-{
- union tspll_bw_tdc_e825c bw_tdc;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
- if (err)
- return err;
-
- bw_tdc.i_plllock_sel_1_0 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- *
- * Return: 0 on success, other error codes when failed to read/write/cfg CGU
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- int err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
- err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
- else
- err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
- err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- else
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
-
- return err;
-}
-
-/**
* ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
* @hw: pointer to HW struct
* @cmd: Timer command
@@ -2668,7 +2126,7 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
static int ice_ptp_init_phc_e825(struct ice_hw *hw)
{
/* Initialize the Clock Generation Unit */
- return ice_init_cgu_e82x(hw);
+ return ice_tspll_init(hw);
}
/**
@@ -3341,7 +2799,7 @@ static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
wr32(hw, PF_SB_REM_DEV_CTL, val);
/* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e82x(hw);
+ err = ice_tspll_init(hw);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 83f20fa7ace7..5896b346e579 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -194,23 +194,6 @@ struct ice_eth56g_mac_reg_cfg {
extern
const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
-/**
- * struct ice_cgu_pll_params_e82x - E82X CGU parameters
- * @refclk_pre_div: Reference clock pre-divisor
- * @feedback_div: Feedback divisor
- * @frac_n_div: Fractional divisor
- * @post_pll_div: Post PLL divisor
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF frequency.
- */
-struct ice_cgu_pll_params_e82x {
- u32 refclk_pre_div;
- u32 feedback_div;
- u32 frac_n_div;
- u32 post_pll_div;
-};
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
enum ice_e810_c827_idx {
@@ -282,31 +265,6 @@ struct ice_cgu_pin_desc {
struct dpll_pin_frequency *freq_supp;
};
-extern const struct
-ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
-/**
- * struct ice_cgu_pll_params_e825c - E825C CGU parameters
- * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection
- * @tspll_ndivratio: ndiv ratio that goes directly to the pll
- * @tspll_fbdiv_intgr: TS PLL integer feedback divide
- * @tspll_fbdiv_frac: TS PLL fractional feedback divide
- * @ref1588_ck_div: clock divider for tspll ref
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF/TCXO frequency.
- */
-struct ice_cgu_pll_params_e825c {
- u32 tspll_ck_refclkfreq;
- u32 tspll_ndivratio;
- u32 tspll_fbdiv_intgr;
- u32 tspll_fbdiv_frac;
- u32 ref1588_ck_div;
-};
-
-extern const struct
-ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
@@ -314,7 +272,7 @@ ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ];
/* Table of constants for Vernier calibration on E822 */
extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
@@ -328,7 +286,6 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
@@ -357,7 +314,8 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
+
+static inline enum ice_tspll_freq ice_e82x_time_ref(const struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
@@ -371,17 +329,17 @@ static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_tspll_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].nominal_incval;
}
@@ -704,6 +662,7 @@ static inline u64 ice_get_base_incval(struct ice_hw *hw)
#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)
#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \
ICE_SMA2_TX_EN)
+#define ICE_SMA2_INACTIVE_MASK (ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN)
#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK)
#define ICE_SMA_MIN_BIT 3
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.c b/drivers/net/ethernet/intel/ice/ice_tspll.c
new file mode 100644
index 000000000000..08af4ced50eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_ptp_hw.h"
+
+static const struct
+ice_tspll_params_e82x e82x_tspll_params[NUM_ICE_TSPLL_FREQ] = {
+ [ICE_TSPLL_FREQ_25_000] = {
+ .refclk_pre_div = 1,
+ .post_pll_div = 6,
+ .feedback_div = 197,
+ .frac_n_div = 2621440,
+ },
+ [ICE_TSPLL_FREQ_122_880] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_125_000] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_153_600] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_156_250] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_245_760] = {
+ .refclk_pre_div = 10,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+};
+
+/**
+ * ice_tspll_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string.
+ */
+static const char *ice_tspll_clk_freq_str(enum ice_tspll_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TSPLL_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TSPLL_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TSPLL_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TSPLL_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TSPLL_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TSPLL_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name
+ */
+static const char *ice_tspll_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_log_cfg - Log current/new TSPLL configuration
+ * @hw: Pointer to the HW struct
+ * @enable: CGU enabled/disabled
+ * @clk_src: Current clock source
+ * @tspll_freq: Current clock frequency
+ * @lock: CGU lock status
+ * @new_cfg: true if this is a new config
+ */
+static void ice_tspll_log_cfg(struct ice_hw *hw, bool enable, u8 clk_src,
+ u8 tspll_freq, bool lock, bool new_cfg)
+{
+ dev_dbg(ice_hw_to_dev(hw),
+ "%s TSPLL configuration -- %s, src %s, freq %s, PLL %s\n",
+ new_cfg ? "New" : "Current", str_enabled_disabled(enable),
+ ice_tspll_clk_src_str((enum ice_clk_src)clk_src),
+ ice_tspll_clk_freq_str((enum ice_tspll_freq)tspll_freq),
+ lock ? "locked" : "unlocked");
+}
+
+/**
+ * ice_tspll_cfg_e82x - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e82x(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union ice_cgu_r19_e82x dw19;
+ union ice_cgu_r22 dw22;
+ union ice_cgu_r24 dw24;
+ union ice_cgu_r9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TSPLL_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCXO && clk_freq != ICE_TSPLL_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCXO only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, dw24.ts_pll_enable, dw24.time_ref_sel,
+ dw9.time_ref_freq_sel, bwm_lf.plllock_true_lock_cri,
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.ts_pll_enable) {
+ dw24.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.fbdiv_intgr = e82x_tspll_params[clk_freq].feedback_div;
+ dw19.ndivratio = 1;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.time1588clk_div = e82x_tspll_params[clk_freq].post_pll_div;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.ref1588_ck_div = e82x_tspll_params[clk_freq].refclk_pre_div;
+ dw24.fbdiv_frac = e82x_tspll_params[clk_freq].frac_n_div;
+ dw24.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, dw24.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "TSPLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ ice_tspll_log_cfg(hw, dw24.ts_pll_enable, clk_src, clk_freq, true,
+ true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e82x - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU.
+ */
+static int ice_tspll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg(hw, TSPLL_CNTR_BIST_SETTINGS, &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ cntr_bist.i_plllock_sel_0 = 0;
+ cntr_bist.i_plllock_sel_1 = 0;
+
+ return ice_write_cgu_reg(hw, TSPLL_CNTR_BIST_SETTINGS, cntr_bist.val);
+}
+
+/**
+ * ice_tspll_cfg_e825c - Configure the TSPLL for E825-C
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e825c(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_lock_e825c ro_lock;
+ union ice_cgu_r19_e825 dw19;
+ union ice_cgu_r16 dw16;
+ union ice_cgu_r23 dw23;
+ union ice_cgu_r22 dw22;
+ union ice_cgu_r9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TSPLL_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_freq != ICE_TSPLL_FREQ_156_250) {
+ dev_warn(ice_hw_to_dev(hw), "Adapter only supports 156.25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R16, &dw16.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &dw23.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, dw23.ts_pll_enable, dw23.time_ref_sel,
+ dw9.time_ref_freq_sel,
+ ro_lock.plllock_true_lock_cri, false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw23.ts_pll_enable) {
+ dw23.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, dw23.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+
+ /* Enable the correct receiver */
+ if (clk_src == ICE_CLK_SRC_TCXO) {
+ dw9.time_ref_en = 0;
+ dw9.clk_eref0_en = 1;
+ } else {
+ dw9.time_ref_en = 1;
+ dw9.clk_eref0_en = 0;
+ }
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, dw9.val);
+ if (err)
+ return err;
+
+ /* Choose the referenced frequency */
+ dw16.ck_refclkfreq = ICE_TSPLL_CK_REFCLKFREQ_E825;
+ err = ice_write_cgu_reg(hw, ICE_CGU_R16, dw16.val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.tspll_fbdiv_intgr = ICE_TSPLL_FBDIV_INTGR_E825;
+ dw19.tspll_ndivratio = ICE_TSPLL_NDIVRATIO_E825;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &dw22.val);
+ if (err)
+ return err;
+
+ /* These two are constant for E825C */
+ dw22.time1588clk_div = 5;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &dw23.val);
+ if (err)
+ return err;
+
+ dw23.ref1588_ck_div = 0;
+ dw23.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, dw23.val);
+ if (err)
+ return err;
+
+ /* Clear the R24 register. */
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, 0);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw23.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, dw23.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ if (!ro_lock.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "TSPLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ ice_tspll_log_cfg(hw, dw23.ts_pll_enable, clk_src, clk_freq, true,
+ true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e825c - disable TSPLL sticky bits for E825-C
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU.
+ */
+static int ice_tspll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ union tspll_bw_tdc_e825c bw_tdc;
+ int err;
+
+ err = ice_read_cgu_reg(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
+ if (err)
+ return err;
+
+ bw_tdc.i_plllock_sel_1_0 = 0;
+
+ return ice_write_cgu_reg(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
+}
+
+#define ICE_ONE_PPS_OUT_AMP_MAX 3
+
+/**
+ * ice_tspll_cfg_pps_out_e825c - Enable/disable 1PPS output and set amplitude
+ * @hw: pointer to the HW struct
+ * @enable: true to enable 1PPS output, false to disable it
+ *
+ * Return: 0 on success, other negative error code when CGU read/write failed.
+ */
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable)
+{
+ union ice_cgu_r9 r9;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9.val);
+ if (err)
+ return err;
+
+ r9.one_pps_out_en = enable;
+ r9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX;
+ return ice_write_cgu_reg(hw, ICE_CGU_R9, r9.val);
+}
+
+/**
+ * ice_tspll_init - Initialize TSPLL with settings from firmware
+ * @hw: Pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E82X/E825 device.
+ *
+ * Return: 0 on success, other error codes when failed to read/write/cfg CGU.
+ */
+int ice_tspll_init(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ int err;
+
+ /* Disable sticky lock detection so lock err reported is accurate. */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ err = ice_tspll_dis_sticky_bits_e825c(hw);
+ else
+ err = ice_tspll_dis_sticky_bits_e82x(hw);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL using the parameters from the function
+ * capabilities.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ err = ice_tspll_cfg_e825c(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ else
+ err = ice_tspll_cfg_e82x(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+
+ return err;
+}
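
The divider fields in the new e82x_tspll_params[] table combine with the TIME_REF frequency to give the PLL rates listed in e82x_time_ref[]: pll_freq = TIME_REF / refclk_pre_div * (feedback_div + frac_n_div / 2^22) / post_pll_div reproduces every row, assuming the fractional feedback part is scaled by 2^22 (an inference from the numbers, not something stated in the patch). A stand-alone check for two rows:

#include <stdio.h>

struct params {
        double time_ref_hz;
        unsigned int refclk_pre_div, post_pll_div, feedback_div;
        unsigned long frac_n_div;
};

int main(void)
{
        /* Two rows copied from e82x_tspll_params[] / e82x_time_ref[]. */
        static const struct params rows[] = {
                { 25000000.0, 1, 6, 197, 2621440 },     /* expect 823.4375 MHz */
                { 122880000.0, 5, 7, 223, 524288 },     /* expect 783.36 MHz   */
        };

        for (unsigned int i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
                const struct params *p = &rows[i];
                /* Fractional feedback divisor, assuming a 22-bit fraction. */
                double fbdiv = p->feedback_div + p->frac_n_div / 4194304.0;
                double pll_hz = p->time_ref_hz / p->refclk_pre_div * fbdiv /
                                p->post_pll_div;

                printf("TIME_REF %.2f MHz -> PLL %.4f MHz\n",
                       p->time_ref_hz / 1e6, pll_hz / 1e6);
        }
        return 0;
}

The same scaling also matches the E825-C constants (fbdiv_intgr 256, ndivratio 5) only through the additional ndivratio path, so the table above is kept E82x specific.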
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.h b/drivers/net/ethernet/intel/ice/ice_tspll.h
new file mode 100644
index 000000000000..c0b1232cc07c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _ICE_TSPLL_H_
+#define _ICE_TSPLL_H_
+
+/**
+ * struct ice_tspll_params_e82x - E82X TSPLL parameters
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @post_pll_div: Post PLL divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_tspll_params_e82x {
+ u8 refclk_pre_div;
+ u8 post_pll_div;
+ u8 feedback_div;
+ u32 frac_n_div;
+};
+
+#define ICE_TSPLL_CK_REFCLKFREQ_E825 0x1F
+#define ICE_TSPLL_NDIVRATIO_E825 5
+#define ICE_TSPLL_FBDIV_INTGR_E825 256
+
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable);
+int ice_tspll_init(struct ice_hw *hw);
+
+#endif /* _ICE_TSPLL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 0e5107fe62ad..29e0088ab6b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -20,7 +20,6 @@
#define ICE_RX_HDR_SIZE 256
-#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
/**
@@ -707,6 +706,37 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
}
/**
+ * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi.
+ * @rx_ring: ring to init descriptors on
+ * @count: number of descriptors to initialize
+ */
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
+{
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 ntu = rx_ring->next_to_use;
+
+ if (!count)
+ return;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+
+ do {
+ rx_desc++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ ntu = 0;
+ }
+
+ rx_desc->wb.status_error0 = 0;
+ count--;
+ } while (count);
+
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+}
+
+/**
* ice_alloc_rx_bufs - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
@@ -726,8 +756,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
- !cleaned_count)
+ if (!rx_ring->netdev || !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
@@ -1184,6 +1213,45 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
/**
+ * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
+ * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
+ *
+ * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
+ * to set flow director rules on VFs.
+ */
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean;
+ unsigned int total_rx_pkts = 0;
+ u32 cnt = rx_ring->count;
+
+ while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 stat_err_bits;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ break;
+
+ dma_rmb();
+
+ if (ctrl_vsi->vf)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
+
+ if (++ntc == cnt)
+ ntc = 0;
+ total_rx_pkts++;
+ }
+
+ rx_ring->first_desc = ntc;
+ rx_ring->next_to_clean = ntc;
+ ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
+}
+
+/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1195,7 +1263,7 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
@@ -1242,17 +1310,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
dma_rmb();
ice_trace(clean_rx_irq, rx_ring, rx_desc);
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- struct ice_vsi *ctrl_vsi = rx_ring->vsi;
-
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf)
- ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (++ntc == cnt)
- ntc = 0;
- rx_ring->first_desc = ntc;
- continue;
- }
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
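
With this change the control VSI no longer rides on ice_clean_rx_irq() (now static); flow-director completions go through the dedicated ice_clean_ctrl_rx_irq() path and the descriptors are re-armed with ice_init_ctrl_rx_descs() instead of full buffer allocation. A hedged sketch of the dispatch this implies inside the NAPI poll (the real call site is ice_napi_poll() and may differ in detail):

	/* Sketch only: illustrates the split, not the exact ice_napi_poll() code. */
	static int example_poll_rx(struct ice_rx_ring *rx_ring, int budget)
	{
		if (rx_ring->vsi->type == ICE_VSI_CTRL) {
			/* No skbs and no budget accounting on the control VSI. */
			ice_clean_ctrl_rx_irq(rx_ring);
			return 0;
		}

		return ice_clean_rx_irq(rx_ring, budget);
	}
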
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a4b1e9514632..fef750c5f288 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -491,6 +491,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
union ice_32b_rx_flex_desc;
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
@@ -506,6 +507,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 3d68f465952d..03c6c271865d 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -326,17 +326,17 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
/* TIME_REF clock rate specification */
-enum ice_time_ref_freq {
- ICE_TIME_REF_FREQ_25_000 = 0,
- ICE_TIME_REF_FREQ_122_880 = 1,
- ICE_TIME_REF_FREQ_125_000 = 2,
- ICE_TIME_REF_FREQ_153_600 = 3,
- ICE_TIME_REF_FREQ_156_250 = 4,
- ICE_TIME_REF_FREQ_245_760 = 5,
+enum ice_tspll_freq {
+ ICE_TSPLL_FREQ_25_000 = 0,
+ ICE_TSPLL_FREQ_122_880 = 1,
+ ICE_TSPLL_FREQ_125_000 = 2,
+ ICE_TSPLL_FREQ_153_600 = 3,
+ ICE_TSPLL_FREQ_156_250 = 4,
+ ICE_TSPLL_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ,
+ NUM_ICE_TSPLL_FREQ,
- ICE_TIME_REF_FREQ_INVALID = -1,
+ ICE_TSPLL_FREQ_INVALID = -1,
};
/* Clock source specification */
@@ -349,7 +349,7 @@ enum ice_clk_src {
struct ice_ts_func_info {
/* Function specific info */
- enum ice_time_ref_freq time_ref;
+ enum ice_tspll_freq time_ref;
u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index eeeb9968e477..24426dcd8aa2 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2999,13 +2999,13 @@ error_param:
}
/**
- * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
+ * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
* @vf: pointer to the VF info
*/
-static int ice_vc_get_rss_hena(struct ice_vf *vf)
+static int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_hena *vrh = NULL;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
int len = 0, ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -3019,7 +3019,7 @@ static int ice_vc_get_rss_hena(struct ice_vf *vf)
goto err;
}
- len = sizeof(struct virtchnl_rss_hena);
+ len = sizeof(struct virtchnl_rss_hashcfg);
vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
@@ -3027,23 +3027,23 @@ static int ice_vc_get_rss_hena(struct ice_vf *vf)
goto err;
}
- vrh->hena = ICE_DEFAULT_RSS_HENA;
+ vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
err:
/* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
(u8 *)vrh, len);
kfree(vrh);
return ret;
}
/**
- * ice_vc_set_rss_hena - set RSS HENA bits for the VF
+ * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*/
-static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
+static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
{
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
@@ -3074,9 +3074,9 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
* disable RSS
*/
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
- if (status && !vrh->hena) {
+ if (status && !vrh->hashcfg) {
/* only report failure to clear the current RSS configuration if
- * that was clearly the VF's intention (i.e. vrh->hena = 0)
+ * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
*/
v_ret = ice_err_to_virt_err(status);
goto err;
@@ -3089,14 +3089,14 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
vf->vf_id);
}
- if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
+ if (vrh->hashcfg) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
v_ret = ice_err_to_virt_err(status);
}
/* send the response to the VF */
err:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
NULL, 0);
}
@@ -4243,8 +4243,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4380,8 +4380,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4582,11 +4582,11 @@ error_handler:
case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
err = ops->query_rxdid(vf);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- err = ops->get_rss_hena(vf);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ err = ops->get_rss_hashcfg(vf);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- err = ops->set_rss_hena_msg(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ err = ops->set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
err = ops->ena_vlan_stripping(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 222990f229d5..b3eece8c6780 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -57,8 +57,8 @@ struct ice_virtchnl_ops {
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*query_rxdid)(struct ice_vf *vf);
- int (*get_rss_hena)(struct ice_vf *vf);
- int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_rss_hashcfg)(struct ice_vf *vf);
+ int (*set_rss_hashcfg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_stripping)(struct ice_vf *vf);
int (*dis_vlan_stripping)(struct ice_vf *vf);
int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index a3d1579a619a..4c2ec2337b38 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -65,7 +65,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, VIRTCHNL_OP_SET_RSS_HASHCFG,
VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 993c354aa27a..555879b1248d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1006,7 +1006,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
break;
skip_data:
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 5cf440e09d0a..cef9dfb877e8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -383,12 +383,12 @@ err_out:
*/
static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
- if (unlikely(!rx_buf->page))
+ if (unlikely(!rx_buf->netmem))
return;
- page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
+ libeth_rx_recycle_slow(rx_buf->netmem);
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
rx_buf->offset = 0;
}
@@ -3240,10 +3240,10 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
- u32 hr = rx_buf->page->pp->p.offset;
+ u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->offset + hr, size, rx_buf->truesize);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem,
+ rx_buf->offset + hr, size, rx_buf->truesize);
}
/**
@@ -3266,16 +3266,20 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
struct libeth_fqe *buf, u32 data_len)
{
u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ struct page *hdr_page, *buf_page;
const void *src;
void *dst;
- if (!libeth_rx_sync_for_cpu(buf, copy))
+ if (unlikely(netmem_is_net_iov(buf->netmem)) ||
+ !libeth_rx_sync_for_cpu(buf, copy))
return 0;
- dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
- src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
- memcpy(dst, src, LARGEST_ALIGN(copy));
+ hdr_page = __netmem_to_page(hdr->netmem);
+ buf_page = __netmem_to_page(buf->netmem);
+ dst = page_address(hdr_page) + hdr->offset + hdr_page->pp->p.offset;
+ src = page_address(buf_page) + buf->offset + buf_page->pp->p.offset;
+ memcpy(dst, src, LARGEST_ALIGN(copy));
buf->offset += copy;
return copy;
@@ -3291,11 +3295,12 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
*/
struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
- u32 hr = buf->page->pp->p.offset;
+ struct page *buf_page = __netmem_to_page(buf->netmem);
+ u32 hr = buf_page->pp->p.offset;
struct sk_buff *skb;
void *va;
- va = page_address(buf->page) + buf->offset;
+ va = page_address(buf_page) + buf->offset;
prefetch(va + hr);
skb = napi_build_skb(va, buf->truesize);
@@ -3429,7 +3434,8 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
if (unlikely(!hdr_len && !skb)) {
hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
- pkt_len -= hdr_len;
+ /* If failed, drop both buffers by setting len to 0 */
+ pkt_len -= hdr_len ? : pkt_len;
u64_stats_update_begin(&rxq->stats_sync);
u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
@@ -3446,7 +3452,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
u64_stats_update_end(&rxq->stats_sync);
}
- hdr->page = NULL;
+ hdr->netmem = 0;
payload:
if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
@@ -3462,7 +3468,7 @@ payload:
break;
skip_data:
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
idpf_rx_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index ca6ccbc13954..92ef33459aec 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2500,9 +2500,11 @@ static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter,
return 0;
}
-static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igb_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igb */
@@ -2563,9 +2565,6 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = igb_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -2575,9 +2574,11 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
IGB_FLAG_RSS_FIELD_IPV6_UDP)
-static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igb_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igb_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -3005,9 +3006,6 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = igb_set_rss_hash_opt(adapter, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = igb_add_ethtool_nfc_entry(adapter, cmd);
break;
@@ -3485,6 +3483,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
.get_rxfh = igb_get_rxfh,
.set_rxfh = igb_set_rxfh,
+ .get_rxfh_fields = igb_get_rxfh_fields,
+ .set_rxfh_fields = igb_set_rxfh_fields,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 859a15e4ccba..1525ae25fd3e 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -43,6 +43,7 @@ void igc_ethtool_set_ops(struct net_device *);
struct igc_fpe_t {
struct ethtool_mmsv mmsv;
u32 tx_min_frag_size;
+ bool tx_enabled;
};
enum igc_mac_filter_type {
@@ -163,6 +164,7 @@ struct igc_ring {
bool launchtime_enable; /* true if LaunchTime is enabled */
ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */
ktime_t last_ff_cycle; /* Last cycle with an active first flag */
+ bool preemptible; /* True if preemptible queue, false if express queue */
u32 start_time;
u32 end_time;
@@ -395,6 +397,7 @@ extern char igc_driver_name[];
#define IGC_FLAG_TSN_QBV_ENABLED BIT(17)
#define IGC_FLAG_TSN_QAV_ENABLED BIT(18)
#define IGC_FLAG_TSN_PREEMPT_ENABLED BIT(19)
+#define IGC_FLAG_TSN_REVERSE_TXQ_PRIO BIT(20)
#define IGC_FLAG_TSN_ANY_ENABLED \
(IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED | \
@@ -485,12 +488,30 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGC_RX_PTHRESH 8
-#define IGC_RX_HTHRESH 8
-#define IGC_TX_PTHRESH 8
-#define IGC_TX_HTHRESH 1
-#define IGC_RX_WTHRESH 4
-#define IGC_TX_WTHRESH 16
+#define IGC_RXDCTL_PTHRESH 8
+#define IGC_RXDCTL_HTHRESH 8
+#define IGC_RXDCTL_WTHRESH 4
+/* Ena specific Rx Queue */
+#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000
+/* Receive Software Flush */
+#define IGC_RXDCTL_SWFLUSH 0x04000000
+
+#define IGC_TXDCTL_PTHRESH_MASK GENMASK(4, 0)
+#define IGC_TXDCTL_HTHRESH_MASK GENMASK(12, 8)
+#define IGC_TXDCTL_WTHRESH_MASK GENMASK(20, 16)
+#define IGC_TXDCTL_QUEUE_ENABLE_MASK GENMASK(25, 25)
+#define IGC_TXDCTL_SWFLUSH_MASK GENMASK(26, 26)
+#define IGC_TXDCTL_PRIORITY_MASK GENMASK(27, 27)
+
+#define IGC_TXDCTL_PTHRESH(x) FIELD_PREP(IGC_TXDCTL_PTHRESH_MASK, (x))
+#define IGC_TXDCTL_HTHRESH(x) FIELD_PREP(IGC_TXDCTL_HTHRESH_MASK, (x))
+#define IGC_TXDCTL_WTHRESH(x) FIELD_PREP(IGC_TXDCTL_WTHRESH_MASK, (x))
+/* Ena specific Tx Queue */
+#define IGC_TXDCTL_QUEUE_ENABLE FIELD_PREP(IGC_TXDCTL_QUEUE_ENABLE_MASK, 1)
+/* Transmit Software Flush */
+#define IGC_TXDCTL_SWFLUSH FIELD_PREP(IGC_TXDCTL_SWFLUSH_MASK, 1)
+#define IGC_TXDCTL_PRIORITY(x) FIELD_PREP(IGC_TXDCTL_PRIORITY_MASK, (x))
+#define IGC_TXDCTL_PRIORITY_HIGH IGC_TXDCTL_PRIORITY(1)
#define IGC_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
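
The threshold defines are now expressed with GENMASK()/FIELD_PREP() instead of open-coded shifts, which makes the TXDCTL field layout explicit. A small hedged example of composing a value the way igc_configure_tx_ring() does and decoding a field back with FIELD_GET() (the helper name is illustrative):

	#include <linux/bitfield.h>

	static u32 example_txdctl_compose(void)
	{
		u32 txdctl = IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) |
			     IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE;

		/* FIELD_GET() undoes FIELD_PREP(): this reads back 16. */
		WARN_ON(FIELD_GET(IGC_TXDCTL_WTHRESH_MASK, txdctl) != 16);

		return txdctl;
	}
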
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 6320eabb72fe..eaf17cd031c3 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -86,14 +86,6 @@ union igc_adv_rx_desc {
} wb; /* writeback */
};
-/* Additional Transmit Descriptor Control definitions */
-#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
-#define IGC_TXDCTL_SWFLUSH 0x04000000 /* Transmit Software Flush */
-
-/* Additional Receive Descriptor Control definitions */
-#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
-#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
-
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0)
#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 7189dfc389ad..86b346687196 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -588,6 +588,7 @@
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
#define IGC_TXQCTL_STRICT_END 0x00000004
+#define IGC_TXQCTL_PREEMPTIBLE 0x00000008
#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0
#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080
#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 3fc1eded9605..a7f397b58cd6 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -122,9 +122,11 @@ static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGC_STATS_LEN \
(IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
+#define IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO BIT(1)
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
+ "reverse-tsn-txq-prio",
};
#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings)
@@ -1045,9 +1047,11 @@ static int igc_ethtool_get_nfc_rules(struct igc_adapter *adapter,
return 0;
}
-static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int igc_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
+
cmd->data = 0;
/* Report default options for RSS on igc */
@@ -1103,8 +1107,6 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
return igc_ethtool_get_nfc_rule(adapter, cmd);
case ETHTOOL_GRXCLSRLALL:
return igc_ethtool_get_nfc_rules(adapter, cmd, rule_locs);
- case ETHTOOL_GRXFH:
- return igc_ethtool_get_rss_hash_opts(adapter, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1112,9 +1114,11 @@ static int igc_ethtool_get_rxnfc(struct net_device *dev,
#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
IGC_FLAG_RSS_FIELD_IPV6_UDP)
-static int igc_ethtool_set_rss_hash_opt(struct igc_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int igc_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct igc_adapter *adapter = netdev_priv(dev);
u32 flags = adapter->flags;
/* RSS does not support anything other than hashing
@@ -1425,8 +1429,6 @@ static int igc_ethtool_set_rxnfc(struct net_device *dev,
struct igc_adapter *adapter = netdev_priv(dev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return igc_ethtool_set_rss_hash_opt(adapter, cmd);
case ETHTOOL_SRXCLSRLINS:
return igc_ethtool_add_nfc_rule(adapter, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -1600,6 +1602,9 @@ static u32 igc_ethtool_get_priv_flags(struct net_device *netdev)
if (adapter->flags & IGC_FLAG_RX_LEGACY)
priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX;
+ if (adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ priv_flags |= IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO;
+
return priv_flags;
}
@@ -1608,10 +1613,13 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
struct igc_adapter *adapter = netdev_priv(netdev);
unsigned int flags = adapter->flags;
- flags &= ~IGC_FLAG_RX_LEGACY;
+ flags &= ~(IGC_FLAG_RX_LEGACY | IGC_FLAG_TSN_REVERSE_TXQ_PRIO);
if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX)
flags |= IGC_FLAG_RX_LEGACY;
+ if (priv_flags & IGC_PRIV_FLAGS_REVERSE_TSN_TXQ_PRIO)
+ flags |= IGC_FLAG_TSN_REVERSE_TXQ_PRIO;
+
if (flags != adapter->flags) {
adapter->flags = flags;
@@ -2144,6 +2152,8 @@ static const struct ethtool_ops igc_ethtool_ops = {
.get_rxfh_indir_size = igc_ethtool_get_rxfh_indir_size,
.get_rxfh = igc_ethtool_get_rxfh,
.set_rxfh = igc_ethtool_set_rxfh,
+ .get_rxfh_fields = igc_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = igc_ethtool_set_rxfh_fields,
.get_ts_info = igc_ethtool_get_ts_info,
.get_channels = igc_ethtool_get_channels,
.set_channels = igc_ethtool_set_channels,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 686793c539f2..2e12915b42a9 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -683,9 +683,9 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
wr32(IGC_SRRCTL(reg_idx), srrctl);
- rxdctl |= IGC_RX_PTHRESH;
- rxdctl |= IGC_RX_HTHRESH << 8;
- rxdctl |= IGC_RX_WTHRESH << 16;
+ rxdctl |= IGC_RXDCTL_PTHRESH;
+ rxdctl |= IGC_RXDCTL_HTHRESH << 8;
+ rxdctl |= IGC_RXDCTL_WTHRESH << 16;
/* initialize rx_buffer_info */
memset(ring->rx_buffer_info, 0,
@@ -749,11 +749,9 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
wr32(IGC_TDH(reg_idx), 0);
writel(0, ring->tail);
- txdctl |= IGC_TX_PTHRESH;
- txdctl |= IGC_TX_HTHRESH << 8;
- txdctl |= IGC_TX_WTHRESH << 16;
+ txdctl |= IGC_TXDCTL_PTHRESH(8) | IGC_TXDCTL_HTHRESH(1) |
+ IGC_TXDCTL_WTHRESH(16) | IGC_TXDCTL_QUEUE_ENABLE;
- txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
wr32(IGC_TXDCTL(reg_idx), txdctl);
}
@@ -1687,6 +1685,15 @@ done:
first->tx_flags = tx_flags;
first->protocol = protocol;
+ /* For preemptible queue, manually pad the skb so that HW includes
+ * padding bytes in mCRC calculation
+ */
+ if (tx_ring->preemptible && skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out_drop;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
if (tso < 0)
goto out_drop;
@@ -6423,6 +6430,7 @@ static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
+ ring->preemptible = false;
}
spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
@@ -6488,9 +6496,12 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
- /* preemptible isn't supported yet */
- if (qopt->mqprio.preemptible_tcs)
- return -EOPNOTSUPP;
+ if (qopt->mqprio.preemptible_tcs &&
+ !(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)) {
+ NL_SET_ERR_MSG_MOD(qopt->extack,
+ "reverse-tsn-txq-prio private flag must be enabled before setting preemptible tc");
+ return -ENODEV;
+ }
igc_ptp_read(adapter, &now);
@@ -6583,6 +6594,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
ring->max_sdu = 0;
}
+ igc_fpe_save_preempt_queue(adapter, &qopt->mqprio);
+
return 0;
}
@@ -6702,7 +6715,8 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
- caps->broken_mqprio = true;
+ if (!(adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO))
+ caps->broken_mqprio = true;
if (hw->mac.type == igc_i225) {
caps->supports_queue_max_sdu = true;
@@ -6728,6 +6742,20 @@ static void igc_save_mqprio_params(struct igc_adapter *adapter, u8 num_tc,
adapter->queue_per_tc[i] = offset[i];
}
+static bool
+igc_tsn_is_tc_to_queue_priority_ordered(struct tc_mqprio_qopt_offload *mqprio)
+{
+ int num_tc = mqprio->qopt.num_tc;
+ int i;
+
+ for (i = 1; i < num_tc; i++) {
+ if (mqprio->qopt.offset[i - 1] > mqprio->qopt.offset[i])
+ return false;
+ }
+
+ return true;
+}
+
static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio)
{
@@ -6739,6 +6767,7 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
if (!mqprio->qopt.num_tc) {
adapter->strict_priority_enable = false;
+ igc_fpe_clear_preempt_queue(adapter);
netdev_reset_tc(adapter->netdev);
goto apply;
}
@@ -6760,10 +6789,9 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
}
}
- /* Preemption is not supported yet. */
- if (mqprio->preemptible_tcs) {
+ if (!igc_tsn_is_tc_to_queue_priority_ordered(mqprio)) {
NL_SET_ERR_MSG_MOD(mqprio->extack,
- "Preemption is not supported yet");
+ "tc to queue mapping must preserve increasing priority (higher tc -> higher queue)");
return -EOPNOTSUPP;
}
@@ -6786,6 +6814,7 @@ static int igc_tsn_enable_mqprio(struct igc_adapter *adapter,
adapter->queue_per_tc[i] = i;
mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ igc_fpe_save_preempt_queue(adapter, mqprio);
apply:
return igc_tsn_offload_apply(adapter);
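
igc_tsn_is_tc_to_queue_priority_ordered() above only checks that queue offsets are non-decreasing across TCs. A hedged illustration of mappings it accepts and rejects (the helper and the literal offsets are examples, not driver data):

	/* Sketch only: exercises the static helper from igc_main.c. */
	static void example_tc_mappings(void)
	{
		struct tc_mqprio_qopt_offload ok = {}, bad = {};

		ok.qopt.num_tc = 3;
		ok.qopt.offset[0] = 0;	/* tc0 -> queue 0 */
		ok.qopt.offset[1] = 1;	/* tc1 -> queue 1 */
		ok.qopt.offset[2] = 2;	/* tc2 -> queues 2..3 */

		bad.qopt.num_tc = 2;
		bad.qopt.offset[0] = 2;	/* tc0 -> queue 2 */
		bad.qopt.offset[1] = 0;	/* tc1 -> queue 0: offsets decrease */

		WARN_ON(!igc_tsn_is_tc_to_queue_priority_ordered(&ok));
		WARN_ON(igc_tsn_is_tc_to_queue_priority_ordered(&bad));
	}
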
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index f22cc4d4f459..8a110145bfee 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -13,6 +13,13 @@
#define TX_MAX_FRAG_SIZE (TX_MIN_FRAG_SIZE * \
(MAX_MULTPLIER_TX_MIN_FRAG + 1))
+enum tx_queue {
+ TX_QUEUE_0 = 0,
+ TX_QUEUE_1,
+ TX_QUEUE_2,
+ TX_QUEUE_3,
+};
+
DEFINE_STATIC_KEY_FALSE(igc_fpe_enabled);
static int igc_fpe_init_smd_frame(struct igc_ring *ring,
@@ -109,6 +116,18 @@ static int igc_fpe_xmit_smd_frame(struct igc_adapter *adapter,
return err;
}
+static void igc_fpe_configure_tx(struct ethtool_mmsv *mmsv, bool tx_enable)
+{
+ struct igc_fpe_t *fpe = container_of(mmsv, struct igc_fpe_t, mmsv);
+ struct igc_adapter *adapter;
+
+ adapter = container_of(fpe, struct igc_adapter, fpe);
+ adapter->fpe.tx_enabled = tx_enable;
+
+ /* Update config since tx_enabled affects preemptible queue configuration */
+ igc_tsn_offload_apply(adapter);
+}
+
static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
enum ethtool_mpacket type)
{
@@ -130,15 +149,59 @@ static void igc_fpe_send_mpacket(struct ethtool_mmsv *mmsv,
}
static const struct ethtool_mmsv_ops igc_mmsv_ops = {
+ .configure_tx = igc_fpe_configure_tx,
.send_mpacket = igc_fpe_send_mpacket,
};
void igc_fpe_init(struct igc_adapter *adapter)
{
adapter->fpe.tx_min_frag_size = TX_MIN_FRAG_SIZE;
+ adapter->fpe.tx_enabled = false;
ethtool_mmsv_init(&adapter->fpe.mmsv, adapter->netdev, &igc_mmsv_ops);
}
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter)
+{
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = false;
+ }
+}
+
+static u32 igc_fpe_map_preempt_tc_to_queue(const struct igc_adapter *adapter,
+ unsigned long preemptible_tcs)
+{
+ struct net_device *dev = adapter->netdev;
+ u32 i, queue = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(preemptible_tcs & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+ queue |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue;
+}
+
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio)
+{
+ u32 preemptible_queue = igc_fpe_map_preempt_tc_to_queue(adapter,
+ mqprio->preemptible_tcs);
+
+ for (int i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *tx_ring = adapter->tx_ring[i];
+
+ tx_ring->preemptible = !!(preemptible_queue & BIT(i));
+ }
+}
+
static bool is_any_launchtime(struct igc_adapter *adapter)
{
int i;
@@ -238,7 +301,7 @@ bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
adapter->taprio_offload_enable;
}
-static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
+static void igc_tsn_tx_arb(struct igc_adapter *adapter, bool reverse_prio)
{
struct igc_hw *hw = &adapter->hw;
u32 txarb;
@@ -250,10 +313,17 @@ static void igc_tsn_tx_arb(struct igc_adapter *adapter, u16 *queue_per_tc)
IGC_TXARB_TXQ_PRIO_2_MASK |
IGC_TXARB_TXQ_PRIO_3_MASK);
- txarb |= IGC_TXARB_TXQ_PRIO_0(queue_per_tc[3]);
- txarb |= IGC_TXARB_TXQ_PRIO_1(queue_per_tc[2]);
- txarb |= IGC_TXARB_TXQ_PRIO_2(queue_per_tc[1]);
- txarb |= IGC_TXARB_TXQ_PRIO_3(queue_per_tc[0]);
+ if (reverse_prio) {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_3);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_0);
+ } else {
+ txarb |= IGC_TXARB_TXQ_PRIO_0(TX_QUEUE_0);
+ txarb |= IGC_TXARB_TXQ_PRIO_1(TX_QUEUE_1);
+ txarb |= IGC_TXARB_TXQ_PRIO_2(TX_QUEUE_2);
+ txarb |= IGC_TXARB_TXQ_PRIO_3(TX_QUEUE_3);
+ }
wr32(IGC_TXARB, txarb);
}
@@ -286,7 +356,6 @@ static void igc_tsn_set_rxpbsize(struct igc_adapter *adapter,
*/
static int igc_tsn_disable_offload(struct igc_adapter *adapter)
{
- u16 queue_per_tc[4] = { 3, 2, 1, 0 };
struct igc_hw *hw = &adapter->hw;
u32 tqavctrl;
int i;
@@ -308,9 +377,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
+ int reg_idx = adapter->tx_ring[i]->reg_idx;
+ u32 txdctl;
+
wr32(IGC_TXQCTL(i), 0);
wr32(IGC_STQT(i), 0);
wr32(IGC_ENDQT(i), NSEC_PER_SEC);
+
+ txdctl = rd32(IGC_TXDCTL(reg_idx));
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ wr32(IGC_TXDCTL(reg_idx), txdctl);
}
wr32(IGC_QBVCYCLET_S, 0);
@@ -319,7 +395,7 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
/* Restore the default Tx arbitration: Priority 0 has the highest
* priority and is assigned to queue 0 and so on and so forth.
*/
- igc_tsn_tx_arb(adapter, queue_per_tc);
+ igc_tsn_tx_arb(adapter, false);
adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED;
@@ -355,7 +431,7 @@ static u8 igc_fpe_get_frag_size_mult(const struct igc_fpe_t *fpe)
u32 igc_fpe_get_supported_frag_size(u32 frag_size)
{
- const u32 supported_sizes[] = {64, 128, 192, 256};
+ static const u32 supported_sizes[] = { 64, 128, 192, 256 };
/* Find the smallest supported size that is >= frag_size */
for (int i = 0; i < ARRAY_SIZE(supported_sizes); i++) {
@@ -385,15 +461,13 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (igc_is_device_id_i226(hw))
igc_tsn_set_retx_qbvfullthreshold(adapter);
- if (adapter->strict_priority_enable) {
- /* Configure queue priorities according to the user provided
- * mapping.
- */
- igc_tsn_tx_arb(adapter, adapter->queue_per_tc);
- }
+ if (adapter->strict_priority_enable ||
+ adapter->flags & IGC_FLAG_TSN_REVERSE_TXQ_PRIO)
+ igc_tsn_tx_arb(adapter, true);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
+ u32 txdctl = rd32(IGC_TXDCTL(ring->reg_idx));
u32 txqctl = 0;
u16 cbs_value;
u32 tqavcc;
@@ -427,6 +501,22 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+ if (!adapter->fpe.tx_enabled) {
+ /* fpe inactive: clear both flags */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else if (ring->preemptible) {
+ /* fpe active + preemptible: enable preemptible queue + set low priority */
+ txqctl |= IGC_TXQCTL_PREEMPTIBLE;
+ txdctl &= ~IGC_TXDCTL_PRIORITY_HIGH;
+ } else {
+ /* fpe active + express: enable express queue + set high priority */
+ txqctl &= ~IGC_TXQCTL_PREEMPTIBLE;
+ txdctl |= IGC_TXDCTL_PRIORITY_HIGH;
+ }
+
+ wr32(IGC_TXDCTL(ring->reg_idx), txdctl);
+
/* Skip configuring CBS for Q2 and Q3 */
if (i > 1)
goto skip_cbs;
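
igc_fpe_map_preempt_tc_to_queue() turns per-TC preemptibility into a per-queue bitmap via the netdev tc_to_txq table. A hedged worked example with fixed numbers instead of the live table (all values illustrative):

	static u32 example_preempt_queue_mask(void)
	{
		/* tc0 -> queue 0, tc1 -> queue 1, tc2 -> queues 2..3 */
		const u32 offset[] = { 0, 1, 2 };
		const u32 count[] = { 1, 1, 2 };
		unsigned long preemptible_tcs = BIT(1) | BIT(2);
		u32 queue = 0;

		for (u32 i = 0; i < ARRAY_SIZE(offset); i++) {
			if (!(preemptible_tcs & BIT(i)))
				continue;

			queue |= GENMASK(offset[i] + count[i] - 1, offset[i]);
		}

		/* 0b1110: queues 1-3 become preemptible, queue 0 stays express. */
		return queue;
	}
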
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index c2a77229207b..a95b893459d7 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -4,6 +4,8 @@
#ifndef _IGC_TSN_H_
#define _IGC_TSN_H_
+#include <net/pkt_sched.h>
+
#define IGC_RX_MIN_FRAG_SIZE 60
#define SMD_FRAME_SIZE 60
@@ -15,6 +17,9 @@ enum igc_txd_popts_type {
DECLARE_STATIC_KEY_FALSE(igc_fpe_enabled);
void igc_fpe_init(struct igc_adapter *adapter);
+void igc_fpe_clear_preempt_queue(struct igc_adapter *adapter);
+void igc_fpe_save_preempt_queue(struct igc_adapter *adapter,
+ const struct tc_mqprio_qopt_offload *mqprio);
u32 igc_fpe_get_supported_frag_size(u32 frag_size);
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 47311b134a7a..c6772cd2d802 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -752,6 +752,7 @@ struct ixgbe_adapter {
bool link_up;
unsigned long sfp_poll_time;
unsigned long link_check_timeout;
+ u32 link_down_events;
struct timer_list service_timer;
struct work_struct service_task;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index d8a919ab7027..25c3a09ad7f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1033,6 +1033,14 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
+static void ixgbe_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
+
+ stats->link_down_events = adapter->link_down_events;
+}
+
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
@@ -2745,9 +2753,11 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *cmd)
+static int ixgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
+
cmd->data = 0;
/* Report default options for RSS on ixgbe */
@@ -2817,9 +2827,6 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
break;
- case ETHTOOL_GRXFH:
- ret = ixgbe_get_rss_hash_opts(adapter, cmd);
- break;
default:
break;
}
@@ -3071,9 +3078,11 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
-static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+static int ixgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
u32 flags2 = adapter->flags2;
/*
@@ -3196,9 +3205,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
break;
- case ETHTOOL_SRXFH:
- ret = ixgbe_set_rss_hash_opt(adapter, cmd);
- break;
default:
break;
}
@@ -3719,6 +3725,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_wol = ixgbe_set_wol,
.nway_reset = ixgbe_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
@@ -3742,6 +3749,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
.get_eee = ixgbe_get_eee,
.set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
@@ -3764,6 +3773,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
.set_wol = ixgbe_set_wol_e610,
.nway_reset = ixgbe_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ixgbe_get_link_ext_stats,
.get_eeprom_len = ixgbe_get_eeprom_len,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
@@ -3787,6 +3797,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops_e610 = {
.get_rxfh_key_size = ixgbe_get_rxfh_key_size,
.get_rxfh = ixgbe_get_rxfh,
.set_rxfh = ixgbe_set_rxfh,
+ .get_rxfh_fields = ixgbe_get_rxfh_fields,
+ .set_rxfh_fields = ixgbe_set_rxfh_fields,
.get_eee = ixgbe_get_eee,
.set_eee = ixgbe_set_eee,
.get_channels = ixgbe_get_channels,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cba860f0e1f1..6eccfba51fac 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7991,6 +7991,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (!netif_carrier_ok(netdev))
return;
+ adapter->link_down_events++;
+
/* poll for SFP+ cable when link is down */
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 1d2acdb64f45..7461367a1868 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -20,7 +20,7 @@ static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_link_info *link = &hw->link;
- /* Start with X540 invariants, since so simular */
+ /* Start with X540 invariants, since so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -48,7 +48,7 @@ static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- /* Start with X540 invariants, since so simular */
+ /* Start with X540 invariants, since so similar */
ixgbe_get_invariants_X540(hw);
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
@@ -685,7 +685,7 @@ static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
return 0;
}
-/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the
+/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of the
* IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
@@ -847,7 +847,7 @@ static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif
* @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
+ * @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
@@ -1253,7 +1253,7 @@ static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
/**
* ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode
- * @hw: pointer t hardware structure
+ * @hw: pointer to hardware structure
*
* Returns true if in FW NVM recovery mode.
*/
@@ -1267,7 +1267,7 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
/** ixgbe_disable_rx_x550 - Disable RX unit
*
- * Enables the Rx DMA unit for x550
+ * Disables the Rx DMA unit for x550
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
@@ -1754,7 +1754,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+ * SFP not present error is not accepted in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -1804,7 +1804,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
/* If no SFP module present, then return success. Return success since
- * SFP not present error is not excepted in the setup MAC link flow.
+ * SFP not present error is not accepted in the setup MAC link flow.
*/
if (ret_val == -ENOENT)
return 0;
@@ -2324,7 +2324,7 @@ static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* PHY interrupt is lsc
* @is_overtemp: indicate whether an overtemp event encountered
*
- * Determime if external Base T PHY interrupt cause is high temperature
+ * Determine if external Base T PHY interrupt cause is high temperature
* failure alarm or link status change.
**/
static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
@@ -2669,7 +2669,7 @@ static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (status)
return status;
- /* If link is not still up, then no setup is necessary so return */
+ /* If the link is still not up, no setup is necessary */
status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
@@ -2768,7 +2768,7 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* Sends driver version number to firmware through the manageability
* block. On success return 0
* else returns -EBUSY when encountering an error acquiring
- * semaphore, -EIO when command fails or -ENIVAL when incorrect
+ * semaphore, -EIO when command fails or -EINVAL when incorrect
* params passed.
**/
int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
@@ -3175,7 +3175,7 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
/* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
- * PHY address. This register field was has only been used for X552.
+ * PHY address. This register field has only been used for X552.
*/
if (hw->mac.type == ixgbe_mac_x550em_a &&
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
@@ -3735,7 +3735,7 @@ static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
- * Release the SWFW semaphore and puts the shared PHY token as needed
+ * Release the SWFW semaphore and puts back the shared PHY token as needed
*/
static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
@@ -3756,7 +3756,7 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register using the SWFW lock and PHY
- * Token. The PHY Token is needed since the MDIO is shared between to MAC
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig
index 480293b71dbc..2445b979c499 100644
--- a/drivers/net/ethernet/intel/libeth/Kconfig
+++ b/drivers/net/ethernet/intel/libeth/Kconfig
@@ -1,9 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
config LIBETH
- tristate
+ tristate "Common Ethernet library (libeth)" if COMPILE_TEST
select PAGE_POOL
help
libeth is a common library containing routines shared between several
drivers, but not yet promoted to the generic kernel API.
+
+config LIBETH_XDP
+ tristate "Common XDP library (libeth_xdp)" if COMPILE_TEST
+ select LIBETH
+ help
+ XDP and XSk helpers based on libeth hotpath management.
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
index 52492b081132..350bc0b38bad 100644
--- a/drivers/net/ethernet/intel/libeth/Makefile
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2024 Intel Corporation
+# Copyright (C) 2024-2025 Intel Corporation
obj-$(CONFIG_LIBETH) += libeth.o
libeth-y := rx.o
+libeth-y += tx.o
+
+obj-$(CONFIG_LIBETH_XDP) += libeth_xdp.o
+
+libeth_xdp-y += xdp.o
+libeth_xdp-y += xsk.o
diff --git a/drivers/net/ethernet/intel/libeth/priv.h b/drivers/net/ethernet/intel/libeth/priv.h
new file mode 100644
index 000000000000..9b811d31015c
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/priv.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_PRIV_H
+#define __LIBETH_PRIV_H
+
+#include <linux/types.h>
+
+/* XDP */
+
+enum xdp_action;
+struct libeth_xdp_buff;
+struct libeth_xdp_tx_frame;
+struct skb_shared_info;
+struct xdp_frame_bulk;
+
+extern const struct xsk_tx_metadata_ops libeth_xsktmo_slow;
+
+void libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count);
+u32 libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp, enum xdp_action act,
+ int ret);
+
+struct libeth_xdp_ops {
+ void (*bulk)(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags);
+ void (*xsk)(struct libeth_xdp_buff *xdp);
+};
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops);
+
+static inline void libeth_detach_xdp(void)
+{
+ libeth_attach_xdp(NULL);
+}
+
+#endif /* __LIBETH_PRIV_H */
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 66d1d23b8ad2..62521a1f4ec9 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -1,5 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <linux/export.h>
#include <net/libeth/rx.h>
@@ -68,7 +72,7 @@ static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
struct page_pool_params *pp)
{
- pp->offset = LIBETH_SKB_HEADROOM;
+ pp->offset = fq->xdp ? LIBETH_XDP_HEADROOM : LIBETH_SKB_HEADROOM;
/* HW-writeable / syncable length per one page */
pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
@@ -155,11 +159,12 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
.dev = napi->dev->dev.parent,
.netdev = napi->dev,
.napi = napi,
- .dma_dir = DMA_FROM_DEVICE,
};
struct libeth_fqe *fqes;
struct page_pool *pool;
- bool ret;
+ int ret;
+
+ pp.dma_dir = fq->xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
if (!fq->hsplit)
ret = libeth_rx_page_pool_params(fq, &pp);
@@ -173,20 +178,28 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
return PTR_ERR(pool);
fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
- if (!fqes)
+ if (!fqes) {
+ ret = -ENOMEM;
goto err_buf;
+ }
+
+ ret = xdp_reg_page_pool(pool);
+ if (ret)
+ goto err_mem;
fq->fqes = fqes;
fq->pp = pool;
return 0;
+err_mem:
+ kvfree(fqes);
err_buf:
page_pool_destroy(pool);
- return -ENOMEM;
+ return ret;
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_fq_create);
/**
* libeth_rx_fq_destroy - destroy a &page_pool created by libeth
@@ -194,22 +207,23 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH");
*/
void libeth_rx_fq_destroy(struct libeth_fq *fq)
{
+ xdp_unreg_page_pool(fq->pp);
kvfree(fq->fqes);
page_pool_destroy(fq->pp);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_fq_destroy);
/**
- * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
- * @page: page to recycle
+ * libeth_rx_recycle_slow - recycle libeth netmem
+ * @netmem: network memory to recycle
*
* To be used on exceptions or rare cases not requiring fast inline recycling.
*/
-void libeth_rx_recycle_slow(struct page *page)
+void __cold libeth_rx_recycle_slow(netmem_ref netmem)
{
- page_pool_recycle_direct(page->pp, page);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_recycle_slow);
/* Converting abstract packet type numbers into a software structure with
* the packet parameters to do O(1) lookup on Rx.
@@ -251,7 +265,7 @@ void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}
-EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, "LIBETH");
+EXPORT_SYMBOL_GPL(libeth_rx_pt_gen_hash_type);
/* Module */
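
With the fq->xdp flag, libeth now picks the XDP headroom, maps pages DMA_BIDIRECTIONAL and registers the page pool with the XDP core in one place. A hedged sketch of a driver-side fill-queue setup (only fields referenced in this patch are used; anything else would be driver-specific):

	static int example_create_xdp_fq(struct libeth_fq *fq,
					 struct napi_struct *napi, u32 count)
	{
		fq->count = count;
		fq->nid = NUMA_NO_NODE;
		fq->xdp = true;		/* XDP headroom + DMA_BIDIRECTIONAL mapping */

		return libeth_rx_fq_create(fq, napi);
	}
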
diff --git a/drivers/net/ethernet/intel/libeth/tx.c b/drivers/net/ethernet/intel/libeth/tx.c
new file mode 100644
index 000000000000..e0167f43d2a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/tx.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH"
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* Tx buffer completion */
+
+DEFINE_STATIC_CALL_NULL(bulk, libeth_xdp_return_buff_bulk);
+DEFINE_STATIC_CALL_NULL(xsk, libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_tx_complete_any - perform Tx completion for one SQE of any type
+ * @sqe: Tx buffer to complete
+ * @cp: polling params
+ *
+ * Can be used to complete both regular and XDP SQEs, for example when
+ * destroying queues.
+ * When libeth_xdp is not loaded, XDPSQEs won't be handled.
+ */
+void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp)
+{
+ if (sqe->type >= __LIBETH_SQE_XDP_START)
+ __libeth_xdp_complete_tx(sqe, cp, static_call(bulk),
+ static_call(xsk));
+ else
+ libeth_tx_complete(sqe, cp);
+}
+EXPORT_SYMBOL_GPL(libeth_tx_complete_any);
+
+/* Module */
+
+void libeth_attach_xdp(const struct libeth_xdp_ops *ops)
+{
+ static_call_update(bulk, ops ? ops->bulk : NULL);
+ static_call_update(xsk, ops ? ops->xsk : NULL);
+}
+EXPORT_SYMBOL_GPL(libeth_attach_xdp);
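
The static calls above default to NULL, so libeth_tx_complete_any() skips XDP SQEs until the optional libeth_xdp module attaches its handlers. A hedged sketch of the module-side wiring (the example_ function names are assumptions; the real registration lives in libeth_xdp):

	static const struct libeth_xdp_ops example_xdp_ops = {
		.bulk	= libeth_xdp_return_buff_bulk,
		.xsk	= libeth_xsk_buff_free_slow,
	};

	static int __init example_libeth_xdp_init(void)
	{
		libeth_attach_xdp(&example_xdp_ops);
		return 0;
	}

	static void __exit example_libeth_xdp_exit(void)
	{
		/* Back to NULL static calls: XDPSQE completion becomes a no-op. */
		libeth_detach_xdp();
	}
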
diff --git a/drivers/net/ethernet/intel/libeth/xdp.c b/drivers/net/ethernet/intel/libeth/xdp.c
new file mode 100644
index 000000000000..d4ac027d9584
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xdp.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xdp.h>
+
+#include "priv.h"
+
+/* XDPSQ sharing */
+
+DEFINE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+EXPORT_SYMBOL_GPL(libeth_xdpsq_share);
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ bool warn;
+
+ spin_lock_init(&lock->lock);
+ lock->share = true;
+
+ warn = !static_key_enabled(&libeth_xdpsq_share);
+ static_branch_inc(&libeth_xdpsq_share);
+
+ if (warn && net_ratelimit())
+ netdev_warn(dev, "XDPSQ sharing enabled, possible XDP Tx slowdown\n");
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_get);
+
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ static_branch_dec(&libeth_xdpsq_share);
+
+ if (!static_key_enabled(&libeth_xdpsq_share) && net_ratelimit())
+ netdev_notice(dev, "XDPSQ sharing disabled\n");
+
+ lock->share = false;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_put);
+
+void __acquires(&lock->lock)
+__libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ spin_lock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_lock);
+
+void __releases(&lock->lock)
+__libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ spin_unlock(&lock->lock);
+}
+EXPORT_SYMBOL_GPL(__libeth_xdpsq_unlock);
+
+/* XDPSQ clean-up timers */
+
+/**
+ * libeth_xdpsq_init_timer - initialize an XDPSQ clean-up timer
+ * @timer: timer to initialize
+ * @xdpsq: queue this timer belongs to
+ * @lock: corresponding XDPSQ lock
+ * @poll: queue polling/completion function
+ *
+ * XDPSQ clean-up timers must be set up at queue configuration time, before
+ * the queue is used. Set the required pointers and the cleaning callback.
+ */
+void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
+ struct libeth_xdpsq_lock *lock,
+ void (*poll)(struct work_struct *work))
+{
+ timer->xdpsq = xdpsq;
+ timer->lock = lock;
+
+ INIT_DELAYED_WORK(&timer->dwork, poll);
+}
+EXPORT_SYMBOL_GPL(libeth_xdpsq_init_timer);
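
A hedged sketch of how a driver could wire this timer up at queue configuration time (the example_* names and types are assumptions, not libeth API):

	static void example_xdpsq_clean_work(struct work_struct *work)
	{
		struct libeth_xdpsq_timer *timer =
			container_of(work, struct libeth_xdpsq_timer, dwork.work);

		/* ->xdpsq was stored by libeth_xdpsq_init_timer() below. */
		example_complete_xdpsq(timer->xdpsq);
	}

	static void example_cfg_xdpsq(struct example_xdpsq *xdpsq)
	{
		libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->lock,
					example_xdpsq_clean_work);
	}
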
+
+/* ``XDP_TX`` bulking */
+
+static void __cold
+libeth_xdp_tx_return_one(const struct libeth_xdp_tx_frame *frm)
+{
+ if (frm->len_fl & LIBETH_XDP_TX_MULTI)
+ libeth_xdp_return_frags(frm->data + frm->soff, true);
+
+ libeth_xdp_return_va(frm->data, true);
+}
+
+static void __cold
+libeth_xdp_tx_return_bulk(const struct libeth_xdp_tx_frame *bq, u32 count)
+{
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+
+ if (!(frm->len_fl & LIBETH_XDP_TX_FIRST))
+ continue;
+
+ libeth_xdp_tx_return_one(frm);
+ }
+}
+
+static void __cold libeth_trace_xdp_exception(const struct net_device *dev,
+ const struct bpf_prog *prog,
+ u32 act)
+{
+ trace_xdp_exception(dev, prog, act);
+}
+
+/**
+ * libeth_xdp_tx_exception - handle Tx exceptions of XDP frames
+ * @bq: XDP Tx frame bulk
+ * @sent: number of frames sent successfully (from this bulk)
+ * @flags: internal libeth_xdp flags (XSk, .ndo_xdp_xmit etc.)
+ *
+ * Cold helper used by __libeth_xdp_tx_flush_bulk(), do not call directly.
+ * Reports XDP Tx exceptions, frees the frames that won't be sent, or adjusts
+ * the Tx bulk to try again later.
+ */
+void __cold libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
+ u32 flags)
+{
+ const struct libeth_xdp_tx_frame *pos = &bq->bulk[sent];
+ u32 left = bq->count - sent;
+
+ if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_trace_xdp_exception(bq->dev, bq->prog, XDP_TX);
+
+ if (!(flags & LIBETH_XDP_TX_DROP)) {
+ memmove(bq->bulk, pos, left * sizeof(*bq->bulk));
+ bq->count = left;
+
+ return;
+ }
+
+ if (flags & LIBETH_XDP_TX_XSK)
+ libeth_xsk_tx_return_bulk(pos, left);
+ else if (!(flags & LIBETH_XDP_TX_NDO))
+ libeth_xdp_tx_return_bulk(pos, left);
+ else
+ libeth_xdp_xmit_return_bulk(pos, left, bq->dev);
+
+ bq->count = 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_tx_exception);
+
+/* .ndo_xdp_xmit() implementation */
+
+u32 __cold libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count, const struct net_device *dev)
+{
+ u32 n = 0;
+
+ for (u32 i = 0; i < count; i++) {
+ const struct libeth_xdp_tx_frame *frm = &bq[i];
+ dma_addr_t dma;
+
+ if (frm->flags & LIBETH_XDP_TX_FIRST)
+ dma = *libeth_xdp_xmit_frame_dma(frm->xdpf);
+ else
+ dma = dma_unmap_addr(frm, dma);
+
+ dma_unmap_page(dev->dev.parent, dma, dma_unmap_len(frm, len),
+ DMA_TO_DEVICE);
+
+ /* Actual xdp_frames are freed by the core */
+ n += !!(frm->flags & LIBETH_XDP_TX_FIRST);
+ }
+
+ return n;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_xmit_return_bulk);
+
+/* Rx polling path */
+
+/**
+ * libeth_xdp_load_stash - recreate an &xdp_buff from libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff to initialize
+ * @src: source stash
+ *
+ * External helper used by libeth_xdp_init_buff(), do not call directly.
+ * Recreate an onstack &libeth_xdp_buff using the stash saved earlier.
+ * The only untouched field (rxq) is initialized later in the
+ * above-mentioned function.
+ */
+void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src)
+{
+ dst->data = src->data;
+ dst->base.data_end = src->data + src->len;
+ dst->base.data_meta = src->data;
+ dst->base.data_hard_start = src->data - src->headroom;
+
+ dst->base.frame_sz = src->frame_sz;
+ dst->base.flags = src->flags;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_load_stash);
+
+/**
+ * libeth_xdp_save_stash - convert &xdp_buff to a libeth_xdp buffer stash
+ * @dst: target &libeth_xdp_buff_stash to initialize
+ * @src: source XDP buffer
+ *
+ * External helper used by libeth_xdp_save_buff(), do not call directly.
+ * Use the fields from the passed XDP buffer to initialize the stash on the
+ * queue, so that a partially received frame can be finished later during
+ * the next NAPI poll.
+ */
+void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src)
+{
+ dst->data = src->data;
+ dst->headroom = src->data - src->base.data_hard_start;
+ dst->len = src->base.data_end - src->data;
+
+ dst->frame_sz = src->base.frame_sz;
+ dst->flags = src->base.flags;
+
+ WARN_ON_ONCE(dst->flags != src->base.flags);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_save_stash);
+
+void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
+{
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+
+ libeth_xdp_load_stash(xdp, stash);
+ libeth_xdp_return_buff_slow(xdp);
+
+ stash->data = NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_return_stash);
+
+/**
+ * libeth_xdp_return_buff_slow - free &libeth_xdp_buff
+ * @xdp: buffer to free/return
+ *
+ * Slowpath version of libeth_xdp_return_buff() to be called on exceptions,
+ * queue clean-ups etc., without unwanted inlining.
+ */
+void __cold libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp)
+{
+ __libeth_xdp_return_buff(xdp, false);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_slow);
+
+/**
+ * libeth_xdp_buff_add_frag - add frag to XDP buffer
+ * @xdp: head XDP buffer
+ * @fqe: Rx buffer containing the frag
+ * @len: frag length reported by HW
+ *
+ * External helper used by libeth_xdp_process_buff(), do not call directly.
+ * Frees both head and frag buffers on error.
+ *
+ * Return: true on success, false on error (no space for a new frag).
+ */
+bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ netmem_ref netmem = fqe->netmem;
+
+ if (!xdp_buff_add_frag(&xdp->base, netmem,
+ fqe->offset + netmem_get_pp(netmem)->p.offset,
+ len, fqe->truesize))
+ goto recycle;
+
+ return true;
+
+recycle:
+ libeth_rx_recycle_slow(netmem);
+ libeth_xdp_return_buff_slow(xdp);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_buff_add_frag);
+
+/**
+ * libeth_xdp_prog_exception - handle XDP prog exceptions
+ * @bq: XDP Tx bulk
+ * @xdp: buffer to process
+ * @act: original XDP prog verdict
+ * @ret: error code if redirect failed
+ *
+ * External helper used by __libeth_xdp_run_prog() and
+ * __libeth_xsk_run_prog_slow(), do not call directly.
+ * Reports invalid @act, XDP exception trace event and frees the buffer.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __cold libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ if (act > XDP_REDIRECT)
+ bpf_warn_invalid_xdp_action(bq->dev, bq->prog, act);
+
+ libeth_trace_xdp_exception(bq->dev, bq->prog, act);
+
+ if (xdp->base.rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+ return libeth_xsk_prog_exception(xdp, act, ret);
+
+ libeth_xdp_return_buff_slow(xdp);
+
+ return LIBETH_XDP_DROP;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_prog_exception);
+
+/* Tx buffer completion */
+
+static void libeth_xdp_put_netmem_bulk(netmem_ref netmem,
+ struct xdp_frame_bulk *bq)
+{
+ if (unlikely(bq->count == XDP_BULK_QUEUE_SIZE))
+ xdp_flush_frame_bulk(bq);
+
+ bq->q[bq->count++] = netmem;
+}
+
+/**
+ * libeth_xdp_return_buff_bulk - free &xdp_buff as part of a bulk
+ * @sinfo: shared info corresponding to the buffer
+ * @bq: XDP frame bulk to store the buffer
+ * @frags: whether the buffer has frags
+ *
+ * Same as xdp_return_frame_bulk(), but for &libeth_xdp_buff. It speeds up Tx
+ * completion of ``XDP_TX`` buffers and allows freeing them in the same bulks
+ * as &xdp_frame buffers.
+ */
+void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags)
+{
+ if (!frags)
+ goto head;
+
+ for (u32 i = 0; i < sinfo->nr_frags; i++)
+ libeth_xdp_put_netmem_bulk(skb_frag_netmem(&sinfo->frags[i]),
+ bq);
+
+head:
+ libeth_xdp_put_netmem_bulk(virt_to_netmem(sinfo), bq);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_return_buff_bulk);
+
+/* Misc */
+
+/**
+ * libeth_xdp_queue_threshold - calculate XDP queue clean/refill threshold
+ * @count: number of descriptors in the queue
+ *
+ * The threshold is the limit at which RQs start to refill (when the number of
+ * empty buffers exceeds it) and SQs get cleaned up (when the number of free
+ * descriptors goes below it). To speed up hotpath processing, the threshold
+ * is always a power of two, closest to 1/4 of the queue length.
+ * Don't call this on the hotpath; calculate and cache the threshold during
+ * queue initialization.
+ *
+ * Return: the calculated threshold.
+ */
+u32 libeth_xdp_queue_threshold(u32 count)
+{
+ u32 quarter, low, high;
+
+ if (likely(is_power_of_2(count)))
+ return count >> 2;
+
+ quarter = DIV_ROUND_CLOSEST(count, 4);
+ low = rounddown_pow_of_two(quarter);
+ high = roundup_pow_of_two(quarter);
+
+ return high - quarter <= quarter - low ? high : low;
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_queue_threshold);
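A few concrete values to illustrate the rounding rule, plus the intended init-time caching (the ring structure and field names are hypothetical):

/* count = 512             -> power of 2, thresh = 512 / 4 = 128
 * count = 1000, 1/4 = 250 -> closest power of 2 is 256
 * count = 160,  1/4 = 40  -> closest power of 2 is 32
 */
static void my_ring_set_thresh(struct my_ring *ring)
{
	/* cache once at queue init, never recompute on the hotpath */
	ring->thresh = libeth_xdp_queue_threshold(ring->desc_count);
}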
+
+/**
+ * __libeth_xdp_set_features - set XDP features for netdev
+ * @dev: &net_device to configure
+ * @xmo: XDP metadata ops (Rx hints)
+ * @zc_segs: maximum number of S/G frags the HW can transmit
+ * @tmo: XSk Tx metadata ops (Tx hints)
+ *
+ * Set all the features libeth_xdp supports. Only the first argument is
+ * mandatory; if @zc_segs is zero, XSk support won't be advertised.
+ * Use the non-underscored versions in drivers instead.
+ */
+void __libeth_xdp_set_features(struct net_device *dev,
+ const struct xdp_metadata_ops *xmo,
+ u32 zc_segs,
+ const struct xsk_tx_metadata_ops *tmo)
+{
+ xdp_set_features_flag(dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ (zc_segs ? NETDEV_XDP_ACT_XSK_ZEROCOPY : 0) |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG);
+ dev->xdp_metadata_ops = xmo;
+
+ tmo = tmo == libeth_xsktmo ? &libeth_xsktmo_slow : tmo;
+
+ dev->xdp_zc_max_segs = zc_segs ? : 1;
+ dev->xsk_tx_metadata_ops = zc_segs ? tmo : NULL;
+}
+EXPORT_SYMBOL_GPL(__libeth_xdp_set_features);
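A conceptual probe-time sketch; a real driver would use the non-underscored wrappers mentioned above, and my_xdp_metadata_ops plus the zc_segs value of 4 are assumptions for illustration only.

static void my_set_xdp_features(struct net_device *dev)
{
	/* 4 = max S/G frags the HW can transmit for XSk; 0 disables XSk */
	__libeth_xdp_set_features(dev, &my_xdp_metadata_ops, 4,
				  libeth_xsktmo);
}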
+
+/**
+ * libeth_xdp_set_redirect - toggle the XDP redirect feature
+ * @dev: &net_device to configure
+ * @enable: whether XDP is enabled
+ *
+ * Use this when XDPSQs are not always available, to dynamically enable and
+ * disable the redirect feature.
+ */
+void libeth_xdp_set_redirect(struct net_device *dev, bool enable)
+{
+ if (enable)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+}
+EXPORT_SYMBOL_GPL(libeth_xdp_set_redirect);
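For example, a driver that has XDPSQs only while an XDP program is installed could toggle the flag from its XDP setup path; this is a hypothetical sketch, not an excerpt from any driver.

static int my_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog)
{
	/* ... reconfigure queues, swap the program ... */

	libeth_xdp_set_redirect(dev, !!prog);

	return 0;
}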
+
+/* Module */
+
+static const struct libeth_xdp_ops xdp_ops __initconst = {
+ .bulk = libeth_xdp_return_buff_bulk,
+ .xsk = libeth_xsk_buff_free_slow,
+};
+
+static int __init libeth_xdp_module_init(void)
+{
+ libeth_attach_xdp(&xdp_ops);
+
+ return 0;
+}
+module_init(libeth_xdp_module_init);
+
+static void __exit libeth_xdp_module_exit(void)
+{
+ libeth_detach_xdp();
+}
+module_exit(libeth_xdp_module_exit);
+
+MODULE_DESCRIPTION("Common Ethernet library - XDP infra");
+MODULE_IMPORT_NS("LIBETH");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c
new file mode 100644
index 000000000000..846e902e31b6
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/xsk.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBETH_XDP"
+
+#include <linux/export.h>
+
+#include <net/libeth/xsk.h>
+
+#include "priv.h"
+
+/* ``XDP_TX`` bulking */
+
+void __cold libeth_xsk_tx_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count)
+{
+ for (u32 i = 0; i < count; i++)
+ libeth_xsk_buff_free_slow(bq[i].xsk);
+}
+
+/* XSk TMO */
+
+const struct xsk_tx_metadata_ops libeth_xsktmo_slow = {
+ .tmo_request_checksum = libeth_xsktmo_req_csum,
+};
+
+/* Rx polling path */
+
+/**
+ * libeth_xsk_buff_free_slow - free an XSk Rx buffer
+ * @xdp: buffer to free
+ *
+ * Slowpath version of xsk_buff_free() to be used on exceptions, cleanups etc.
+ * to avoid unwanted inlining.
+ */
+void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp)
+{
+ xsk_buff_free(&xdp->base);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_free_slow);
+
+/**
+ * libeth_xsk_buff_add_frag - add frag to XSk Rx buffer
+ * @head: head buffer
+ * @xdp: frag buffer
+ *
+ * External helper used by libeth_xsk_process_buff(), do not call directly.
+ * Frees both main and frag buffers on error.
+ *
+ * Return: main buffer with attached frag on success, %NULL on error (no space
+ * for a new frag).
+ */
+struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp)
+{
+ if (!xsk_buff_add_frag(&head->base, &xdp->base))
+ goto free;
+
+ return head;
+
+free:
+ libeth_xsk_buff_free_slow(xdp);
+ libeth_xsk_buff_free_slow(head);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_add_frag);
+
+/**
+ * libeth_xsk_buff_stats_frags - update onstack RQ stats with XSk frags info
+ * @rs: onstack stats to update
+ * @xdp: buffer to account
+ *
+ * External helper used by __libeth_xsk_run_pass(), do not call directly.
+ * Adds buffer's frags count and total len to the onstack stats.
+ */
+void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
+ const struct libeth_xdp_buff *xdp)
+{
+ libeth_xdp_buff_stats_frags(rs, xdp);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_buff_stats_frags);
+
+/**
+ * __libeth_xsk_run_prog_slow - process the non-``XDP_REDIRECT`` verdicts
+ * @xdp: buffer to process
+ * @bq: Tx bulk for queueing on ``XDP_TX``
+ * @act: verdict to process
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * External helper used by __libeth_xsk_run_prog(), do not call directly.
+ * ``XDP_REDIRECT`` is the most common and hottest verdict on XSk, thus
+ * it is processed inline. The rest goes here for out-of-line processing,
+ * together with redirect errors.
+ *
+ * Return: libeth_xdp XDP prog verdict.
+ */
+u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq,
+ enum xdp_action act, int ret)
+{
+ switch (act) {
+ case XDP_DROP:
+ xsk_buff_free(&xdp->base);
+
+ return LIBETH_XDP_DROP;
+ case XDP_TX:
+ return LIBETH_XDP_TX;
+ case XDP_PASS:
+ return LIBETH_XDP_PASS;
+ default:
+ break;
+ }
+
+ return libeth_xdp_prog_exception(bq, xdp, act, ret);
+}
+EXPORT_SYMBOL_GPL(__libeth_xsk_run_prog_slow);
+
+/**
+ * libeth_xsk_prog_exception - handle XDP prog exceptions on XSk
+ * @xdp: buffer to process
+ * @act: verdict returned by the prog
+ * @ret: error code if ``XDP_REDIRECT`` failed
+ *
+ * Internal. Frees the buffer and, if the queue uses XSk wakeups, stops the
+ * current NAPI poll when there are no free buffers left.
+ *
+ * Return: libeth_xdp's XDP prog verdict.
+ */
+u32 __cold libeth_xsk_prog_exception(struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret)
+{
+ const struct xdp_buff_xsk *xsk;
+ u32 __ret = LIBETH_XDP_DROP;
+
+ if (act != XDP_REDIRECT)
+ goto drop;
+
+ xsk = container_of(&xdp->base, typeof(*xsk), xdp);
+ if (xsk_uses_need_wakeup(xsk->pool) && ret == -ENOBUFS)
+ __ret = LIBETH_XDP_ABORTED;
+
+drop:
+ libeth_xsk_buff_free_slow(xdp);
+
+ return __ret;
+}
+
+/* Refill */
+
+/**
+ * libeth_xskfq_create - create an XSkFQ
+ * @fq: fill queue to initialize
+ *
+ * Allocates the FQEs and initializes the fields used by libeth_xdp: the
+ * number of buffers to refill, the refill threshold and the buffer length.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xskfq_create(struct libeth_xskfq *fq)
+{
+ fq->fqes = kvcalloc_node(fq->count, sizeof(*fq->fqes), GFP_KERNEL,
+ fq->nid);
+ if (!fq->fqes)
+ return -ENOMEM;
+
+ fq->pending = fq->count;
+ fq->thresh = libeth_xdp_queue_threshold(fq->count);
+ fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_create);
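A minimal caller-side sketch, assuming the inputs (pool, count, NUMA node) are filled in first; the wrapper name is hypothetical.

static int my_create_xsk_fq(struct libeth_xskfq *fq,
			    struct xsk_buff_pool *pool, u32 count, int nid)
{
	fq->pool = pool;
	fq->count = count;
	fq->nid = nid;

	/* undo with libeth_xskfq_destroy() on teardown */
	return libeth_xskfq_create(fq);
}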
+
+/**
+ * libeth_xskfq_destroy - destroy an XSkFQ
+ * @fq: fill queue to destroy
+ *
+ * Zeroes the used fields and frees the FQEs array.
+ */
+void libeth_xskfq_destroy(struct libeth_xskfq *fq)
+{
+ fq->buf_len = 0;
+ fq->thresh = 0;
+ fq->pending = 0;
+
+ kvfree(fq->fqes);
+}
+EXPORT_SYMBOL_GPL(libeth_xskfq_destroy);
+
+/* .ndo_xsk_wakeup */
+
+static void libeth_xsk_napi_sched(void *info)
+{
+ __napi_schedule_irqoff(info);
+}
+
+/**
+ * libeth_xsk_init_wakeup - initialize libeth XSk wakeup structure
+ * @csd: struct to initialize
+ * @napi: NAPI corresponding to this queue
+ *
+ * libeth_xdp uses inter-processor interrupts to perform XSk wakeups. In order
+ * to do that, the corresponding CSDs must be initialized when creating the
+ * queues.
+ */
+void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi)
+{
+ INIT_CSD(csd, libeth_xsk_napi_sched, napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_init_wakeup);
+
+/**
+ * libeth_xsk_wakeup - perform an XSk wakeup
+ * @csd: CSD corresponding to the queue
+ * @qid: the stack queue index
+ *
+ * Try to mark the NAPI as missed first, so that it gets rescheduled.
+ * If it's not scheduled yet, schedule it on the corresponding CPU using an
+ * IPI (or directly if already running on that CPU).
+ */
+void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid)
+{
+ struct napi_struct *napi = csd->info;
+
+ if (napi_if_scheduled_mark_missed(napi) ||
+ unlikely(!napi_schedule_prep(napi)))
+ return;
+
+ if (unlikely(qid >= nr_cpu_ids))
+ qid %= nr_cpu_ids;
+
+ if (qid != raw_smp_processor_id() && cpu_online(qid))
+ smp_call_function_single_async(qid, csd);
+ else
+ __napi_schedule(napi);
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_wakeup);
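Putting the two wakeup helpers together, a hypothetical driver might do something like the following; the queue and private structures are illustrative only.

/* at queue creation time */
static void my_rxq_init_wakeup(struct my_rxq *rxq, struct napi_struct *napi)
{
	libeth_xsk_init_wakeup(&rxq->csd, napi);
}

/* hypothetical .ndo_xsk_wakeup callback */
static int my_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct my_priv *priv = netdev_priv(dev);

	libeth_xsk_wakeup(&priv->rxq[qid].csd, qid);

	return 0;
}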
+
+/* Pool setup */
+
+#define LIBETH_XSK_DMA_ATTR \
+ (DMA_ATTR_WEAK_ORDERING | DMA_ATTR_SKIP_CPU_SYNC)
+
+/**
+ * libeth_xsk_setup_pool - setup or destroy an XSk pool for a queue
+ * @dev: target &net_device
+ * @qid: stack queue index to configure
+ * @enable: whether to enable or disable the pool
+ *
+ * Check that @qid is valid and then map or unmap the pool.
+ *
+ * Return: %0 on success, -errno otherwise.
+ */
+int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (!pool)
+ return -EINVAL;
+
+ if (enable)
+ return xsk_pool_dma_map(pool, dev->dev.parent,
+ LIBETH_XSK_DMA_ATTR);
+ else
+ xsk_pool_dma_unmap(pool, LIBETH_XSK_DMA_ATTR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libeth_xsk_setup_pool);
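A hedged sketch of wiring this into a driver's .ndo_bpf for the XDP_SETUP_XSK_POOL command; a real driver would also quiesce and restart the affected queue around the call.

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_XSK_POOL:
		return libeth_xsk_setup_pool(dev, bpf->xsk.queue_id,
					     !!bpf->xsk.pool);
	default:
		return -EOPNOTSUPP;
	}
}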
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
index 66a9825fe11f..6fda656afa9c 100644
--- a/drivers/net/ethernet/intel/libie/rx.c
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE"
+
+#include <linux/export.h>
#include <linux/net/intel/libie/rx.h>
/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
@@ -116,7 +119,7 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
LIBIE_RX_PT_IP(4),
LIBIE_RX_PT_IP(6),
};
-EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, "LIBIE");
+EXPORT_SYMBOL_GPL(libie_rx_pt_lut);
MODULE_DESCRIPTION("Intel(R) Ethernet common library");
MODULE_IMPORT_NS("LIBETH");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 147571fdada3..feab392ab2ee 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5014,8 +5014,6 @@ static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXRINGS:
info->data = rxq_number;
return 0;
- case ETHTOOL_GRXFH:
- return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 8ed83fb98862..44b201817d94 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -1618,7 +1618,8 @@ int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
return 0;
}
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info)
{
u16 hash_opts = 0;
u32 flow_type;
@@ -1656,7 +1657,8 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info)
{
unsigned long hash_opts;
u32 flow_type;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 85c9c6e80678..caadf3aea95d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -272,8 +272,10 @@ int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 rss_ctx,
int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 rss_ctx,
u32 *indir);
-int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
-int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
+int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port,
+ struct ethtool_rxfh_fields *info);
+int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port,
+ const struct ethtool_rxfh_fields *info);
void mvpp2_cls_init(struct mvpp2 *priv);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index a7872d14a49d..8ebb985d2573 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5588,9 +5588,6 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_GRXFH:
- ret = mvpp2_ethtool_rxfh_get(port, info);
- break;
case ETHTOOL_GRXRINGS:
info->data = port->nrxqs;
break;
@@ -5628,9 +5625,6 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
return -EOPNOTSUPP;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- ret = mvpp2_ethtool_rxfh_set(port, info);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = mvpp2_ethtool_cls_rule_ins(port, info);
break;
@@ -5747,6 +5741,29 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev,
return mvpp2_modify_rxfh_context(dev, NULL, rxfh, extack);
}
+static int mvpp2_ethtool_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ return mvpp2_ethtool_rxfh_get(port, info);
+}
+
+static int mvpp2_ethtool_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!mvpp22_rss_is_supported(port))
+ return -EOPNOTSUPP;
+
+ return mvpp2_ethtool_rxfh_set(port, info);
+}
+
static int mvpp2_ethtool_get_eee(struct net_device *dev,
struct ethtool_keee *eee)
{
@@ -5813,6 +5830,8 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
.get_rxfh = mvpp2_ethtool_get_rxfh,
.set_rxfh = mvpp2_ethtool_set_rxfh,
+ .get_rxfh_fields = mvpp2_ethtool_get_rxfh_fields,
+ .set_rxfh_fields = mvpp2_ethtool_set_rxfh_fields,
.create_rxfh_context = mvpp2_create_rxfh_context,
.modify_rxfh_context = mvpp2_modify_rxfh_context,
.remove_rxfh_context = mvpp2_remove_rxfh_context,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index ccea37847df8..532813d8d028 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -12,4 +12,4 @@ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \
- rvu_rep.o
+ rvu_rep.o cn20k/mbox_init.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
new file mode 100644
index 000000000000..4285b5d6a6a2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/api.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_API_H
+#define CN20K_API_H
+
+#include "../rvu.h"
+
+struct ng_rvu {
+ struct mbox_ops *rvu_mbox_ops;
+ struct qmem *pf_mbox_addr;
+ struct qmem *vf_mbox_addr;
+};
+
+/* Mbox related APIs */
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int num);
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap);
+void cn20k_free_mbox_memory(struct rvu *rvu);
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu);
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start);
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu);
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu);
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs);
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs);
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs);
+#endif /* CN20K_API_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
new file mode 100644
index 000000000000..bd3aab7770dd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "rvu_trace.h"
+#include "mbox.h"
+#include "reg.h"
+#include "api.h"
+
+static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = rvupf_read64(rvu, rvu_irq_data->intr_status);
+ rvupf_write64(rvu, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
+
+ rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, offset, vec = 0;
+ int err;
+
+ /* irq data for 4 VFPF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1;
+ intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 64;
+ break;
+ }
+ irq_data[vec].afvf_queue_work_hdlr =
+ rvu_queue_work;
+ offset = pf_vec_start + intr_vec;
+ irq_data[vec].vec_num = offset;
+ irq_data[vec].rvu = rvu;
+
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ err = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ &irq_data[vec]);
+ if (err) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for AFVF mbox irq\n");
+ return err;
+ }
+ rvu->irq_allocated[offset] = true;
+ }
+
+ return 0;
+}
+
+/* CN20K mbox PFx => AF irq handler */
+static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_irq_data *rvu_irq_data = rvu_irq;
+ struct rvu *rvu = rvu_irq_data->rvu;
+ u64 intr;
+
+ /* Clear interrupts */
+ intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status);
+ rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr);
+
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info,
+ rvu_irq_data->start,
+ rvu_irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_rvu_enable_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ /* Clear spurious irqs, if any */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64));
+
+ /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0),
+ INTR_MASK(hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1),
+ INTR_MASK(hw->total_pfs - 64));
+}
+
+void cn20k_rvu_unregister_interrupts(struct rvu *rvu)
+{
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0),
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1),
+ INTR_MASK(rvu->hw->total_pfs - 64));
+}
+
+int cn20k_register_afpf_mbox_intr(struct rvu *rvu)
+{
+ struct rvu_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(rvu->dev, 4,
+ sizeof(struct rvu_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <=
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++,
+ vec++) {
+ switch (intr_vec) {
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_AF_PFAF1_INT(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].rvu = rvu;
+
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[intr_vec * NAME_SIZE],
+ "RVUAF PFAF%d Mbox%d",
+ vec / 2, vec % 2);
+ ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[intr_vec * NAME_SIZE],
+ &irq_data[vec]);
+ if (ret)
+ return ret;
+
+ rvu->irq_allocated[intr_vec] = true;
+ }
+
+ return 0;
+}
+
+int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type, unsigned long *pf_bmap)
+{
+ int region;
+ u64 bar;
+
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
+ bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base);
+ bar += region * MBOX_SIZE;
+
+ mbox_addr[region] = (void *)bar;
+
+ if (!mbox_addr[region])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
+ int ndevs, int mbox_size)
+{
+ struct qmem *mbox_addr;
+ dma_addr_t iova;
+ int pf, err;
+
+ /* Allocate contiguous memory for mailbox communication,
+ * e.g. AF <=> PFx mbox memory.
+ * This memory is split into chunks of MBOX_SIZE and set up for
+ * each of the RVU PFs. In HW this memory gets aliased to an
+ * offset within BAR2 of those PFs.
+ *
+ * The AF accesses the mbox memory using direct physical addresses,
+ * while the PFs access the same shared memory through BAR2.
+ *
+ * PF <=> VF mbox memory works in the same fashion.
+ * Both the AF-PF and PF-VF mailboxes require an IOVA to maintain
+ * the mailbox messages.
+ */
+
+ err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size);
+ if (err)
+ return -ENOMEM;
+
+ switch (type) {
+ case TYPE_AFPF:
+ rvu->ng_rvu->pf_mbox_addr = mbox_addr;
+ iova = (u64)mbox_addr->iova;
+ for (pf = 0; pf < ndevs; pf++) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf),
+ (u64)iova);
+ iova += mbox_size;
+ }
+ break;
+ case TYPE_AFVF:
+ rvu->ng_rvu->vf_mbox_addr = mbox_addr;
+ rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct mbox_ops cn20k_mbox_ops = {
+ .pf_intr_handler = cn20k_mbox_pf_common_intr_handler,
+ .afvf_intr_handler = cn20k_afvf_mbox_intr_handler,
+};
+
+int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs)
+{
+ int dev;
+
+ if (!is_cn20k(rvu->pdev))
+ return 0;
+
+ rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops;
+
+ if (type == TYPE_AFVF) {
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE));
+ } else {
+ for (dev = 0; dev < ndevs; dev++)
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE));
+ }
+
+ return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE);
+}
+
+void cn20k_free_mbox_memory(struct rvu *rvu)
+{
+ if (!is_cn20k(rvu->pdev))
+ return;
+
+ qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr);
+ qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr);
+}
+
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs)
+{
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf)
+{
+ int qints, hwctx_size, err;
+ u64 cfg, ctx_cfg;
+
+ if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))
+ return 0;
+
+ ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
+ /* Alloc memory for CQINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 24) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
+ (u64)pfvf->cq_ints_ctx->iova);
+
+ /* Alloc memory for QINT's HW contexts */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ qints = (cfg >> 12) & 0xFFF;
+ hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
+ err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
+ if (err)
+ return -ENOMEM;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
+ (u64)pfvf->nix_qints_ctx->iova);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
new file mode 100644
index 000000000000..affb39803120
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/reg.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef RVU_MBOX_REG_H
+#define RVU_MBOX_REG_H
+#include "../rvu.h"
+#include "../rvu_reg.h"
+
+/* RVUM block registers */
+#define RVU_PF_DISC (0x0)
+#define RVU_PRIV_PFX_DISC(a) (0x8000208 | (a) << 16)
+#define RVU_PRIV_HWVFX_DISC(a) (0xD000000 | (a) << 12)
+
+/* Mbox Registers */
+/* RVU AF BAR0 Mbox registers for AF => PFx */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_MBOX_AF_PFX_CFG(a) (0x6000 | (a) << 4)
+#define RVU_MBOX_AF_AFPFX_TRIGX(a) (0x9000 | (a) << 3)
+#define RVU_MBOX_AF_PFAF_INT(a) (0x2980 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_W1S(a) (0x2988 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1S(a) (0x2990 | (a) << 6)
+#define RVU_MBOX_AF_PFAF_INT_ENA_W1C(a) (0x2998 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT(a) (0x29A0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_W1S(a) (0x29A8 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1S(a) (0x29B0 | (a) << 6)
+#define RVU_MBOX_AF_PFAF1_INT_ENA_W1C(a) (0x29B8 | (a) << 6)
+
+/* RVU PF => AF mbox registers */
+#define RVU_MBOX_PF_PFAF_TRIGX(a) (0xC00 | (a) << 3)
+#define RVU_MBOX_PF_INT (0xC20)
+#define RVU_MBOX_PF_INT_W1S (0xC28)
+#define RVU_MBOX_PF_INT_ENA_W1S (0xC30)
+#define RVU_MBOX_PF_INT_ENA_W1C (0xC38)
+
+#define RVU_AF_BAR2_SEL (0x9000000)
+#define RVU_AF_BAR2_PFID (0x16400)
+#define NIX_CINTX_INT_W1S(a) (0xd30 | (a) << 12)
+#define NIX_QINTX_CNT(a) (0xc00 | (a) << 12)
+
+#define RVU_MBOX_AF_VFAF_INT(a) (0x3000 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_W1S(a) (0x3008 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1S(a) (0x3010 | (a) << 6)
+#define RVU_MBOX_AF_VFAF_INT_ENA_W1C(a) (0x3018 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT(a) (0x3020 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_INT_W1S(a) (0x3028 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_IN_ENA_W1S(a) (0x3030 | (a) << 6)
+#define RVU_MBOX_AF_VFAF1_IN_ENA_W1C(a) (0x3038 | (a) << 6)
+
+#define RVU_MBOX_AF_AFVFX_TRIG(a, b) (0x10000 | (a) << 4 | (b) << 3)
+#define RVU_MBOX_AF_VFX_ADDR(a) (0x20000 | (a) << 4)
+#define RVU_MBOX_AF_VFX_CFG(a) (0x28000 | (a) << 4)
+
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
+#define RVU_MBOX_PF_VF_ADDR (0xC40)
+#define RVU_MBOX_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VF_CFG (0xC60)
+
+#define RVU_MBOX_VF_VFPF_TRIGX(a) (0x3000 | (a) << 3)
+#define RVU_MBOX_VF_INT (0x20)
+#define RVU_MBOX_VF_INT_W1S (0x28)
+#define RVU_MBOX_VF_INT_ENA_W1S (0x30)
+#define RVU_MBOX_VF_INT_ENA_W1C (0x38)
+
+#define RVU_MBOX_VF_VFAF_TRIGX(a) (0x2000 | (a) << 3)
+#endif /* RVU_MBOX_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
new file mode 100644
index 000000000000..76ce3ec6da9c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef STRUCT_H
+#define STRUCT_H
+
+/*
+ * CN20k RVU PF MBOX Interrupt Vector Enumeration
+ *
+ * Vectors 0 - 3 are compatible with pre-cn20k silicon, hence the
+ * existing macros are reused.
+ */
+enum rvu_mbox_pf_int_vec_e {
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX0 = 0x4,
+ RVU_MBOX_PF_INT_VEC_VFPF_MBOX1 = 0x5,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0 = 0x6,
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1 = 0x7,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX = 0x8,
+ RVU_MBOX_PF_INT_VEC_CNT = 0x9,
+};
+
+/* RVU Admin function Interrupt Vector Enumeration */
+enum rvu_af_cn20k_int_vec_e {
+ RVU_AF_CN20K_INT_VEC_POISON = 0x0,
+ RVU_AF_CN20K_INT_VEC_PFFLR0 = 0x1,
+ RVU_AF_CN20K_INT_VEC_PFFLR1 = 0x2,
+ RVU_AF_CN20K_INT_VEC_PFME0 = 0x3,
+ RVU_AF_CN20K_INT_VEC_PFME1 = 0x4,
+ RVU_AF_CN20K_INT_VEC_GEN = 0x5,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX0 = 0x6,
+ RVU_AF_CN20K_INT_VEC_PFAF_MBOX1 = 0x7,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0 = 0x8,
+ RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1 = 0x9,
+ RVU_AF_CN20K_INT_VEC_CNT = 0xa,
+};
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 406c59100a35..8a08bebf08c2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -39,7 +39,7 @@ struct qmem {
void *base;
dma_addr_t iova;
int alloc_sz;
- u16 entry_sz;
+ u32 entry_sz;
u8 align;
u32 qsize;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 7d21905deed8..75872d257eca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -10,8 +10,11 @@
#include <linux/pci.h>
#include "rvu_reg.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#include "mbox.h"
#include "rvu_trace.h"
+#include "rvu.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -28,8 +31,10 @@ void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
tx_hdr->msg_size = 0;
+ tx_hdr->sig = 0;
rx_hdr->num_msgs = 0;
rx_hdr->msg_size = 0;
+ rx_hdr->sig = 0;
}
EXPORT_SYMBOL(__otx2_mbox_reset);
@@ -53,9 +58,98 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
+int cn20k_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ case MBOX_DIR_PFVF:
+ mbox->tx_start = MBOX_DOWN_TX_START;
+ mbox->rx_start = MBOX_DOWN_RX_START;
+ mbox->tx_size = MBOX_DOWN_TX_SIZE;
+ mbox->rx_size = MBOX_DOWN_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF:
+ case MBOX_DIR_VFPF:
+ mbox->tx_start = MBOX_DOWN_RX_START;
+ mbox->rx_start = MBOX_DOWN_TX_START;
+ mbox->tx_size = MBOX_DOWN_RX_SIZE;
+ mbox->rx_size = MBOX_DOWN_TX_SIZE;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ case MBOX_DIR_PFVF_UP:
+ mbox->tx_start = MBOX_UP_TX_START;
+ mbox->rx_start = MBOX_UP_RX_START;
+ mbox->tx_size = MBOX_UP_TX_SIZE;
+ mbox->rx_size = MBOX_UP_RX_SIZE;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ case MBOX_DIR_VFPF_UP:
+ mbox->tx_start = MBOX_UP_RX_START;
+ mbox->rx_start = MBOX_UP_TX_START;
+ mbox->tx_size = MBOX_UP_RX_SIZE;
+ mbox->rx_size = MBOX_UP_TX_SIZE;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ switch (direction) {
+ case MBOX_DIR_AFPF:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_AFPF_UP:
+ mbox->trigger = RVU_MBOX_AF_AFPFX_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFAF:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFAF_UP:
+ mbox->trigger = RVU_MBOX_PF_PFAF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_PFVF:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(1);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_PFVF_UP:
+ mbox->trigger = RVU_MBOX_PF_VFX_PFVF_TRIGX(0);
+ mbox->tr_shift = 4;
+ break;
+ case MBOX_DIR_VFPF:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(0);
+ mbox->tr_shift = 0;
+ break;
+ case MBOX_DIR_VFPF_UP:
+ mbox->trigger = RVU_MBOX_VF_VFPF_TRIGX(1);
+ mbox->tr_shift = 0;
+ break;
+ default:
+ return -ENODEV;
+ }
+ mbox->reg_base = reg_base;
+ mbox->pdev = pdev;
+
+ mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
+ if (!mbox->dev) {
+ otx2_mbox_destroy(mbox);
+ return -ENOMEM;
+ }
+ mbox->ndevs = ndevs;
+
+ return 0;
+}
+
static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
void *reg_base, int direction, int ndevs)
{
+ if (is_cn20k(pdev))
+ return cn20k_mbox_setup(mbox, pdev, reg_base,
+ direction, ndevs);
+
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -234,7 +328,10 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
spin_lock(&mdev->mbox_lock);
- tx_hdr->msg_size = mdev->msg_size;
+ if (!tx_hdr->sig) {
+ tx_hdr->msg_size = mdev->msg_size;
+ tx_hdr->num_msgs = mdev->num_msgs;
+ }
/* Reset header for next messages */
mdev->msg_size = 0;
@@ -248,7 +345,6 @@ static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
* messages. So this should be written after writing all the messages
* to the shared memory.
*/
- tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
msg = (struct mbox_msghdr *)(hw_mbase + mbox->tx_start + msgs_offset);
@@ -309,6 +405,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_msghdr *msghdr = NULL;
+ struct mbox_hdr *mboxhdr = NULL;
spin_lock(&mdev->mbox_lock);
size = ALIGN(size, MBOX_MSG_ALIGN);
@@ -332,6 +429,11 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
mdev->msg_size += size;
mdev->rsp_size += size_rsp;
msghdr->next_msgoff = mdev->msg_size + msgs_offset;
+
+ mboxhdr = mdev->mbase + mbox->tx_start;
+ /* Clear the msg header region */
+ memset(mboxhdr, 0, msgs_offset);
+
exit:
spin_unlock(&mdev->mbox_lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a213b2663583..b3562d658d45 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -13,6 +13,7 @@
#include "rvu_struct.h"
#include "common.h"
+#include "cn20k/struct.h"
#define MBOX_SIZE SZ_64K
@@ -50,6 +51,11 @@
#define MBOX_DIR_PFVF_UP 6 /* PF sends messages to VF */
#define MBOX_DIR_VFPF_UP 7 /* VF replies to PF */
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
+
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
void *hwbase;
@@ -78,6 +84,8 @@ struct otx2_mbox {
struct mbox_hdr {
u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
+ u16 opt_msg;
+ u8 sig;
};
/* Header which precedes every msg and is also part of it */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index 0277d226293e..d7030dfa5dad 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -97,7 +97,7 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
event->intr_mask &= pfvf->intr_mask;
@@ -123,7 +123,7 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
struct mcs_intr_info *req;
int pf;
- pf = rvu_get_pf(event->pcifunc);
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
mutex_lock(&rvu->mbox_lock);
@@ -193,7 +193,7 @@ int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
- pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+ pfvf = &mcs->pf[rvu_get_pf(rvu->pdev, pcifunc)];
mcs->pf_map[0] = pcifunc;
pfvf->intr_mask = req->intr_mask;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index a8025f0486c9..7e538ee8a59f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -20,6 +20,8 @@
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
+#include "cn20k/reg.h"
+#include "cn20k/api.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -34,10 +36,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *));
-enum {
- TYPE_AFVF,
- TYPE_AFPF,
-};
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq);
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq);
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
@@ -294,7 +294,7 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
@@ -359,7 +359,7 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
devnum = rvu_get_hwvf(rvu, pcifunc);
} else {
is_pf = true;
- devnum = rvu_get_pf(pcifunc);
+ devnum = rvu_get_pf(rvu->pdev, pcifunc);
}
block->fn_map[lf] = attach ? pcifunc : 0;
@@ -400,11 +400,6 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
-inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
u64 cfg;
@@ -422,7 +417,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
int pf, func;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
func = pcifunc & RVU_PFVF_FUNC_MASK;
/* Get first HWVF attached to this PF */
@@ -437,7 +432,7 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
if (pcifunc & RVU_PFVF_FUNC_MASK)
return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
else
- return &rvu->pf[rvu_get_pf(pcifunc)];
+ return &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
}
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
@@ -445,7 +440,7 @@ static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
int pf, vf, nvfs;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (pf >= rvu->hw->total_pfs)
return false;
@@ -760,6 +755,11 @@ static void rvu_free_hw_resources(struct rvu *rvu)
rvu_reset_msix(rvu);
mutex_destroy(&rvu->rsrc_lock);
+
+ /* Free the QINT/CINT memory */
+ pfvf = &rvu->pf[RVU_AFPF];
+ qmem_free(rvu->dev, pfvf->nix_qints_ctx);
+ qmem_free(rvu->dev, pfvf->cq_ints_ctx);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
@@ -1487,7 +1487,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
/* All CGX mapped PFs are set with assigned NIX block during init */
- if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
blkaddr = pf->nix_blkaddr;
} else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
@@ -1501,7 +1501,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
}
/* if SDP1 then the blkaddr is NIX1 */
- if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
+ if (is_sdp_pfvf(rvu, pcifunc) && pf->sdp_info->node_id == 1)
blkaddr = BLKADDR_NIX1;
switch (blkaddr) {
@@ -2006,7 +2006,7 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
vf = pcifunc & RVU_PFVF_FUNC_MASK;
cfg = rvu_read64(rvu, BLKADDR_RVUM,
- RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ RVU_PRIV_PFX_CFG(rvu_get_pf(rvu->pdev, pcifunc)));
numvfs = (cfg >> 12) & 0xFF;
if (vf && vf <= numvfs)
@@ -2223,15 +2223,30 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ if (req_hdr->sig && !(is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))) {
+ req_hdr->opt_msg = mw->mbox_wrk[devid].num_msgs;
+ rvu_write64(rvu, BLKADDR_NIX0, RVU_AF_BAR2_SEL,
+ RVU_AF_BAR2_PFID);
+ if (type == TYPE_AFPF)
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_CINTX_INT_W1S(devid)),
+ 0x1);
+ else
+ rvu_write64(rvu, BLKADDR_NIX0,
+ AF_BAR2_ALIASX(0, NIX_QINTX_CNT(devid)),
+ 0x1);
+ usleep_range(5000, 6000);
+ goto done;
+ }
+
for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
switch (type) {
case TYPE_AFPF:
- msg->pcifunc &=
- ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
- msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc &= rvu_pcifunc_pf_mask(rvu->pdev);
+ msg->pcifunc |= rvu_make_pcifunc(rvu->pdev, devid, 0);
break;
case TYPE_AFVF:
msg->pcifunc &=
@@ -2249,16 +2264,17 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
- msg->id, rvu_get_pf(msg->pcifunc),
+ msg->id, rvu_get_pf(rvu->pdev, msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
+done:
mw->mbox_wrk[devid].num_msgs = 0;
- if (poll)
+ if (!is_cn20k(mbox->pdev) && poll)
otx2_mbox_wait_for_zero(mbox, devid);
/* Send mbox responses to VF/PF */
@@ -2364,13 +2380,21 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
-static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+static int rvu_get_mbox_regions(struct rvu *rvu, void __iomem **mbox_addr,
int num, int type, unsigned long *pf_bmap)
{
struct rvu_hwinfo *hw = rvu->hw;
int region;
u64 bar4;
+ /* For cn20k platform AF mailbox region is allocated by software
+ * and the corresponding IOVA is programmed in hardware unlike earlier
+ * silicons where software uses the hardware region after ioremap.
+ */
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_get_mbox_regions(rvu, (void *)mbox_addr,
+ num, type, pf_bmap);
+
/* For cn10k platform VF mailbox regions of a PF follows after the
* PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
* RVU_PF_VF_BAR4_ADDR register.
@@ -2389,7 +2413,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2412,7 +2436,7 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
RVU_AF_PF_BAR4_ADDR);
bar4 += region * MBOX_SIZE;
}
- mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ mbox_addr[region] = ioremap_wc(bar4, MBOX_SIZE);
if (!mbox_addr[region])
goto error;
}
@@ -2420,20 +2444,26 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
error:
while (region--)
- iounmap((void __iomem *)mbox_addr[region]);
+ iounmap(mbox_addr[region]);
return -ENOMEM;
}
+static struct mbox_ops rvu_mbox_ops = {
+ .pf_intr_handler = rvu_mbox_pf_intr_handler,
+ .afvf_intr_handler = rvu_mbox_intr_handler,
+};
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
int err = -EINVAL, i, dir, dir_up;
+ void __iomem **mbox_regions;
+ struct ng_rvu *ng_rvu_mbox;
void __iomem *reg_base;
struct rvu_work *mwork;
unsigned long *pf_bmap;
- void **mbox_regions;
const char *name;
u64 cfg;
@@ -2441,6 +2471,12 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
if (!pf_bmap)
return -ENOMEM;
+ ng_rvu_mbox = kzalloc(sizeof(*ng_rvu_mbox), GFP_KERNEL);
+ if (!ng_rvu_mbox) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
/* RVU VFs */
if (type == TYPE_AFVF)
bitmap_set(pf_bmap, 0, num);
@@ -2454,12 +2490,20 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ rvu->ng_rvu = ng_rvu_mbox;
+
+ rvu->ng_rvu->rvu_mbox_ops = &rvu_mbox_ops;
+
+ err = cn20k_rvu_mbox_init(rvu, type, num);
+ if (err)
+ goto free_mem;
+
mutex_init(&rvu->mbox_lock);
- mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ mbox_regions = kcalloc(num, sizeof(void __iomem *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
- goto free_bitmap;
+ goto free_qmem;
}
switch (type) {
@@ -2486,7 +2530,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
mw->mbox_wq = alloc_workqueue("%s",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ WQ_HIGHPRI | WQ_MEM_RECLAIM,
num, name);
if (!mw->mbox_wq) {
err = -ENOMEM;
@@ -2529,7 +2573,11 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
- goto free_regions;
+
+ kfree(mbox_regions);
+ bitmap_free(pf_bmap);
+
+ return 0;
exit:
destroy_workqueue(mw->mbox_wq);
@@ -2538,6 +2586,10 @@ unmap_regions:
iounmap((void __iomem *)mbox_regions[num]);
free_regions:
kfree(mbox_regions);
+free_qmem:
+ cn20k_free_mbox_memory(rvu);
+free_mem:
+ kfree(rvu->ng_rvu);
free_bitmap:
bitmap_free(pf_bmap);
return err;
@@ -2564,8 +2616,8 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
otx2_mbox_destroy(&mw->mbox_up);
}
-static void rvu_queue_work(struct mbox_wq_info *mw, int first,
- int mdevs, u64 intr)
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -2656,6 +2708,11 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
+ if (is_cn20k(rvu->pdev)) {
+ cn20k_rvu_enable_mbox_intr(rvu);
+ return;
+ }
+
/* Clear spurious irqs, if any */
rvu_write64(rvu, BLKADDR_RVUM,
RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
@@ -2773,7 +2830,7 @@ static void rvu_flr_handler(struct work_struct *work)
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
for (vf = 0; vf < numvfs; vf++)
__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
@@ -2909,9 +2966,12 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_cpt_unregister_interrupts(rvu);
- /* Disable the Mbox interrupt */
- rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
- INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ if (!is_cn20k(rvu->pdev))
+ /* Disable the Mbox interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ else
+ cn20k_rvu_unregister_interrupts(rvu);
/* Disable the PF FLR interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
@@ -2944,6 +3004,10 @@ static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
* VF interrupts can be handled. Offset equal to zero means
* that PF vectors are not configured and overlapping AF vectors.
*/
+ if (is_cn20k(rvu->pdev))
+ return (pfvf->msix.max >= RVU_AF_CN20K_INT_VEC_CNT +
+ RVU_MBOX_PF_INT_VEC_CNT) && offset;
+
return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
offset;
}
@@ -2974,18 +3038,30 @@ static int rvu_register_interrupts(struct rvu *rvu)
return ret;
}
- /* Register mailbox interrupt handler */
- sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
- ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_pf_intr_handler, 0,
- &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
- if (ret) {
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for mbox irq\n");
- goto fail;
- }
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register mailbox interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE],
+ "RVUAF Mbox");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
+ rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
- rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
+ } else {
+ ret = cn20k_register_afpf_mbox_intr(rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for mbox\n");
+ goto fail;
+ }
+ }
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
@@ -3040,34 +3116,40 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Get PF MSIX vectors offset. */
pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+ if (!is_cn20k(rvu->pdev)) {
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
- /* Register MBOX0 interrupt. */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox0\n");
-
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
- /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
- * simply increment current offset by 1.
- */
- offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
- ret = request_irq(pci_irq_vector(rvu->pdev, offset),
- rvu_mbox_intr_handler, 0,
- &rvu->irq_name[offset * NAME_SIZE],
- rvu);
- if (ret)
- dev_err(rvu->dev,
- "RVUAF: IRQ registration failed for Mbox1\n");
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
- rvu->irq_allocated[offset] = true;
+ rvu->irq_allocated[offset] = true;
+ } else {
+ ret = cn20k_register_afvf_mbox_intr(rvu, pf_vec_start);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox\n");
+ }
/* Register FLR interrupt handler for AF's VFs */
offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
@@ -3178,6 +3260,9 @@ static void rvu_disable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_disable_afvf_intr(rvu, vfs);
+
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
@@ -3194,6 +3279,9 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
{
int vfs = rvu->vfs;
+ if (is_cn20k(rvu->pdev))
+ return cn20k_rvu_enable_afvf_intr(rvu, vfs);
+
/* Clear any pending interrupts and enable AF VF interrupts for
* the first 64 VFs.
*/
@@ -3438,6 +3526,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ptp_start(rvu, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
rvu->fwdata->ptp_ext_tstamp);
+ /* Alloc CINT and QINT memory */
+ rvu_alloc_cint_qint_mem(rvu, &rvu->pf[RVU_AFPF], BLKADDR_NIX0,
+ (rvu->hw->block[BLKADDR_NIX0].lf.max));
return 0;
err_dl:
rvu_unregister_dl(rvu);
@@ -3489,6 +3580,9 @@ static void rvu_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
devm_kfree(&pdev->dev, rvu->hw);
+ if (is_cn20k(rvu->pdev))
+ cn20k_free_mbox_memory(rvu);
+ kfree(rvu->ng_rvu);
devm_kfree(&pdev->dev, rvu);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 48f66292ad5c..7ee1fdeb5295 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <net/devlink.h>
+#include <linux/soc/marvell/silicons.h>
#include "rvu_struct.h"
#include "rvu_devlink.h"
@@ -43,12 +44,39 @@
#define MAX_CPT_BLKS 2
/* PF_FUNC */
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_OTX2_PFVF_PF_SHIFT 10
+#define RVU_OTX2_PFVF_PF_MASK 0x3F
+#define RVU_PFVF_FUNC_SHIFT 0
+#define RVU_PFVF_FUNC_MASK 0x3FF
+#define RVU_CN20K_PFVF_PF_SHIFT 9
+#define RVU_CN20K_PFVF_PF_MASK 0x7F
+
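+/* Compose a PF_FUNC value from PF and function numbers using the PF bit
+ * layout of the underlying silicon (CN20K vs older OcteonTX2).
+ */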
+static inline u16 rvu_make_pcifunc(struct pci_dev *pdev, int pf, int func)
+{
+ if (is_cn20k(pdev))
+ return ((pf & RVU_CN20K_PFVF_PF_MASK) <<
+ RVU_CN20K_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+ else
+ return ((pf & RVU_OTX2_PFVF_PF_MASK) <<
+ RVU_OTX2_PFVF_PF_SHIFT) |
+ ((func & RVU_PFVF_FUNC_MASK) <<
+ RVU_PFVF_FUNC_SHIFT);
+}
+
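+/* Mask that clears the PF field of a pcifunc for the underlying silicon */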
+static inline int rvu_pcifunc_pf_mask(struct pci_dev *pdev)
+{
+ if (is_cn20k(pdev))
+ return ~(RVU_CN20K_PFVF_PF_MASK << RVU_CN20K_PFVF_PF_SHIFT);
+ else
+ return ~(RVU_OTX2_PFVF_PF_MASK << RVU_OTX2_PFVF_PF_SHIFT);
+}
+
+#define RVU_AFPF 25
#ifdef CONFIG_DEBUG_FS
+
struct dump_ctx {
int lf;
int id;
@@ -446,6 +474,23 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
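+/* Per IRQ vector context: interrupt status register, work queue dispatch
+ * helpers and the mailbox device range (start, mdevs) served by the vector.
+ */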
+struct rvu_irq_data {
+ u64 intr_status;
+ void (*rvu_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ void (*afvf_queue_work_hdlr)(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+ struct rvu *rvu;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
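+/* Silicon specific AF mailbox interrupt handlers (AF<=>PF and AF<=>VF) */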
+struct mbox_ops {
+ irqreturn_t (*pf_intr_handler)(int irq, void *rvu_irq);
+ irqreturn_t (*afvf_intr_handler)(int irq, void *rvu_irq);
+};
+
struct channel_fwdata {
struct sdp_node_info info;
u8 valid;
@@ -611,6 +656,8 @@ struct rvu {
struct list_head rep_evtq_head;
/* Representor event lock */
spinlock_t rep_evtq_lock;
+
+ struct ng_rvu *ng_rvu;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -836,7 +883,6 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
-int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
@@ -865,8 +911,8 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* SDP APIs */
int rvu_sdp_init(struct rvu *rvu);
-bool is_sdp_pfvf(u16 pcifunc);
-bool is_sdp_pf(u16 pcifunc);
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc);
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc);
bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
@@ -877,11 +923,21 @@ static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc)
return false;
}
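+/* Extract the PF number from a pcifunc using the silicon specific layout */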
+static inline int rvu_get_pf(struct pci_dev *pdev, u16 pcifunc)
+{
+ if (is_cn20k(pdev))
+ return (pcifunc >> RVU_CN20K_PFVF_PF_SHIFT) &
+ RVU_CN20K_PFVF_PF_MASK;
+ else
+ return (pcifunc >> RVU_OTX2_PFVF_PF_SHIFT) &
+ RVU_OTX2_PFVF_PF_MASK;
+}
+
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
- !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
+ !is_sdp_pf(rvu, rvu_make_pcifunc(rvu->pdev, pf, 0));
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
@@ -893,7 +949,7 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
{
return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)));
}
#define M(_name, _id, fn_name, req, rsp) \
@@ -901,6 +957,10 @@ int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
#undef M
+/* Mbox APIs */
+void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr);
+
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
@@ -955,7 +1015,8 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
-
+int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ int blkaddr, int nixlf);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index d0331b0e0bfd..b79db887ab9b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -457,7 +457,7 @@ int rvu_cgx_exit(struct rvu *rvu)
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return false;
return true;
}
@@ -484,7 +484,7 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -501,7 +501,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -526,7 +526,7 @@ int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int i = 0, lmac_count = 0;
struct mac_ops *mac_ops;
u8 max_dmac_filters;
@@ -577,7 +577,7 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
void *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
@@ -633,7 +633,7 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct rvu_pfvf *parent_pf;
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
@@ -663,7 +663,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct msg_req *req,
struct cgx_fec_stats_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
@@ -681,7 +681,7 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -701,7 +701,7 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0;
@@ -725,7 +725,7 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -743,7 +743,7 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
struct cgx_max_dmac_entries_get_rsp
*rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* If msg is received from PFs(which are not mapped to CGX LMACs)
@@ -769,7 +769,7 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0;
u64 cfg;
@@ -790,7 +790,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -809,7 +809,7 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -828,7 +828,7 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -864,7 +864,7 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, req->hdr.pcifunc)))
return -EPERM;
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
@@ -878,7 +878,7 @@ int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
@@ -917,7 +917,7 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
int pf, err;
- pf = rvu_get_pf(req->hdr.pcifunc);
+ pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return -ENODEV;
@@ -933,7 +933,7 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
struct msg_req *req,
struct cgx_features_info_msg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_idx, lmac;
void *cgxd;
@@ -975,7 +975,7 @@ u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1005,7 +1005,7 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_pfc = 0, tx_pfc = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1046,7 +1046,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
int err = 0;
@@ -1073,7 +1073,7 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1106,7 +1106,7 @@ int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
/* Assumes LF of a PF and all of its VF belongs to the same
* NIX block
*/
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
@@ -1133,10 +1133,10 @@ int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
struct rvu_pfvf *parent_pf, *pfvf;
int cgx_users, err = 0;
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return 0;
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
mutex_lock(&rvu->cgx_cfg_lock);
@@ -1179,7 +1179,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
struct fec_mode *req,
struct fec_mode *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
@@ -1195,7 +1195,7 @@ int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
struct cgx_fw_data *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
@@ -1222,7 +1222,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
struct cgx_set_link_mode_req *req,
struct cgx_set_link_mode_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_idx, lmac;
void *cgxd;
@@ -1238,7 +1238,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1256,7 +1256,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
@@ -1272,7 +1272,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
u8 rx_pause, u16 pfc_en)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 rx_8023 = 0, tx_8023 = 0;
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
@@ -1310,7 +1310,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
struct cgx_pfc_cfg *req,
struct cgx_pfc_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -1335,7 +1335,7 @@ int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
void rvu_mac_reset(struct rvu *rvu, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
struct cgx *cgxd;
u8 cgx, lmac;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 4a3370a40dd8..05adc54535eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -66,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
- return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) +
+ return ((rvu_get_pf(rvu->pdev, pcifunc) * LMT_MAX_VFS) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
@@ -83,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
- pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, pcifunc) & RVU_OTX2_PFVF_PF_MASK;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 3c5bbaf12e59..f404117bf6c8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -410,7 +410,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (pcifunc & RVU_PFVF_FUNC_MASK)
return false;
@@ -422,7 +422,7 @@ static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
int cpt_pf_num = rvu->cpt_pf_num;
- if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ if (rvu_get_pf(rvu->pdev, pcifunc) != cpt_pf_num)
return false;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index c827da626471..0c20642f81b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -688,7 +688,7 @@ static int get_max_column_width(struct rvu *rvu)
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -759,7 +759,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
off = 0;
flag = 0;
- pcifunc = pf << 10 | vf;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf);
if (!pcifunc)
continue;
@@ -842,7 +842,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
cgx[0] = 0;
lmac[0] = 0;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (pfvf->nix_blkaddr == BLKADDR_NIX0)
@@ -2623,10 +2623,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
pcifunc = ipolicer->pfvf_map[idx];
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(m, "Allocated to :: PF %d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(m, "Allocated to :: PF %d VF %d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
print_band_prof_ctx(m, &aq_rsp.prof);
}
@@ -2983,10 +2983,10 @@ static void rvu_print_npc_mcam_info(struct seq_file *s,
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
- rvu_get_pf(pcifunc));
+ rvu_get_pf(rvu->pdev, pcifunc));
else
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
- rvu_get_pf(pcifunc),
+ rvu_get_pf(rvu->pdev, pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
if (entry_acnt) {
@@ -3049,13 +3049,13 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_puts(filp, "\n\t\t Current allocation\n");
seq_puts(filp, "\t\t====================\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
}
}
@@ -3326,7 +3326,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
mutex_lock(&mcam->lock);
list_for_each_entry(iter, &mcam->mcam_rules, list) {
- pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, iter->owner);
seq_printf(s, "\n\tInstalled by: PF%d ", pf);
if (iter->owner & RVU_PFVF_FUNC_MASK) {
@@ -3344,7 +3344,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
rvu_dbg_npc_mcam_show_flows(s, iter);
if (is_npc_intf_rx(iter->intf)) {
target = iter->rx_action.pf_func;
- pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ pf = rvu_get_pf(rvu->pdev, target);
seq_printf(s, "\tForward to: PF%d ", pf);
if (target & RVU_PFVF_FUNC_MASK) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 613655fcd34f..bdf4d852c15d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -315,7 +315,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
if ((nix_get_tx_link(rvu, map_func) !=
nix_get_tx_link(rvu, pcifunc)) &&
- (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)))
+ (rvu_get_pf(rvu->pdev, map_func) !=
+ rvu_get_pf(rvu->pdev, pcifunc)))
return false;
else
return true;
@@ -339,7 +340,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
bool from_vf;
int err;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
@@ -416,7 +417,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
break;
case NIX_INTF_TYPE_SDP:
from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
- parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
+ parent_pf = &rvu->pf[rvu_get_pf(rvu->pdev, pcifunc)];
sdp_info = parent_pf->sdp_info;
if (!sdp_info) {
dev_err(rvu->dev, "Invalid sdp_info pointer\n");
@@ -590,12 +591,12 @@ static int nix_bp_disable(struct rvu *rvu,
u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
type = NIX_INTF_TYPE_SDP;
if (cpt_link && !rvu->hw->cpt_links)
@@ -736,9 +737,9 @@ static int nix_bp_enable(struct rvu *rvu,
u16 chan_v;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
@@ -1674,7 +1675,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
}
intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
- if (is_sdp_pfvf(pcifunc))
+ if (is_sdp_pfvf(rvu, pcifunc))
intf = NIX_INTF_TYPE_SDP;
err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
@@ -1798,7 +1799,8 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
if (rc < 0) {
dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return NIX_AF_ERR_MARK_CFG_FAIL;
}
@@ -2050,7 +2052,7 @@ static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
@@ -2068,7 +2070,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
int link, int *start, int *end)
{
struct rvu_hwinfo *hw = rvu->hw;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
/* LBK links */
if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) {
@@ -2426,7 +2428,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
{
struct nix_smq_flush_ctx *smq_flush_ctx;
int err, restore_tx_en = 0, i;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id = 0, lmac_id = 0;
u16 tl2_tl3_link_schq;
u8 link, link_level;
@@ -2820,7 +2822,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
{
struct rvu_hwinfo *hw = rvu->hw;
int lbk_link_start, lbk_links;
- u8 pf = rvu_get_pf(pcifunc);
+ u8 pf = rvu_get_pf(rvu->pdev, pcifunc);
int schq;
u64 cfg;
@@ -3190,7 +3192,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
- rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ rvu_get_pf(rvu->pdev, pcifunc),
+ pcifunc & RVU_PFVF_FUNC_MASK);
return err;
}
return 0;
@@ -3458,7 +3461,7 @@ int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
- pcifunc >> RVU_PFVF_PF_SHIFT);
+ rvu_get_pf(rvu->pdev, pcifunc));
return -EINVAL;
}
@@ -3510,7 +3513,8 @@ void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
struct rvu_pfvf *pfvf;
if (!hw->cap.nix_rx_multicast ||
- !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ !is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev,
+ pcifunc & ~RVU_PFVF_FUNC_MASK))) {
*mce_list = NULL;
*mce_idx = 0;
return;
@@ -3544,13 +3548,13 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(rvu, pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
return 0;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return 0;
@@ -3619,7 +3623,7 @@ static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
- pcifunc = (pf << RVU_PFVF_PF_SHIFT);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
pcifunc |= idx;
/* Add dummy entries now, so that we don't have to check
* for whether AQ_OP should be INIT/WRITE later on.
@@ -4554,7 +4558,7 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
static void nix_find_link_frs(struct rvu *rvu,
struct nix_frs_cfg *req, u16 pcifunc)
{
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct rvu_pfvf *pfvf;
int maxlen, minlen;
int numvfs, hwvf;
@@ -4601,7 +4605,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int blkaddr, link = -1;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
@@ -5251,7 +5255,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
rvu_switch_update_rules(rvu, pcifunc, true);
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
rvu_rep_notify_pfvf_state(rvu, pcifunc, true);
@@ -5284,7 +5288,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
rvu_switch_update_rules(rvu, pcifunc, false);
rvu_cgx_tx_enable(rvu, pcifunc, true);
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode)
rvu_rep_notify_pfvf_state(rvu, pcifunc, false);
return 0;
@@ -5296,7 +5300,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
u64 sa_base;
@@ -5385,7 +5389,7 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int nixlf;
u64 cfg;
- pf = rvu_get_pf(pcifunc);
+ pf = rvu_get_pf(rvu->pdev, pcifunc);
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index da15bb451178..c7c70429eb6c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -147,7 +147,9 @@ static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
- int pf = rvu_get_pf(pcifunc);
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
int index;
/* Check if this is for a PF */
@@ -698,7 +700,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
- is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc))) {
*(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
@@ -3434,7 +3436,7 @@ int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int blkaddr, nixlf, rc, intf_mode;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u64 rxpkind, txpkind;
u8 cgx_id, lmac_id;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index d2661e7fabdb..999f6d93c7fe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -1465,7 +1465,7 @@ static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_
int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1512,7 +1512,7 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
{
struct npc_exact_table *table;
- int pf = rvu_get_pf(pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, pcifunc);
u8 cgx_id, lmac_id;
u32 drop_mcam_idx;
bool *promisc;
@@ -1560,7 +1560,7 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -1593,7 +1593,7 @@ int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct cgx_mac_addr_update_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct npc_exact_table_entry *entry;
struct npc_exact_table *table;
struct rvu_pfvf *pfvf;
@@ -1675,7 +1675,7 @@ int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
int rc = 0;
@@ -1711,7 +1711,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
int rc;
rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
@@ -1736,7 +1736,7 @@ int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
- int pf = rvu_get_pf(req->hdr.pcifunc);
+ int pf = rvu_get_pf(rvu->pdev, req->hdr.pcifunc);
u32 seq_id = req->index;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
@@ -2001,7 +2001,7 @@ int rvu_npc_exact_init(struct rvu *rvu)
}
/* Filter rules are only for PF */
- pcifunc = RVU_PFFUNC(i, 0);
+ pcifunc = RVU_PFFUNC(rvu->pdev, i, 0);
dev_dbg(rvu->dev,
"%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
index 57a09328d46b..cb25cf478f1f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -139,9 +139,7 @@ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
#define NPC_MCAM_DROP_RULE_MAX 30
#define NPC_MCAM_SDP_DROP_RULE_IDX 0
-#define RVU_PFFUNC(pf, func) \
- ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
- (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+#define RVU_PFFUNC(pdev, pf, func) rvu_make_pcifunc(pdev, pf, func)
enum npc_exact_opc_type {
NPC_EXACT_OPC_MEM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
index 32953cca108c..03099bc570bd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -39,7 +39,7 @@ static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
struct rep_event *msg;
int pf;
- pf = rvu_get_pf(event->pcifunc);
+ pf = rvu_get_pf(rvu->pdev, event->pcifunc);
if (event->event & RVU_EVENT_MAC_ADDR_CHANGE)
ether_addr_copy(pfvf->mac_addr, event->evt_data.mac);
@@ -114,10 +114,10 @@ int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
struct rep_event *req;
int pf;
- if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(rvu->pdev, pcifunc)))
return 0;
- pf = rvu_get_pf(rvu->rep_pcifunc);
+ pf = rvu_get_pf(rvu->pdev, rvu->rep_pcifunc);
mutex_lock(&rvu->mbox_lock);
req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
@@ -325,7 +325,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu_get_nix_blkaddr(rvu, pcifunc);
rep = true;
for (i = 0; i < 2; i++) {
@@ -345,8 +345,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << RVU_PFVF_PF_SHIFT |
- ((vf + 1) & RVU_PFVF_FUNC_MASK);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, vf + 1);
rvu_get_nix_blkaddr(rvu, pcifunc);
/* Skip installing rules if nixlf is not attached */
@@ -454,7 +453,7 @@ int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
rvu->rep2pfvf_map[rep] = pcifunc;
rsp->rep_pf_map[rep] = pcifunc;
rep++;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index 38cfe148f4b7..e4a5f9fa6fd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -17,9 +17,9 @@
/* SDP PF number */
static int sdp_pf_num[MAX_SDP] = {-1, -1};
-bool is_sdp_pfvf(u16 pcifunc)
+bool is_sdp_pfvf(struct rvu *rvu, u16 pcifunc)
{
- u16 pf = rvu_get_pf(pcifunc);
+ u16 pf = rvu_get_pf(rvu->pdev, pcifunc);
u32 found = 0, i = 0;
while (i < MAX_SDP) {
@@ -34,9 +34,9 @@ bool is_sdp_pfvf(u16 pcifunc)
return true;
}
-bool is_sdp_pf(u16 pcifunc)
+bool is_sdp_pf(struct rvu *rvu, u16 pcifunc)
{
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -46,7 +46,7 @@ bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
return (rvu->vf_devid == RVU_SDP_VF_DEVID);
- return (is_sdp_pfvf(pcifunc) &&
+ return (is_sdp_pfvf(rvu, pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 77ac94cb2ec4..0596a3ac4c12 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -33,7 +33,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
BLKADDR_APR = 0x16ULL,
- BLK_COUNT = 0x17ULL,
+ BLKADDR_MBOX = 0x1bULL,
+ BLK_COUNT = 0x1cULL,
};
/* RVU Block Type Enumeration */
@@ -49,7 +50,8 @@ enum rvu_block_type_e {
BLKTYPE_TIM = 0x8,
BLKTYPE_CPT = 0x9,
BLKTYPE_NDC = 0xa,
- BLKTYPE_MAX = 0xa,
+ BLKTYPE_MBOX = 0x13,
+ BLKTYPE_MAX = 0x13,
};
/* RVU Admin function Interrupt Vector Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
index 268efb7c1c15..49ce38685a7e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c
@@ -93,7 +93,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
/* rvu_get_nix_blkaddr sets up the corresponding NIX block
* address and NIX RX and TX interfaces for a pcifunc.
* Generally it is called during attach call of a pcifunc but it
@@ -126,7 +126,7 @@ static int rvu_switch_install_rules(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
rvu_get_nix_blkaddr(rvu, pcifunc);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0);
@@ -236,7 +236,7 @@ void rvu_switch_disable(struct rvu *rvu)
if (!is_pf_cgxmapped(rvu, pf))
continue;
- pcifunc = pf << 10;
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, 0);
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
@@ -248,7 +248,7 @@ void rvu_switch_disable(struct rvu *rvu)
rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
for (vf = 0; vf < numvfs; vf++) {
- pcifunc = pf << 10 | ((vf + 1) & 0x3FF);
+ pcifunc = rvu_make_pcifunc(rvu->pdev, pf, (vf + 1));
err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF);
if (err)
dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 69e0778f9ac1..883e9f4d601c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_flows.o otx2_tc.o cn10k.o cn20k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 7f6a435ac680..bec7d5b4d7cc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -14,6 +14,7 @@ static struct dev_hw_ops otx2_hw_ops = {
.sqe_flush = otx2_sqe_flush,
.aura_freeptr = otx2_aura_freeptr,
.refill_pool_ptrs = otx2_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
};
static struct dev_hw_ops cn10k_hw_ops = {
@@ -21,8 +22,20 @@ static struct dev_hw_ops cn10k_hw_ops = {
.sqe_flush = cn10k_sqe_flush,
.aura_freeptr = cn10k_aura_freeptr,
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
+ .pfaf_mbox_intr_handler = otx2_pfaf_mbox_intr_handler,
};
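+/* Select the HW ops set based on the LMTST capability of the silicon */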
+void otx2_init_hw_ops(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
+ return;
+ }
+
+ pfvf->hw_ops = &cn10k_hw_ops;
+}
+EXPORT_SYMBOL(otx2_init_hw_ops);
+
int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
@@ -30,12 +43,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
struct otx2_lmt_info *lmt_info;
int err, cpu;
- if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
- pfvf->hw_ops = &otx2_hw_ops;
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
return 0;
- }
- pfvf->hw_ops = &cn10k_hw_ops;
/* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index e3f0bce9908f..945ab10bd4ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -39,4 +39,5 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
u32 burst, u64 rate, bool pps);
int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
+void otx2_init_hw_ops(struct otx2_nic *pfvf);
#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index a6500e3673f2..c691f0722154 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -481,7 +481,7 @@ static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info)
goto set_available;
/* Trigger CTX flush to write dirty data back to DRAM */
- reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7);
+ reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7);
otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
set_available:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
index 9965df0faa3e..43fbce0d6039 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h
@@ -220,7 +220,7 @@ struct cpt_sg_s {
#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0)
/* CPT LF CTX Flush Register */
-#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0)
+#define CPT_LF_CTX_FLUSH_CPTR GENMASK_ULL(45, 0)
#ifdef CONFIG_XFRM_OFFLOAD
int cn10k_ipsec_init(struct net_device *netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
new file mode 100644
index 000000000000..ec8cde98076d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+#include "cn10k.h"
+
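+/* CN20K specific mailbox interrupt handlers */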
+static struct dev_hw_ops cn20k_hw_ops = {
+ .pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
+ .vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
+ .pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
+};
+
+void cn20k_init(struct otx2_nic *pfvf)
+{
+ pfvf->hw_ops = &cn20k_hw_ops;
+}
+EXPORT_SYMBOL(cn20k_init);
+/* CN20K mbox AF => PFx irq handler */
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct otx2_nic *pf = pf_irq;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 pf_trig_val;
+
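+ /* RVU_PF_INT: bit 0 - UP message from AF, bit 1 - DOWN reply from AF */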
+ pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL;
+
+ /* Clear the IRQ */
+ otx2_write64(pf, RVU_PF_INT, pf_trig_val);
+
+ if (pf_trig_val & BIT_ULL(0)) {
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (pf_trig_val & BIT_ULL(1)) {
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(1));
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq)
+{
+ struct otx2_nic *vf = vf_irq;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 vf_trig_val;
+
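+ /* RVU_VF_INT: bit 0 - UP notification from the PF, bit 1 - DOWN reply from the PF */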
+ vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL;
+ /* Clear the IRQ */
+ otx2_write64(vf, RVU_VF_INT, vf_trig_val);
+
+ /* Read latest mbox data */
+ smp_rmb();
+
+ if (vf_trig_val & BIT_ULL(1)) {
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF",
+ BIT_ULL(1));
+ }
+
+ if (vf_trig_val & BIT_ULL(0)) {
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF",
+ BIT_ULL(0));
+ }
+
+ return IRQ_HANDLED;
+}
+
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ /* Clear PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+
+ /* Enable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs));
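+ /* VFs 64 and above are covered by the second register of each pair */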
+ if (numvfs > 64) {
+ numvfs -= 64;
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1),
+ INTR_MASK(numvfs));
+ }
+}
+
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ int vector, intr_vec, vec = 0;
+
+ /* Disable PF <=> VF mailbox IRQ */
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull);
+
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+
+ if (numvfs > 64) {
+ otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+ otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+ }
+
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ vector = pci_irq_vector(pf->pdev, intr_vec);
+ free_irq(vector, pf->hw.pfvf_irq_devid[vec]);
+ }
+}
+
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+ struct pf_irq_data *irq_data = pf_irq;
+ struct otx2_nic *pf = irq_data->pf;
+ struct mbox *mbox;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
+ /* Clear interrupts */
+ intr = otx2_read64(pf, irq_data->intr_status);
+ otx2_write64(pf, irq_data->intr_status, intr);
+ mbox = pf->mbox_pfvf;
+
+ if (intr)
+ trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr);
+
+ irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start,
+ irq_data->mdevs, intr);
+
+ return IRQ_HANDLED;
+}
+
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+ struct otx2_hw *hw = &pf->hw;
+ struct pf_irq_data *irq_data;
+ int intr_vec, ret, vec = 0;
+ char *irq_name;
+
+ /* irq data for 4 PF intr vectors */
+ irq_data = devm_kcalloc(pf->dev, 4,
+ sizeof(struct pf_irq_data), GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
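+ /* Four vectors: VFPF and VFPF1 mailboxes, each split into VFs 0-63 and 64+ */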
+ for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+ RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+ switch (intr_vec) {
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(0);
+ irq_data[vec].start = 0;
+ irq_data[vec].mdevs = 64;
+ break;
+ case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+ irq_data[vec].intr_status =
+ RVU_MBOX_PF_VFPF1_INTX(1);
+ irq_data[vec].start = 64;
+ irq_data[vec].mdevs = 96;
+ break;
+ }
+ irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work;
+ irq_data[vec].vec_num = intr_vec;
+ irq_data[vec].pf = pf;
+
+ /* Register mailbox interrupt handler */
+ irq_name = &hw->irq_name[intr_vec * NAME_SIZE];
+ if (pf->pcifunc)
+ snprintf(irq_name, NAME_SIZE,
+ "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev,
+ pf->pcifunc), vec / 2, vec % 2);
+ else
+ snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d",
+ vec / 2, vec % 2);
+
+ hw->pfvf_irq_devid[vec] = &irq_data[vec];
+ ret = request_irq(pci_irq_vector(pf->pdev, intr_vec),
+ pf->hw_ops->pfvf_mbox_intr_handler, 0,
+ irq_name,
+ &irq_data[vec]);
+ if (ret) {
+ dev_err(pf->dev,
+ "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
+ return ret;
+ }
+ }
+
+ cn20k_enable_pfvf_mbox_intr(pf, numvfs);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
new file mode 100644
index 000000000000..832adaf8c57f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn20k.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef CN20K_H
+#define CN20K_H
+
+#include "otx2_common.h"
+
+void cn20k_init(struct otx2_nic *pfvf);
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs);
+#endif /* CN20K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 6b5c9536d26d..9a10396e7504 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -28,12 +28,12 @@ static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -41,12 +41,12 @@ static void otx2_nix_sq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
{
u64 incr = (u64)qidx << 32;
- u64 *ptr;
+ void __iomem *ptr;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
stats->bytes = otx2_atomic64_add(incr, ptr);
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
stats->pkts = otx2_atomic64_add(incr, ptr);
}
@@ -860,9 +860,10 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
{
int qidx, sqe_tail, sqe_head;
struct otx2_snd_queue *sq;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
sq = &pfvf->qset.sq[qidx];
if (!sq->sqb_ptrs)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ca0e6ab12ceb..6b59881f78e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -28,10 +28,12 @@
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
+#include <rvu.h>
#include <rvu_trace.h>
#include "qos.h"
#include "rep.h"
#include "cn10k_ipsec.h"
+#include "cn20k.h"
/* IPv4 flag more fragment bit */
#define IPV4_FLAG_MORE 0x20
@@ -61,6 +63,12 @@
/* Number of segments per SG structure */
#define MAX_SEGS_PER_SG 3
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq);
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq);
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -245,6 +253,7 @@ struct otx2_hw {
u16 nix_msixoff; /* Offset of NIX vectors */
char *irq_name;
cpumask_var_t *affinity_mask;
+ struct pf_irq_data *pfvf_irq_devid[4];
/* Stats */
struct otx2_dev_stats dev_stats;
@@ -366,6 +375,9 @@ struct dev_hw_ops {
int size, int qidx);
int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
+ irqreturn_t (*pfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*vfaf_mbox_intr_handler)(int irq, void *pf_irq);
+ irqreturn_t (*pfvf_mbox_intr_handler)(int irq, void *pf_irq);
};
#define CN10K_MCS_SA_PER_SC 4
@@ -433,6 +445,16 @@ struct cn10k_mcs_cfg {
struct list_head rxsc_list;
};
+struct pf_irq_data {
+ u64 intr_status;
+ void (*pf_queue_work_hdlr)(struct mbox *mb, struct workqueue_struct *mw,
+ int first, int mdevs, u64 intr);
+ struct otx2_nic *pf;
+ int vec_num;
+ int start;
+ int mdevs;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -476,6 +498,7 @@ struct otx2_nic {
struct mbox *mbox_pfvf;
struct workqueue_struct *mbox_wq;
struct workqueue_struct *mbox_pfvf_wq;
+ struct qmem *pfvf_mbox_addr;
u8 total_vfs;
u16 pcifunc; /* RVU PF_FUNC */
@@ -730,8 +753,9 @@ static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}
-static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
{
+ u64 __iomem *ptr = addr;
u64 result;
__asm__ volatile(".cpu generic+lse\n"
@@ -744,7 +768,11 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
#else
#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
-#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
+
+static inline u64 otx2_atomic64_add(u64 incr, void __iomem *addr)
+{
+ return 0;
+}
#endif
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
@@ -794,7 +822,7 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
- u64 *ptr = (__force u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
+ void __iomem *ptr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
u64 incr = (u64)aura | BIT_ULL(63);
return otx2_atomic64_add(incr, ptr);
@@ -899,21 +927,11 @@ MBOX_UP_MCS_MESSAGES
/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT (100 * HZ)
-#define RVU_PFVF_PF_SHIFT 10
-#define RVU_PFVF_PF_MASK 0x3F
-#define RVU_PFVF_FUNC_SHIFT 0
-#define RVU_PFVF_FUNC_MASK 0x3FF
-
static inline bool is_otx2_vf(u16 pcifunc)
{
return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}
-static inline int rvu_get_pf(u16 pcifunc)
-{
- return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
-}
-
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
struct page *page,
size_t offset, size_t size,
@@ -1191,4 +1209,6 @@ dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr);
#endif /* OTX2_COMMON_H */
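
With otx2_get_regaddr() handing back a void __iomem * and otx2_atomic64_add() taking it directly, callers can drop the (u64 *) and __force casts seen in the removed lines. A usage sketch in the style of otx2_aura_allocptr() above (hypothetical helper, not part of the patch):

static inline u64 otx2_read_sq_op_status(struct otx2_nic *pfvf, int qidx)
{
	void __iomem *ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);

	/* the upper 32 bits of the operand select the queue */
	return otx2_atomic64_add((u64)qidx << 32, ptr);
}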
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 45b8c9230184..9b7f847b9c22 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -559,10 +559,13 @@ static int otx2_set_coalesce(struct net_device *netdev,
return 0;
}
-static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_get_rss_hash_opts(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_info *rss;
+
+ rss = &pfvf->hw.rss_info;
if (!(rss->flowkey_cfg &
(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6)))
@@ -609,12 +612,17 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
return 0;
}
-static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
- struct ethtool_rxnfc *nfc)
+static int otx2_set_rss_hash_opts(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
- struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_nic *pfvf = netdev_priv(dev);
u32 rxh_l4 = RXH_L4_B_0_1 | RXH_L4_B_2_3;
- u32 rss_cfg = rss->flowkey_cfg;
+ struct otx2_rss_info *rss;
+ u32 rss_cfg;
+
+ rss = &pfvf->hw.rss_info;
+ rss_cfg = rss->flowkey_cfg;
if (!rss->enable) {
netdev_err(pfvf->netdev,
@@ -743,8 +751,6 @@ static int otx2_get_rxnfc(struct net_device *dev,
if (netif_running(dev) && ntuple)
ret = otx2_get_all_flows(pfvf, nfc, rules);
break;
- case ETHTOOL_GRXFH:
- return otx2_get_rss_hash_opts(pfvf, nfc);
default:
break;
}
@@ -759,9 +765,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
- case ETHTOOL_SRXFH:
- ret = otx2_set_rss_hash_opts(pfvf, nfc);
- break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
ret = otx2_add_flow(pfvf, nfc);
@@ -1329,6 +1332,8 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
@@ -1442,6 +1447,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_fields = otx2_get_rss_hash_opts,
+ .set_rxfh_fields = otx2_set_rss_hash_opts,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
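
This is the new dedicated RSS hash-field interface: instead of multiplexing ETHTOOL_GRXFH/SRXFH through .get_rxnfc/.set_rxnfc, a driver reports and programs the fields through .get_rxfh_fields/.set_rxfh_fields, which take a struct ethtool_rxfh_fields and, on set, an extack. A minimal sketch for a hypothetical driver (names invented, hardware programming elided):

static int foo_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *nfc)
{
	/* report which headers feed the hash for nfc->flow_type */
	nfc->data = RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int foo_set_rxfh_fields(struct net_device *dev,
			       const struct ethtool_rxfh_fields *nfc,
			       struct netlink_ext_ack *extack)
{
	/* validate nfc->flow_type / nfc->data and program the hardware */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_rxfh_fields = foo_get_rxfh_fields,
	.set_rxfh_fields = foo_set_rxfh_fields,
};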
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index db7c466fdc39..4e2d1206e1b0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -206,7 +206,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register ME interrupt handler*/
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
otx2_pf_me_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -216,7 +217,8 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
/* Register FLR interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
if (ret) {
@@ -228,7 +230,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
if (numvfs > 64) {
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFME1),
otx2_pf_me_intr_handler, 0, irq_name, pf);
@@ -238,7 +240,7 @@ static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
}
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
ret = request_irq(pci_irq_vector
(pf->pdev, RVU_PF_INT_VEC_VFFLR1),
otx2_pf_flr_intr_handler, 0, irq_name, pf);
@@ -294,8 +296,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr)
+void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -545,7 +547,7 @@ end:
}
}
-static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
int vfs = pf->total_vfs;
@@ -574,6 +576,23 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
return IRQ_HANDLED;
}
+static void *cn20k_pfvf_mbox_alloc(struct otx2_nic *pf, int numvfs)
+{
+ struct qmem *mbox_addr;
+ int err;
+
+ err = qmem_alloc(&pf->pdev->dev, &mbox_addr, numvfs, MBOX_SIZE);
+ if (err) {
+ dev_err(pf->dev, "qmem alloc fail\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ otx2_write64(pf, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+ pf->pfvf_mbox_addr = mbox_addr;
+
+ return mbox_addr->base;
+}
+
static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
void __iomem *hwbase;
@@ -595,20 +614,27 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- /* On CN10K platform, PF <-> VF mailbox region follows after
- * PF <-> AF mailbox region.
+ /* For CN20K, the PF allocates mailbox memory in DRAM and programs the
+ * PF/VF regions/offsets via RVU_PF_VF_MBOX_ADDR; RVU_PFX_FUNC_PFAF_MBOX
+ * then provides the aliased address used to access the PF/VF mailbox regions.
*/
- if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- MBOX_SIZE;
- else
- base = readq((void __iomem *)((u64)pf->reg_base +
- RVU_PF_VF_BAR4_ADDR));
+ if (is_cn20k(pf->pdev)) {
+ hwbase = (void __iomem *)cn20k_pfvf_mbox_alloc(pf, numvfs);
+ } else {
+ /* On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq(pf->reg_base + RVU_PF_VF_BAR4_ADDR);
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
- if (!hwbase) {
- err = -ENOMEM;
- goto free_wq;
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ if (!hwbase) {
+ err = -ENOMEM;
+ goto free_wq;
+ }
}
mbox = &pf->mbox_pfvf[0];
@@ -632,7 +658,7 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
return 0;
free_iomem:
- if (hwbase)
+ if (hwbase && !(is_cn20k(pf->pdev)))
iounmap(hwbase);
free_wq:
destroy_workqueue(pf->mbox_pfvf_wq);
@@ -651,8 +677,10 @@ static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_pfvf_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap(mbox->mbox.hwbase);
+ else
+ qmem_free(&pf->pdev->dev, pf->pfvf_mbox_addr);
otx2_mbox_destroy(&mbox->mbox);
}
@@ -676,6 +704,9 @@ static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
int vector;
+ if (is_cn20k(pf->pdev))
+ return cn20k_disable_pfvf_mbox_intr(pf, numvfs);
+
/* Disable PF <=> VF mailbox IRQ */
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
@@ -697,11 +728,14 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
char *irq_name;
int err;
+ if (is_cn20k(pf->pdev))
+ return cn20k_register_pfvf_mbox_intr(pf, numvfs);
+
/* Register MBOX0 interrupt handler */
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
@@ -717,7 +751,8 @@ static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
if (pf->pcifunc)
snprintf(irq_name, NAME_SIZE,
- "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
+ "RVUPF%d_VF Mbox1",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
else
snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
err = request_irq(pci_irq_vector(pf->pdev,
@@ -1006,7 +1041,7 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_mbox_msg_send(mbox, 0);
}
-static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
+irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
struct mbox *mw = &pf->mbox;
@@ -1064,10 +1099,18 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
- int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ int vector;
/* Disable AF => PF mailbox IRQ */
- otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
+ } else {
+ vector = pci_irq_vector(pf->pdev,
+ RVU_MBOX_PF_INT_VEC_AFPF_MBOX);
+ otx2_write64(pf, RVU_PF_INT_ENA_W1C,
+ BIT_ULL(0) | BIT_ULL(1));
+ }
free_irq(vector, pf);
}
EXPORT_SYMBOL(otx2_disable_mbox_intr);
@@ -1080,10 +1123,24 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
int err;
/* Register mailbox interrupt handler */
- irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
- err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
- otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
+ if (!is_cn20k(pf->pdev)) {
+ irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ } else {
+ irq_name = &hw->irq_name[RVU_MBOX_PF_INT_VEC_AFPF_MBOX *
+ NAME_SIZE];
+ snprintf(irq_name, NAME_SIZE, "RVUPF%d AFPF Mbox",
+ rvu_get_pf(pf->pdev, pf->pcifunc));
+ err = request_irq(pci_irq_vector
+ (pf->pdev, RVU_MBOX_PF_INT_VEC_AFPF_MBOX),
+ pf->hw_ops->pfaf_mbox_intr_handler,
+ 0, irq_name, pf);
+ }
if (err) {
dev_err(pf->dev,
"RVUPF: IRQ registration failed for PFAF mbox irq\n");
@@ -1093,8 +1150,14 @@ int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
/* Enable mailbox interrupt for msgs coming from AF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(pf->pdev)) {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0) | BIT_ULL(1));
+ otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1));
+ }
if (!probe_af)
return 0;
@@ -1125,7 +1188,7 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
pf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !is_cn20k(pf->pdev))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
@@ -1145,12 +1208,20 @@ int otx2_pfaf_mbox_init(struct otx2_nic *pf)
if (!pf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e AF) and this PF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
+ /* For CN20K, the AF allocates mailbox memory in DRAM and programs the PF
+ * regions/offsets via RVU_MBOX_AF_PFX_ADDR; RVU_PFX_FUNC_PFAF_MBOX then
+ * provides the aliased address used to access the AF/PF mailbox regions.
*/
- hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- MBOX_SIZE);
+ if (is_cn20k(pf->pdev))
+ hwbase = pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX +
+ ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT);
+ else
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e AF) and this PF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start
+ (pf->pdev, PCI_MBOX_BAR_NUM), MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1323,8 +1394,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
struct otx2_snd_queue *sq;
- u64 val, *ptr;
- u64 qidx = 0;
+ void __iomem *ptr;
+ u64 val, qidx = 0;
/* CQ */
for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
@@ -1972,7 +2043,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for QERR\n",
- rvu_get_pf(pf->pcifunc));
+ rvu_get_pf(pf->pdev, pf->pcifunc));
goto err_disable_napi;
}
@@ -1990,7 +2061,7 @@ int otx2_open(struct net_device *netdev)
if (name_len >= NAME_SIZE) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
- rvu_get_pf(pf->pcifunc), qidx);
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
err = -EINVAL;
goto err_free_cints;
}
@@ -2001,7 +2072,7 @@ int otx2_open(struct net_device *netdev)
if (err) {
dev_err(pf->dev,
"RVUPF%d: IRQ registration failed for CQ%d\n",
- rvu_get_pf(pf->pcifunc), qidx);
+ rvu_get_pf(pf->pdev, pf->pcifunc), qidx);
goto err_free_cints;
}
vec++;
@@ -2998,8 +3069,13 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
if (err)
return err;
- err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
- RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ if (!is_cn20k(pf->pdev))
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
+ RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ else
+ err = pci_alloc_irq_vectors(hw->pdev, RVU_MBOX_PF_INT_VEC_CNT,
+ RVU_MBOX_PF_INT_VEC_CNT,
+ PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
__func__, num_vec);
@@ -3008,6 +3084,11 @@ int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf)
otx2_setup_dev_hw_settings(pf);
+ if (is_cn20k(pf->pdev))
+ cn20k_init(pf);
+ else
+ otx2_init_hw_ops(pf);
+
/* Init PF <=> AF mailbox stuff */
err = otx2_pfaf_mbox_init(pf);
if (err)
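
On CN20K the PF<->AF mailbox is no longer ioremapped from a PCI BAR; it is reached through a BAR2 alias at RVU_PFX_FUNC_PFAF_MBOX inside the MBOX block, which is why the teardown paths above skip iounmap() when is_cn20k() is true. A small helper capturing the address computation that the hunk open-codes (sketch only):

static void __iomem *cn20k_pfaf_mbox_base(struct otx2_nic *pf)
{
	return pf->reg_base + RVU_PFX_FUNC_PFAF_MBOX +
	       ((u64)BLKADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT);
}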
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index e3aee6e36215..1cd576fd09c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -44,6 +44,17 @@
#define RVU_PF_VF_MBOX_ADDR (0xC40)
#define RVU_PF_LMTLINE_ADDR (0xC48)
+#define RVU_MBOX_PF_VFX_PFVF_TRIGX(a) (0x2000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INTX(a) (0x1000 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_W1SX(a) (0x1020 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a) (0x1040 | (a) << 3)
+#define RVU_MBOX_PF_VFPF_INT_ENA_W1CX(a) (0x1060 | (a) << 3)
+
+#define RVU_MBOX_PF_VFPF1_INTX(a) (0x1080 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_W1SX(a) (0x10a0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(a) (0x10c0 | (a) << 3)
+#define RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(a) (0x10e0 | (a) << 3)
+
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
#define RVU_VF_VFPF_MBOX1 (0x00008)
@@ -58,6 +69,11 @@
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
#define RVU_VF_MBOX_REGION (0xC0000)
+/* CN20K RVU_MBOX_E: RVU PF/VF MBOX Address Range Enumeration */
+#define RVU_MBOX_AF_PFX_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_PFX_FUNC_PFAF_MBOX (0x80000)
+#define RVU_PFX_FUNCX_VFAF_MBOX (0x40000)
+
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -138,39 +154,12 @@
#define NIX_LF_CINTX_ENA_W1S(a) (NIX_LFBASE | 0xD40 | (a) << 12)
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
-/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
-
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
#define LMT_LF_LMTLINEX(a) (LMT_LFBASE | 0x000 | (a) << 12)
#define LMT_LF_LMTCANCEL (LMT_LFBASE | 0x400)
+/* CN20K registers */
+#define RVU_PF_DISC (0x0)
+
#endif /* OTX2_REG_H */
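
The new CN20K mailbox interrupt registers are stride-8 indexed (the (a) << 3 term), so the two instances of each register sit 8 bytes apart. Worked values for quick reference:

/*                                    a = 0    a = 1
 * RVU_MBOX_PF_VFPF_INTX(a)           0x1000   0x1008
 * RVU_MBOX_PF_VFPF_INT_ENA_W1SX(a)   0x1040   0x1048
 * RVU_MBOX_PF_VFPF1_INTX(a)          0x1080   0x1088
 */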
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 9a226ca74425..5f80b23c5335 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -467,7 +467,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
target = act->dev;
if (target->dev.parent) {
priv = netdev_priv(target);
- if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
+ if (rvu_get_pf(nic->pdev, nic->pcifunc) !=
+ rvu_get_pf(nic->pdev, priv->pcifunc)) {
NL_SET_ERR_MSG_MOD(extack,
"can't redirect to other pf/vf");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 8a8b598bd389..5589fccd370b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -240,6 +240,10 @@ static void otx2vf_disable_mbox_intr(struct otx2_nic *vf)
/* Disable VF => PF mailbox IRQ */
otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0));
+
+ if (is_cn20k(vf->pdev))
+ otx2_write64(vf, RVU_VF_INT_ENA_W1C, BIT_ULL(0) | BIT_ULL(1));
+
free_irq(vector, vf);
}
@@ -252,9 +256,18 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Register mailbox interrupt handler */
irq_name = &hw->irq_name[RVU_VF_INT_VEC_MBOX * NAME_SIZE];
- snprintf(irq_name, NAME_SIZE, "RVUVFAF Mbox");
- err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
- otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ snprintf(irq_name, NAME_SIZE, "RVUVF%d AFVF Mbox", ((vf->pcifunc &
+ RVU_PFVF_FUNC_MASK) - 1));
+
+ if (!is_cn20k(vf->pdev)) {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ otx2vf_vfaf_mbox_intr_handler, 0, irq_name, vf);
+ } else {
+ err = request_irq(pci_irq_vector(vf->pdev, RVU_VF_INT_VEC_MBOX),
+ vf->hw_ops->vfaf_mbox_intr_handler, 0, irq_name,
+ vf);
+ }
+
if (err) {
dev_err(vf->dev,
"RVUPF: IRQ registration failed for VFAF mbox irq\n");
@@ -264,8 +277,15 @@ static int otx2vf_register_mbox_intr(struct otx2_nic *vf, bool probe_pf)
/* Enable mailbox interrupt for msgs coming from PF.
* First clear to avoid spurious interrupts, if any.
*/
- otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
- otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ if (!is_cn20k(vf->pdev)) {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0));
+ } else {
+ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0) | BIT_ULL(1) |
+ BIT_ULL(2) | BIT_ULL(3));
+ otx2_write64(vf, RVU_VF_INT_ENA_W1S, BIT_ULL(0) |
+ BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(3));
+ }
if (!probe_pf)
return 0;
@@ -315,7 +335,13 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* On the cn20k platform the VF mailbox region is in DRAM, aliased via the
+ * AF VF MBOX ADDR; MBOX is a separate RVU block.
+ */
+ if (is_cn20k(vf->pdev)) {
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION + ((u64)BLKADDR_MBOX <<
+ RVU_FUNC_BLKADDR_SHIFT);
+ } else if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
/* For cn10k platform, VF mailbox region is in its BAR2
* register space
*/
@@ -616,6 +642,12 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
otx2_setup_dev_hw_settings(vf);
+
+ if (is_cn20k(vf->pdev))
+ cn20k_init(vf);
+ else
+ otx2_init_hw_ops(vf);
+
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 58d572ce08ef..2872adabc830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -151,9 +151,10 @@ static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
{
int sqe_tail, sqe_head;
- u64 incr, *ptr, val;
+ void __iomem *ptr;
+ u64 incr, val;
- ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
+ ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
incr = (u64)qidx << 32;
val = otx2_atomic64_add(incr, ptr);
sqe_head = (val >> 20) & 0x3F;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 2cd3da3b6843..25af98034e2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -244,10 +244,10 @@ static int rvu_rep_devlink_port_register(struct rep_dev *rep)
if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = rvu_get_pf(rep->pcifunc);
+ attrs.phys.port_number = rvu_get_pf(priv->pdev, rep->pcifunc);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
- attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc);
+ attrs.pci_vf.pf = rvu_get_pf(priv->pdev, rep->pcifunc);
attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
}
@@ -672,7 +672,8 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
rep->pcifunc = pcifunc;
snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
- rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK));
+ rvu_get_pf(priv->pdev, pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK));
ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b38e4f2de674..f8a907747db4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3336,6 +3336,48 @@ static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
schedule_work(&eth->pending_work);
}
+static int mtk_get_irqs(struct platform_device *pdev, struct mtk_eth *eth)
+{
+ int i;
+
+ /* future SoCs beginning with MT7988 should use named IRQs in dts */
+ eth->irq[MTK_FE_IRQ_TX] = platform_get_irq_byname(pdev, "fe1");
+ eth->irq[MTK_FE_IRQ_RX] = platform_get_irq_byname(pdev, "fe2");
+ if (eth->irq[MTK_FE_IRQ_TX] >= 0 && eth->irq[MTK_FE_IRQ_RX] >= 0)
+ return 0;
+
+ /* only use legacy mode if platform_get_irq_byname returned -ENXIO */
+ if (eth->irq[MTK_FE_IRQ_TX] != -ENXIO)
+ return eth->irq[MTK_FE_IRQ_TX];
+
+ if (eth->irq[MTK_FE_IRQ_RX] != -ENXIO)
+ return eth->irq[MTK_FE_IRQ_RX];
+
+ /* legacy way:
+ * On MTK_SHARED_INT SoCs (MT7621 + MT7628) the first IRQ is taken
+ * from devicetree and used for both RX and TX - it is shared.
+ * On SoCs with non-shared IRQs the first entry is not used,
+ * the second is for TX, and the third is for RX.
+ */
+ for (i = 0; i < MTK_FE_IRQ_NUM; i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+ if (i == MTK_FE_IRQ_SHARED)
+ eth->irq[MTK_FE_IRQ_SHARED] = platform_get_irq(pdev, i);
+ else
+ eth->irq[i] = eth->irq[MTK_FE_IRQ_SHARED];
+ } else {
+ eth->irq[i] = platform_get_irq(pdev, i + 1);
+ }
+
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
@@ -3389,7 +3431,7 @@ static void mtk_poll_controller(struct net_device *dev)
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
- mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_handle_irq_rx(eth->irq[MTK_FE_IRQ_RX], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
@@ -4875,7 +4917,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->irq = eth->irq[MTK_FE_IRQ_SHARED];
eth->netdev[id]->dev.of_node = np;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
@@ -5105,17 +5147,10 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < 3; i++) {
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
- eth->irq[i] = eth->irq[0];
- else
- eth->irq[i] = platform_get_irq(pdev, i);
- if (eth->irq[i] < 0) {
- dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- err = -ENXIO;
- goto err_wed_exit;
- }
- }
+ err = mtk_get_irqs(pdev, eth);
+ if (err)
+ goto err_wed_exit;
+
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
@@ -5159,17 +5194,17 @@ static int mtk_probe(struct platform_device *pdev)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
- err = devm_request_irq(eth->dev, eth->irq[0],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_SHARED],
mtk_handle_irq, 0,
dev_name(eth->dev), eth);
} else {
- err = devm_request_irq(eth->dev, eth->irq[1],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_TX],
mtk_handle_irq_tx, 0,
dev_name(eth->dev), eth);
if (err)
goto err_free_dev;
- err = devm_request_irq(eth->dev, eth->irq[2],
+ err = devm_request_irq(eth->dev, eth->irq[MTK_FE_IRQ_RX],
mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
}
@@ -5215,7 +5250,7 @@ static int mtk_probe(struct platform_device *pdev)
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[i]->base_addr, eth->irq[0]);
+ eth->netdev[i]->base_addr, eth->irq[MTK_FE_IRQ_SHARED]);
}
/* we run 2 devices on the same DMA ring so we need a dummy device
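
mtk_get_irqs() follows the usual "named IRQs first, positional fallback" probe pattern: platform_get_irq_byname() returns -ENXIO when the name is simply absent, and only that error selects the legacy index-based path, so other failures still propagate to the caller. A stripped-down sketch of the same pattern for a hypothetical two-IRQ device:

static int foo_get_irqs(struct platform_device *pdev, int *tx_irq, int *rx_irq)
{
	*tx_irq = platform_get_irq_byname(pdev, "tx");
	*rx_irq = platform_get_irq_byname(pdev, "rx");
	if (*tx_irq >= 0 && *rx_irq >= 0)
		return 0;

	/* fall back to index-based lookup only if the names are absent */
	if (*tx_irq != -ENXIO)
		return *tx_irq;
	if (*rx_irq != -ENXIO)
		return *rx_irq;

	*tx_irq = platform_get_irq(pdev, 0);
	if (*tx_irq < 0)
		return *tx_irq;
	*rx_irq = platform_get_irq(pdev, 1);
	return *rx_irq < 0 ? *rx_irq : 0;
}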
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 6f72a8c8ae1e..9261c0e13b59 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -642,6 +642,11 @@
#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+#define MTK_FE_IRQ_SHARED 0
+#define MTK_FE_IRQ_TX 0
+#define MTK_FE_IRQ_RX 1
+#define MTK_FE_IRQ_NUM (MTK_FE_IRQ_RX + 1)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -1292,7 +1297,7 @@ struct mtk_eth {
struct net_device *dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq[3];
+ int irq[MTK_FE_IRQ_NUM];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 752a72499b4f..be80da03a594 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -290,9 +290,6 @@ static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
- if (!my_ets)
- return -EINVAL;
-
ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 281b34af0bb4..d2071aff7b8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2670,8 +2670,7 @@ static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
.sync_table = mlx4_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index febeadfdd5a5..03d2fc7d9b09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -49,6 +49,8 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+#include <rdma/ib_verbs.h>
+
#include "mlx4.h"
#include "fw.h"
#include "icm.h"
@@ -1246,14 +1248,6 @@ err_out:
return err ? err : count;
}
-enum ibta_mtu {
- IB_MTU_256 = 1,
- IB_MTU_512 = 2,
- IB_MTU_1024 = 3,
- IB_MTU_2048 = 4,
- IB_MTU_4096 = 5
-};
-
static inline int int_to_ibta_mtu(int mtu)
{
switch (mtu) {
@@ -1266,7 +1260,7 @@ static inline int int_to_ibta_mtu(int mtu)
}
}
-static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
+static inline int ibta_mtu_to_int(enum ib_mtu mtu)
{
switch (mtu) {
case IB_MTU_256: return 256;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 73cd74644378..42218834183a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -35,6 +35,55 @@ static u16 mlx5_fw_ver_subminor(u32 version)
return version & 0xffff;
}
+static int mlx5_devlink_serial_numbers_put(struct mlx5_core_dev *dev,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned int vpd_size, kw_len;
+ char *str, *end;
+ u8 *vpd_data;
+ int err = 0;
+ int start;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data))
+ return 0;
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ end = strchrnul(str, ' ');
+ *end = '\0';
+ err = devlink_info_board_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "V3", &kw_len);
+ if (start >= 0) {
+ str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL);
+ if (!str) {
+ err = -ENOMEM;
+ goto end;
+ }
+ err = devlink_info_serial_number_put(req, str);
+ kfree(str);
+ if (err)
+ goto end;
+ }
+
+end:
+ kfree(vpd_data);
+ return err;
+}
+
#define DEVLINK_FW_STRING_LEN 32
static int
@@ -49,6 +98,10 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
if (!mlx5_core_is_pf(dev))
return 0;
+ err = mlx5_devlink_serial_numbers_put(dev, req, extack);
+ if (err)
+ return err;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
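
mlx5_devlink_serial_numbers_put() follows the standard PCI VPD flow: pci_vpd_alloc() pulls in the whole VPD image and pci_vpd_find_ro_info_keyword() locates a read-only keyword within it, here the serial number ("SN") and the "V3" vendor keyword. A minimal sketch of that flow in isolation (hypothetical helper, error handling trimmed):

static char *foo_read_vpd_serial(struct pci_dev *pdev)
{
	unsigned int vpd_size, kw_len;
	char *serial = NULL;
	u8 *vpd;
	int start;

	vpd = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd))
		return NULL;

	start = pci_vpd_find_ro_info_keyword(vpd, vpd_size,
					     PCI_VPD_RO_KEYWORD_SERIALNO,
					     &kw_len);
	if (start >= 0)
		serial = kstrndup(vpd + start, kw_len, GFP_KERNEL);

	kfree(vpd);
	return serial;	/* caller frees with kfree() */
}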
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5b0d03b3efe8..65a73913b9a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -278,10 +278,6 @@ enum packet_merge {
struct mlx5e_packet_merge_param {
enum packet_merge type;
u32 timeout;
- struct {
- u8 match_criteria_type;
- u8 alignment_granularity;
- } shampo;
};
struct mlx5e_params {
@@ -557,7 +553,7 @@ struct mlx5e_icosq {
} ____cacheline_aligned_in_smp;
struct mlx5e_frag_page {
- struct page *page;
+ netmem_ref netmem;
u16 frags;
};
@@ -638,7 +634,6 @@ struct mlx5e_shampo_hd {
struct mlx5e_frag_page *pages;
u32 hd_per_wq;
u16 hd_per_wqe;
- u16 pages_per_wq;
unsigned long *bitmap;
u16 pi;
u16 ci;
@@ -721,7 +716,11 @@ struct mlx5e_rq {
struct bpf_prog __rcu *xdp_prog;
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
+
+ /* page pools */
struct page_pool *page_pool;
+ struct page_pool *hd_page_pool;
+
struct mlx5e_xdp_buff mxbuf;
/* AF_XDP zero-copy */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
index 9e276fd3c0cf..c21fe36527a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -11,6 +11,11 @@ int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack);
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc);
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
@@ -20,6 +25,15 @@ static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int
+mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
+{ return -EOPNOTSUPP; }
+static inline int
+mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
+{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 58ec5e44aa7a..fc945bce933a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -901,6 +901,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 lro_timeout;
int ndsegs = 1;
int err;
@@ -926,22 +927,25 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
- if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
- MLX5_SET(wq, wq, shampo_enable, true);
- MLX5_SET(wq, wq, log_reservation_size,
- mlx5e_shampo_get_log_rsrv_size(mdev, params));
- MLX5_SET(wq, wq,
- log_max_num_of_packets_per_reservation,
- mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
- MLX5_SET(wq, wq, log_headers_entry_size,
- mlx5e_shampo_get_log_hd_entry_size(mdev, params));
- MLX5_SET(rqc, rqc, reservation_timeout,
- mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
- MLX5_SET(rqc, rqc, shampo_match_criteria_type,
- params->packet_merge.shampo.match_criteria_type);
- MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
- params->packet_merge.shampo.alignment_granularity);
- }
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO)
+ break;
+
+ MLX5_SET(wq, wq, shampo_enable, true);
+ MLX5_SET(wq, wq, log_reservation_size,
+ mlx5e_shampo_get_log_rsrv_size(mdev, params));
+ MLX5_SET(wq, wq,
+ log_max_num_of_packets_per_reservation,
+ mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ MLX5_SET(wq, wq, log_headers_entry_size,
+ mlx5e_shampo_get_log_hd_entry_size(mdev, params));
+ lro_timeout =
+ mlx5e_choose_lro_timeout(mdev,
+ MLX5E_DEFAULT_SHAMPO_TIMEOUT);
+ MLX5_SET(rqc, rqc, reservation_timeout, lro_timeout);
+ MLX5_SET(rqc, rqc, shampo_match_criteria_type,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
+ MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
break;
}
default: /* MLX5_WQ_TYPE_CYCLIC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index e837c21d3d21..6501252359b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -362,7 +362,8 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
break;
case MLX5E_DMA_MAP_PAGE:
- dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(pdev, dma->addr, dma->size,
+ DMA_TO_DEVICE, 0);
break;
default:
WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3cb8d3bf9044..995eedf7a51a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include <linux/dim.h>
#include <linux/ethtool_netlink.h>
+#include <net/netdev_queues.h>
#include "en.h"
#include "en/channels.h"
@@ -365,11 +366,6 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
-
- kernel_param->tcp_data_split =
- (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
- ETHTOOL_TCP_DATA_SPLIT_ENABLED :
- ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
@@ -382,6 +378,27 @@ static void mlx5e_get_ringparam(struct net_device *dev,
mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
+static bool mlx5e_ethtool_set_tcp_data_split(struct mlx5e_priv *priv,
+ u8 tcp_data_split,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = priv->netdev;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(dev->features & NETIF_F_GRO_HW)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "TCP-data-split is not supported when GRO HW is disabled");
+ return false;
+ }
+
+ /* Might need to disable HW-GRO if it was kept on due to hds. */
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED &&
+ dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ netdev_update_features(priv->netdev);
+
+ return true;
+}
+
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct netlink_ext_ack *extack)
@@ -440,6 +457,11 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ if (!mlx5e_ethtool_set_tcp_data_split(priv,
+ kernel_param->tcp_data_split,
+ extack))
+ return -EINVAL;
+
return mlx5e_ethtool_set_ringparam(priv, param, extack);
}
@@ -2377,6 +2399,23 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
+static int mlx5e_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
+static int mlx5e_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
@@ -2616,12 +2655,14 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_link_lanes_supported = true,
.cap_rss_ctx_supported = true,
+ .rxfh_per_ctx_fields = true,
.rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
.supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
@@ -2642,6 +2683,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
+ .get_rxfh_fields = mlx5e_get_rxfh_fields,
+ .set_rxfh_fields = mlx5e_set_rxfh_fields,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
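
Taken together with the mlx5e_fix_features() change later in this patch (en_main.c), these hooks make TCP header/data split depend on hardware GRO: enabling it is rejected while NETIF_F_GRO_HW is off, and disabling it re-runs netdev_update_features() so HW-GRO can drop back off. A condensed sketch of that dependency check for a hypothetical driver:

static int foo_validate_hds(struct net_device *dev, u8 tcp_data_split,
			    struct netlink_ext_ack *extack)
{
	if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    !(dev->features & NETIF_F_GRO_HW)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TCP data split requires hardware GRO");
		return -EINVAL;
	}
	return 0;
}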
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d68230a7b9f4..79916f1abd14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -894,17 +894,17 @@ static int flow_type_to_traffic_type(u32 flow_type)
}
}
-static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_set_rxfh_fields(struct mlx5e_priv *priv,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
u8 rx_hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int err;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -941,16 +941,15 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
return err;
}
-static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *nfc)
+int mlx5e_ethtool_get_rxfh_fields(struct mlx5e_priv *priv,
+ struct ethtool_rxfh_fields *nfc)
{
int hash_field = 0;
u32 flow_type = 0;
- u32 rss_idx = 0;
+ u32 rss_idx;
int tt;
- if (nfc->flow_type & FLOW_RSS)
- rss_idx = nfc->rss_context;
+ rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
@@ -986,9 +985,6 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
- case ETHTOOL_SRXFH:
- err = mlx5e_set_rss_hash_opt(priv, cmd);
- break;
default:
err = -EOPNOTSUPP;
break;
@@ -1013,9 +1009,6 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
- case ETHTOOL_GRXFH:
- err = mlx5e_get_rss_hash_opt(priv, info);
- break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea822c69d137..dca5ca51a470 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@
#include <linux/filter.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
@@ -78,7 +79,8 @@
static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
{
- if (!MLX5_CAP_GEN(mdev, shampo))
+ if (!MLX5_CAP_GEN(mdev, shampo) ||
+ !MLX5_CAP_SHAMPO(mdev, shampo_header_split_data_merge))
return false;
/* Our HW-GRO implementation relies on "KSM Mkey" for
@@ -331,47 +333,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
-{
- rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
- GFP_KERNEL, node);
- if (!rq->mpwqe.shampo)
- return -ENOMEM;
- return 0;
-}
-
-static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
-{
- kvfree(rq->mpwqe.shampo);
-}
-
-static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-
- shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
- node);
- shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->pages)),
- GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->pages)
- goto err_nomem;
-
- return 0;
-
-err_nomem:
- bitmap_free(shampo->bitmap);
- kvfree(shampo->pages);
-
- return -ENOMEM;
-}
-
-static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
-{
- bitmap_free(rq->mpwqe.shampo->bitmap);
- kvfree(rq->mpwqe.shampo->pages);
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -584,19 +545,18 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
- struct mlx5e_rq *rq)
+ u16 hd_per_wq, u32 *umr_mkey)
{
u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
- if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+ if (max_ksm_size < hd_per_wq) {
mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
+ max_ksm_size, hd_per_wq);
return -EINVAL;
}
-
- return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ return mlx5e_create_umr_ksm_mkey(mdev, hd_per_wq,
MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
- &rq->mpwqe.shampo->mkey);
+ umr_mkey);
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -758,6 +718,42 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
xdp_frag_size);
}
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, u16 hd_per_wq,
+ int node)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->hd_per_wq = hd_per_wq;
+
+ shampo->bitmap = bitmap_zalloc_node(hd_per_wq, GFP_KERNEL, node);
+ shampo->pages = kvzalloc_node(array_size(hd_per_wq,
+ sizeof(*shampo->pages)),
+ GFP_KERNEL, node);
+ if (!shampo->bitmap || !shampo->pages)
+ goto err_nomem;
+
+ return 0;
+
+err_nomem:
+ kvfree(shampo->pages);
+ bitmap_free(shampo->bitmap);
+
+ return -ENOMEM;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo->pages);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
+}
+
+static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
+{
+ struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix);
+
+ return !!rxq->mp_params.mp_ops;
+}
+
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -765,42 +761,81 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
u32 *pool_size,
int node)
{
+ void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ u32 hd_pool_size;
+ u16 hd_per_wq;
+ int wq_size;
int err;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
- err = mlx5e_rq_shampo_hd_alloc(rq, node);
- if (err)
- goto out;
- rq->mpwqe.shampo->hd_per_wq =
- mlx5e_shampo_hd_per_wq(mdev, params, rqp);
- err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+
+ /* split headers data structures */
+ hd_per_wq = mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, hd_per_wq, node);
if (err)
- goto err_shampo_hd;
- err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ goto err_shampo_hd_info_alloc;
+
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, hd_per_wq,
+ &rq->mpwqe.shampo->mkey);
if (err)
- goto err_shampo_info;
+ goto err_umr_mkey;
+
+ rq->mpwqe.shampo->key = cpu_to_be32(rq->mpwqe.shampo->mkey);
+ rq->mpwqe.shampo->hd_per_wqe =
+ mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+ MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+
+ if (mlx5_rq_needs_separate_hd_pool(rq)) {
+ /* Separate page pool for shampo headers */
+ struct page_pool_params pp_params = { };
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = hd_pool_size;
+ pp_params.nid = node;
+ pp_params.dev = rq->pdev;
+ pp_params.napi = rq->cq.napi;
+ pp_params.netdev = rq->netdev;
+ pp_params.dma_dir = rq->buff.map_dir;
+ pp_params.max_len = PAGE_SIZE;
+
+ rq->hd_page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->hd_page_pool)) {
+ err = PTR_ERR(rq->hd_page_pool);
+ rq->hd_page_pool = NULL;
+ goto err_hds_page_pool;
+ }
+ } else {
+ /* Common page pool, reserve space for headers. */
+ *pool_size += hd_pool_size;
+ rq->hd_page_pool = NULL;
+ }
+
+ /* gro only data structures */
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
goto err_hw_gro_data;
}
- rq->mpwqe.shampo->key =
- cpu_to_be32(rq->mpwqe.shampo->mkey);
- rq->mpwqe.shampo->hd_per_wqe =
- mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
- rq->mpwqe.shampo->pages_per_wq =
- rq->mpwqe.shampo->hd_per_wq / MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
- *pool_size += rq->mpwqe.shampo->pages_per_wq;
+
return 0;
err_hw_gro_data:
- mlx5e_rq_shampo_hd_info_free(rq);
-err_shampo_info:
+ page_pool_destroy(rq->hd_page_pool);
+err_hds_page_pool:
mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
-err_shampo_hd:
- mlx5e_rq_shampo_hd_free(rq);
-out:
+err_umr_mkey:
+ mlx5e_rq_shampo_hd_info_free(rq);
+err_shampo_hd_info_alloc:
+ kvfree(rq->mpwqe.shampo);
return err;
}
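
For sizing intuition: the header pool is dimensioned in pages rather than headers, since hd_per_wqe headers are posted per WQE, wq_size WQEs fit in the RQ, and MLX5E_SHAMPO_WQ_HEADER_PER_PAGE headers share one page. With purely illustrative numbers (not taken from the driver) of 32 headers per WQE, 256 WQEs and 64 headers per page:

	hd_pool_size = (32 * 256) / 64 = 128 pages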
@@ -810,9 +845,11 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
return;
kvfree(rq->hw_gro_data);
+ if (rq->hd_page_pool != rq->page_pool)
+ page_pool_destroy(rq->hd_page_pool);
mlx5e_rq_shampo_hd_info_free(rq);
mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
- mlx5e_rq_shampo_hd_free(rq);
+ kvfree(rq->mpwqe.shampo);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
@@ -929,6 +966,11 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
pp_params.netdev = rq->netdev;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
+ pp_params.queue_idx = rq->ix;
+
+ /* SHAMPO header/data split allows for unreadable netmem */
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
/* page_pool can be used even when there is no rq->xdp_prog,
* given page_pool does not handle DMA mapping there is no
@@ -941,6 +983,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->page_pool = NULL;
goto err_free_by_rq_type;
}
+ if (!rq->hd_page_pool)
+ rq->hd_page_pool = rq->page_pool;
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
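
The hunk above leaves rq->hd_page_pool either pointing at a dedicated pool or aliasing the common rq->page_pool, and the free path earlier only destroys it when it is not the alias. A small userspace C sketch of that own-vs-alias ownership pattern; the types and helpers below are illustrative stand-ins, not the kernel page_pool API:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a page pool handle. */
struct pool { const char *name; };

struct rx_queue {
	struct pool *data_pool;   /* always owned by the queue */
	struct pool *header_pool; /* either owned or an alias of data_pool */
};

static struct pool *pool_create(const char *name)
{
	struct pool *p = malloc(sizeof(*p));

	if (p)
		p->name = name;
	return p;
}

static void rxq_setup(struct rx_queue *rq, int separate_headers)
{
	rq->data_pool = pool_create("data");
	/* Only allocate a dedicated header pool when required; otherwise
	 * alias the common pool, mirroring the driver's fallback.
	 */
	rq->header_pool = separate_headers ? pool_create("headers") : rq->data_pool;
}

static void rxq_teardown(struct rx_queue *rq)
{
	/* Destroy the header pool only if it is not the shared one,
	 * so the common pool is never freed twice.
	 */
	if (rq->header_pool != rq->data_pool)
		free(rq->header_pool);
	free(rq->data_pool);
}

int main(void)
{
	struct rx_queue rq;

	rxq_setup(&rq, 1);
	rxq_teardown(&rq);
	rxq_setup(&rq, 0);
	rxq_teardown(&rq);
	printf("teardown ok\n");
	return 0;
}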
@@ -4043,10 +4087,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
if (enable) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
- new_params.packet_merge.shampo.match_criteria_type =
- MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
- new_params.packet_merge.shampo.alignment_granularity =
- MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
} else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
} else {
@@ -4373,6 +4413,7 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
+ struct netdev_config *cfg = netdev->cfg_pending;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
@@ -4439,6 +4480,13 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
}
}
+ /* The header-data split ring parameter requires HW GRO to stay enabled. */
+ if (cfg && cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ !(features & NETIF_F_GRO_HW)) {
+ netdev_warn(netdev, "Keeping HW-GRO enabled, TCP header-data split depends on it\n");
+ features |= NETIF_F_GRO_HW;
+ }
+
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
netdev->netns_immutable = true;
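
The fix_features hunk above enforces a dependency rather than failing the request: when the pending ring configuration has header/data split enabled, HW GRO is forced back on. A compact standalone sketch of that "adjust the requested mask" style, using made-up feature bits rather than the real NETIF_F_* values:

#include <stdio.h>

/* Illustrative feature bits; the real NETIF_F_* values differ. */
#define F_GRO_HW   (1u << 0)
#define F_LRO      (1u << 1)

/* Mimics the fix-features pattern: enforce dependencies by adjusting the
 * requested mask.  hds_enabled stands in for the pending
 * ETHTOOL_TCP_DATA_SPLIT_ENABLED ring parameter.
 */
static unsigned int fix_features(unsigned int wanted, int hds_enabled)
{
	if (hds_enabled && !(wanted & F_GRO_HW)) {
		fprintf(stderr, "keeping HW GRO on: header/data split depends on it\n");
		wanted |= F_GRO_HW;
	}
	return wanted;
}

int main(void)
{
	unsigned int feats = fix_features(F_LRO, 1);

	printf("features: %#x\n", feats); /* F_LRO | F_GRO_HW */
	return 0;
}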
@@ -5303,8 +5351,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
priv->nic_info.set_port = mlx5e_vxlan_set_port;
priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
- priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+ priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
/* Don't count the space hard-coded to the IANA port */
priv->nic_info.tables[0].n_entries =
@@ -5454,6 +5501,103 @@ static const struct netdev_stat_ops mlx5e_stat_ops = {
.get_base_stats = mlx5e_get_base_stats,
};
+struct mlx5_qmgmt_data {
+ struct mlx5e_channel *c;
+ struct mlx5e_channel_param cparam;
+};
+
+static int mlx5e_queue_mem_alloc(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *chs = &priv->channels;
+ struct mlx5e_params params = chs->params;
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (queue_index >= chs->num) {
+ err = -ERANGE;
+ goto unlock;
+ }
+
+ if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) ||
+ chs->params.ptp_rx ||
+ chs->params.xdp_prog ||
+ priv->htb) {
+ netdev_err(priv->netdev,
+ "Cloning channels with Port/rx PTP, XDP or HTB is not supported\n");
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, queue_index);
+ err = mlx5e_build_channel_param(mdev, &params, &new->cparam);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_open_channel(priv, queue_index, &params, NULL, &new->c);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static void mlx5e_queue_mem_free(struct net_device *dev, void *mem)
+{
+ struct mlx5_qmgmt_data *data = (struct mlx5_qmgmt_data *)mem;
+
+ /* Not supposed to happen since mlx5e_queue_start never fails,
+ * but release the channel here just in case.
+ */
+ if (data->c)
+ mlx5e_close_channel(data->c);
+}
+
+static int mlx5e_queue_stop(struct net_device *dev, void *oldq, int queue_index)
+{
+ /* In mlx5 a txq cannot simply be stopped in isolation, only restarted.
+ * mlx5e_queue_start does not fail, so the old queue is stopped there.
+ * TODO: Improve this.
+ */
+ return 0;
+}
+
+static int mlx5e_queue_start(struct net_device *dev, void *newq,
+ int queue_index)
+{
+ struct mlx5_qmgmt_data *new = (struct mlx5_qmgmt_data *)newq;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel *old;
+
+ mutex_lock(&priv->state_lock);
+
+ /* stop and close the old */
+ old = priv->channels.c[queue_index];
+ mlx5e_deactivate_priv_channels(priv);
+ /* close old before activating new, to avoid napi conflict */
+ mlx5e_close_channel(old);
+
+ /* start the new */
+ priv->channels.c[queue_index] = new->c;
+ mlx5e_activate_priv_channels(priv);
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data),
+ .ndo_queue_mem_alloc = mlx5e_queue_mem_alloc,
+ .ndo_queue_mem_free = mlx5e_queue_mem_free,
+ .ndo_queue_start = mlx5e_queue_start,
+ .ndo_queue_stop = mlx5e_queue_stop,
+};
+
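
The queue management ops above follow the usual ndo_queue_* contract: memory for the replacement queue is allocated first, and the live queue is only torn down once the new one is ready to be swapped in. A toy userspace model of that restart sequence; every name in it is illustrative rather than the kernel interface:

#include <stdio.h>
#include <stdlib.h>

struct channel { int index; };

struct queue_mem { struct channel *c; };

static int queue_mem_alloc(struct queue_mem *new, int index)
{
	new->c = malloc(sizeof(*new->c));
	if (!new->c)
		return -1;
	new->c->index = index;
	return 0;
}

static int restart_queue(struct channel **live, int index)
{
	struct queue_mem new = { 0 };
	struct channel *old;

	if (queue_mem_alloc(&new, index))
		return -1;          /* old queue untouched on failure */

	old = live[index];          /* stop and close the old channel ... */
	free(old);
	live[index] = new.c;        /* ... then activate the new one */
	return 0;
}

int main(void)
{
	struct channel *live[1] = { malloc(sizeof(struct channel)) };

	live[0]->index = 0;
	printf("restart: %s\n", restart_queue(live, 0) ? "failed" : "ok");
	free(live[0]);
	return 0;
}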
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -5464,6 +5608,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->queue_mgmt_ops = &mlx5e_queue_mgmt_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
netdev->xsk_tx_metadata_ops = &mlx5e_xsk_tx_metadata_ops;
netdev->request_ops_lock = true;
@@ -5506,17 +5651,17 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
+ if (mlx5e_hw_gro_supported(mdev) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ netdev->vlan_features |= NETIF_F_GRO_HW;
+
netdev->hw_features = netdev->vlan_features;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5e_hw_gro_supported(mdev) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
- MLX5E_MPWRQ_UMR_MODE_ALIGNED))
- netdev->hw_features |= NETIF_F_GRO_HW;
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5595,6 +5740,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->netmem_tx = true;
+
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
mlx5e_set_xdp_feature(netdev);
mlx5e_set_netdev_dev_addr(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 84b1ab8233b8..2bb32082bfcc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -273,33 +273,32 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
#define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
-static int mlx5e_page_alloc_fragmented(struct mlx5e_rq *rq,
+static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
- struct page *page;
+ netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
- page = page_pool_dev_alloc_pages(rq->page_pool);
- if (unlikely(!page))
+ if (unlikely(!netmem))
return -ENOMEM;
- page_pool_fragment_page(page, MLX5E_PAGECNT_BIAS_MAX);
+ page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
*frag_page = (struct mlx5e_frag_page) {
- .page = page,
+ .netmem = netmem,
.frags = 0,
};
return 0;
}
-static void mlx5e_page_release_fragmented(struct mlx5e_rq *rq,
+static void mlx5e_page_release_fragmented(struct page_pool *pp,
struct mlx5e_frag_page *frag_page)
{
u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
- struct page *page = frag_page->page;
+ netmem_ref netmem = frag_page->netmem;
- if (page_pool_unref_page(page, drain_count) == 0)
- page_pool_put_unrefed_page(rq->page_pool, page, -1, true);
+ if (page_pool_unref_netmem(netmem, drain_count) == 0)
+ page_pool_put_unrefed_netmem(pp, netmem, -1, true);
}
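
The two helpers above keep the existing pagecnt-bias scheme while switching from struct page to netmem_ref: the buffer is fragmented with a large bias up front, consumers are counted locally in frag_page->frags, and release drains only the unused part of the bias in a single operation. A self-contained C sketch of that bookkeeping, where plain integers stand in for the page_pool reference count:

#include <stdio.h>

#define BIAS_MAX 64

struct frag_page {
	int refs;   /* stands in for the page_pool reference count */
	int frags;  /* consumers of this buffer so far */
};

static void frag_page_alloc(struct frag_page *fp)
{
	fp->refs = BIAS_MAX;  /* like page_pool_fragment_netmem(netmem, BIAS_MAX) */
	fp->frags = 0;
}

static void frag_page_release(struct frag_page *fp)
{
	int drain = BIAS_MAX - fp->frags;

	fp->refs -= drain;    /* like page_pool_unref_netmem(netmem, drain) */
	if (fp->refs == 0)
		printf("buffer returned to pool\n");
}

int main(void)
{
	struct frag_page fp;

	frag_page_alloc(&fp);
	fp.frags += 3;              /* three consumers took references */
	frag_page_release(&fp);     /* drain the unused part of the bias */

	/* the three consumers drop their references later */
	fp.refs -= 3;
	if (fp.refs == 0)
		printf("buffer returned to pool\n");
	return 0;
}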
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
@@ -313,7 +312,8 @@ static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc_fragmented(rq, frag->frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool,
+ frag->frag_page);
return err;
}
@@ -332,7 +332,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
if (mlx5e_frag_can_release(frag))
- mlx5e_page_release_fragmented(rq, frag->frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -358,7 +358,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
headroom = i == 0 ? rq->buff.headroom : 0;
- addr = page_pool_get_dma_addr(frag->frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
@@ -499,9 +499,10 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
u32 frag_offset, u32 len)
{
+ netmem_ref netmem = frag_page->netmem;
skb_frag_t *frag;
- dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
if (!xdp_buff_has_frags(xdp)) {
@@ -514,9 +515,9 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
}
frag = &sinfo->frags[sinfo->nr_frags++];
- skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
+ skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
- if (page_is_pfmemalloc(frag_page->page))
+ if (netmem_is_pfmemalloc(netmem))
xdp_buff_set_frag_pfmemalloc(xdp);
sinfo->xdp_frags_size += len;
}
@@ -527,27 +528,29 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
u8 next_frag = skb_shinfo(skb)->nr_frags;
+ netmem_ref netmem = frag_page->netmem;
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
+ if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
- } else {
- frag_page->frags++;
- skb_add_rx_frag(skb, next_frag, frag_page->page,
- frag_offset, len, truesize);
+ return;
}
+
+ frag_page->frags++;
+ skb_add_rx_frag_netmem(skb, next_frag, netmem,
+ frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, dma_addr_t addr,
+ netmem_ref netmem, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
- const void *from = page_address(page) + offset_from;
+ const void *from = netmem_address(netmem) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
@@ -584,7 +587,8 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
struct mlx5e_frag_page *frag_page;
frag_page = &wi->alloc_units.frag_pages[i];
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ frag_page);
}
}
}
@@ -679,12 +683,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
u64 addr;
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);
if (unlikely(err))
goto err_unmap;
-
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
header_offset = mlx5e_shampo_hd_offset(index++);
@@ -715,7 +718,8 @@ err_unmap:
if (!header_offset) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->hd_page_pool,
+ frag_page);
}
}
@@ -791,10 +795,11 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
dma_addr_t addr;
- err = mlx5e_page_alloc_fragmented(rq, frag_page);
+ err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
if (unlikely(err))
goto err_unmap;
- addr = page_pool_get_dma_addr(frag_page->page);
+
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
@@ -836,7 +841,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_unmap:
while (--i >= 0) {
frag_page--;
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->page_pool, frag_page);
}
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
@@ -855,7 +860,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- mlx5e_page_release_fragmented(rq, frag_page);
+ mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
}
clear_bit(header_index, shampo->bitmap);
}
@@ -1100,6 +1105,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (rq->page_pool)
page_pool_nid_changed(rq->page_pool, numa_mem_id());
+ if (rq->hd_page_pool)
+ page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
head = rq->mpwqe.actual_wq_head;
i = missing;
@@ -1212,7 +1219,7 @@ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
- return page_address(frag_page->page) + head_offset;
+ return netmem_address(frag_page->netmem) + head_offset;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -1673,11 +1680,11 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
dma_addr_t addr;
u32 frag_size;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
@@ -1727,10 +1734,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
frag_page = wi->frag_page;
- va = page_address(frag_page->page) + wi->offset;
+ va = netmem_address(frag_page->netmem) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
rq->buff.frame0_sz, rq->buff.map_dir);
net_prefetchw(va); /* xdp_frame data area */
@@ -2003,12 +2010,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
if (prog) {
/* area for bpf_xdp_[store|load]_bytes */
- net_prefetchw(page_address(frag_page->page) + frag_offset);
- if (unlikely(mlx5e_page_alloc_fragmented(rq, &wi->linear_page))) {
+ net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
+ if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
+ &wi->linear_page))) {
rq->stats->buff_alloc_err++;
return NULL;
}
- va = page_address(wi->linear_page.page);
+
+ va = netmem_address(wi->linear_page.netmem);
net_prefetchw(va); /* xdp_frame data area */
linear_hr = XDP_PACKET_HEADROOM;
linear_data_len = 0;
@@ -2068,7 +2077,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
wi->linear_page.frags++;
}
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL; /* page/packet was consumed by XDP */
}
@@ -2077,13 +2087,14 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool,
+ &wi->linear_page);
return NULL;
}
skb_mark_for_recycle(skb);
wi->linear_page.frags++;
- mlx5e_page_release_fragmented(rq, &wi->linear_page);
+ mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
@@ -2117,8 +2128,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (++pagep < frag_page);
}
/* copy header */
- addr = page_pool_get_dma_addr(head_page->page);
- mlx5e_copy_skb_header(rq, skb, head_page->page, addr,
+ addr = page_pool_get_dma_addr_netmem(head_page->netmem);
+ mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
@@ -2148,11 +2159,11 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(frag_page->page) + head_offset;
+ va = netmem_address(frag_page->netmem) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- addr = page_pool_get_dma_addr(frag_page->page);
+ addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, rq->buff.map_dir);
net_prefetch(data);
@@ -2191,16 +2202,19 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page);
u16 head_offset = mlx5e_shampo_hd_offset(header_index);
- dma_addr_t dma_addr = page_dma_addr + head_offset;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
+ dma_addr_t page_dma_addr;
+ dma_addr_t dma_addr;
void *hdr, *data;
u32 frag_size;
- hdr = page_address(frag_page->page) + head_offset;
+ page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
+ dma_addr = page_dma_addr + head_offset;
+
+ hdr = netmem_address(frag_page->netmem) + head_offset;
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
@@ -2225,7 +2239,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
net_prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr,
+ mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
@@ -2319,11 +2333,23 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (!*skb) {
- if (likely(head_size))
+ if (likely(head_size)) {
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
- else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
- data_offset, page_idx);
+ } else {
+ struct mlx5e_frag_page *frag_page;
+
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ /* Drop packets with header in unreadable data area to
+ * prevent the kernel from touching it.
+ */
+ if (unlikely(netmem_is_net_iov(frag_page->netmem)))
+ goto free_hd_entry;
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
+ cqe_bcnt,
+ data_offset,
+ page_idx);
+ }
+
if (unlikely(!*skb))
goto free_hd_entry;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 9772327d5124..4b3430ac3905 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -238,6 +238,23 @@ static u32 mlx5i_flow_type_mask(u32 flow_type)
return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}
+static int mlx5i_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_set_rxfh_fields(priv, cmd, extack);
+}
+
+static int mlx5i_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxfh_fields(priv, info);
+}
+
static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -283,6 +300,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+ .get_rxfh_fields = mlx5i_get_rxfh_fields,
+ .set_rxfh_fields = mlx5i_set_rxfh_fields,
.get_rxnfc = mlx5i_get_rxnfc,
.set_rxnfc = mlx5i_set_rxnfc,
.get_link_ksettings = mlx5i_get_link_ksettings,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 36393a17d92d..9c89d5378668 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -446,6 +446,26 @@ enum {
#define FBNIC_TMI_ILLEGAL_PTP_REQS 0x04409 /* 0x11024 */
#define FBNIC_TMI_GOOD_PTP_TS 0x0440a /* 0x11028 */
#define FBNIC_TMI_BAD_PTP_TS 0x0440b /* 0x1102c */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_L \
+ 0x04433 /* 0x110cc */
+#define FBNIC_TMI_STAT_TX_PACKET_1519_2047_BYTES_H \
+ 0x04434 /* 0x110d0 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_L \
+ 0x04435 /* 0x110d4 */
+#define FBNIC_TMI_STAT_TX_PACKET_2048_4095_BYTES_H \
+ 0x04436 /* 0x110d8 */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_L \
+ 0x04437 /* 0x110dc */
+#define FBNIC_TMI_STAT_TX_PACKET_4096_8191_BYTES_H \
+ 0x04438 /* 0x110e0 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_L \
+ 0x04439 /* 0x110e4 */
+#define FBNIC_TMI_STAT_TX_PACKET_8192_9216_BYTES_H \
+ 0x0443a /* 0x110e8 */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_L \
+ 0x0443b /* 0x110ec */
+#define FBNIC_TMI_STAT_TX_PACKET_9217_MAX_BYTES_H \
+ 0x0443c /* 0x110f0 */
#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
/* Precision Time Protocol Registers */
@@ -473,7 +493,7 @@ enum {
#define FBNIC_PTP_ADD_VAL_NS 0x04806 /* 0x12018 */
#define FBNIC_PTP_ADD_VAL_NS_MASK CSR_GENMASK(15, 0)
-#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
+#define FBNIC_PTP_ADD_VAL_SUBNS 0x04807 /* 0x1201c */
#define FBNIC_PTP_CTR_VAL_HI 0x04808 /* 0x12020 */
#define FBNIC_PTP_CTR_VAL_LO 0x04809 /* 0x12024 */
@@ -674,6 +694,26 @@ enum {
#define FBNIC_RPC_CNTR_OVR_SIZE_ERR 0x084a6 /* 0x21298 */
#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_L \
+ 0x0855f /* 0x2157c */
+#define FBNIC_RPC_STAT_RX_PACKET_1519_2047_BYTES_H \
+ 0x08560 /* 0x21580 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_L \
+ 0x08561 /* 0x21584 */
+#define FBNIC_RPC_STAT_RX_PACKET_2048_4095_BYTES_H \
+ 0x08562 /* 0x21588 */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_L \
+ 0x08563 /* 0x2158c */
+#define FBNIC_RPC_STAT_RX_PACKET_4096_8191_BYTES_H \
+ 0x08564 /* 0x21590 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_L \
+ 0x08565 /* 0x21594 */
+#define FBNIC_RPC_STAT_RX_PACKET_8192_9216_BYTES_H \
+ 0x08566 /* 0x21598 */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_L \
+ 0x08567 /* 0x2159c */
+#define FBNIC_RPC_STAT_RX_PACKET_9217_MAX_BYTES_H \
+ 0x08568 /* 0x215a0 */
#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
/* RPC RAM Registers */
@@ -776,16 +816,12 @@ enum {
#define FBNIC_CSR_START_MAC_STAT 0x11a00
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
-#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L \
- 0x11a0a /* 0x46828 */
-#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H \
- 0x11a0b /* 0x4682c */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L 0x11a0a /* 0x46828 */
+#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_H 0x11a0b /* 0x4682c */
#define FBNIC_MAC_STAT_RX_TOOLONG_L 0x11a0e /* 0x46838 */
#define FBNIC_MAC_STAT_RX_TOOLONG_H 0x11a0f /* 0x4683c */
-#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L \
- 0x11a12 /* 0x46848 */
-#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H \
- 0x11a13 /* 0x4684c */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_L 0x11a12 /* 0x46848 */
+#define FBNIC_MAC_STAT_RX_RECEIVED_OK_H 0x11a13 /* 0x4684c */
#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_L \
0x11a14 /* 0x46850 */
#define FBNIC_MAC_STAT_RX_PACKET_BAD_FCS_H \
@@ -796,20 +832,90 @@ enum {
#define FBNIC_MAC_STAT_RX_MULTICAST_H 0x11a1d /* 0x46874 */
#define FBNIC_MAC_STAT_RX_BROADCAST_L 0x11a1e /* 0x46878 */
#define FBNIC_MAC_STAT_RX_BROADCAST_H 0x11a1f /* 0x4687c */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_L 0x11a24 /* 0x46890 */
+#define FBNIC_MAC_STAT_RX_UNDERSIZE_H 0x11a25 /* 0x46894 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_L \
+ 0x11a26 /* 0x46898 */
+#define FBNIC_MAC_STAT_RX_PACKET_64_BYTES_H \
+ 0x11a27 /* 0x4689c */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_L \
+ 0x11a28 /* 0x468a0 */
+#define FBNIC_MAC_STAT_RX_PACKET_65_127_BYTES_H \
+ 0x11a29 /* 0x468a4 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_L \
+ 0x11a2a /* 0x468a8 */
+#define FBNIC_MAC_STAT_RX_PACKET_128_255_BYTES_H \
+ 0x11a2b /* 0x468ac */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_L \
+ 0x11a2c /* 0x468b0 */
+#define FBNIC_MAC_STAT_RX_PACKET_256_511_BYTES_H \
+ 0x11a2d /* 0x468b4 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_L \
+ 0x11a2e /* 0x468b8 */
+#define FBNIC_MAC_STAT_RX_PACKET_512_1023_BYTES_H \
+ 0x11a2f /* 0x468bc */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_L \
+ 0x11a30 /* 0x468c0 */
+#define FBNIC_MAC_STAT_RX_PACKET_1024_1518_BYTES_H \
+ 0x11a31 /* 0x468c4 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_L \
+ 0x11a32 /* 0x468c8 */
+#define FBNIC_MAC_STAT_RX_PACKET_1519_MAX_BYTES_H \
+ 0x11a33 /* 0x468cc */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_L 0x11a34 /* 0x468d0 */
+#define FBNIC_MAC_STAT_RX_OVERSIZE_H 0x11a35 /* 0x468d4 */
+#define FBNIC_MAC_STAT_RX_JABBER_L 0x11a36 /* 0x468d8 */
+#define FBNIC_MAC_STAT_RX_JABBER_H 0x11a37 /* 0x468dc */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_L 0x11a38 /* 0x468e0 */
+#define FBNIC_MAC_STAT_RX_FRAGMENT_H 0x11a39 /* 0x468e4 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_L \
+ 0x11a3c /* 0x468f0 */
+#define FBNIC_MAC_STAT_RX_CONTROL_FRAMES_H \
+ 0x11a3d /* 0x468f4 */
#define FBNIC_MAC_STAT_TX_BYTE_COUNT_L 0x11a3e /* 0x468f8 */
#define FBNIC_MAC_STAT_TX_BYTE_COUNT_H 0x11a3f /* 0x468fc */
#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_L \
0x11a42 /* 0x46908 */
#define FBNIC_MAC_STAT_TX_TRANSMITTED_OK_H \
0x11a43 /* 0x4690c */
-#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L \
- 0x11a46 /* 0x46918 */
-#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H \
- 0x11a47 /* 0x4691c */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_L 0x11a46 /* 0x46918 */
+#define FBNIC_MAC_STAT_TX_IFOUTERRORS_H 0x11a47 /* 0x4691c */
#define FBNIC_MAC_STAT_TX_MULTICAST_L 0x11a4a /* 0x46928 */
#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_L \
+ 0x11a4e /* 0x46938 */
+#define FBNIC_MAC_STAT_TX_PACKET_64_BYTES_H \
+ 0x11a4f /* 0x4693c */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_L \
+ 0x11a50 /* 0x46940 */
+#define FBNIC_MAC_STAT_TX_PACKET_65_127_BYTES_H \
+ 0x11a51 /* 0x46944 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_L \
+ 0x11a52 /* 0x46948 */
+#define FBNIC_MAC_STAT_TX_PACKET_128_255_BYTES_H \
+ 0x11a53 /* 0x4694c */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_L \
+ 0x11a54 /* 0x46950 */
+#define FBNIC_MAC_STAT_TX_PACKET_256_511_BYTES_H \
+ 0x11a55 /* 0x46954 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_L \
+ 0x11a56 /* 0x46958 */
+#define FBNIC_MAC_STAT_TX_PACKET_512_1023_BYTES_H \
+ 0x11a57 /* 0x4695c */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_L \
+ 0x11a58 /* 0x46960 */
+#define FBNIC_MAC_STAT_TX_PACKET_1024_1518_BYTES_H \
+ 0x11a59 /* 0x46964 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_L \
+ 0x11a5a /* 0x46968 */
+#define FBNIC_MAC_STAT_TX_PACKET_1519_MAX_BYTES_H \
+ 0x11a5b /* 0x4696c */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_L \
+ 0x11a5e /* 0x46978 */
+#define FBNIC_MAC_STAT_TX_CONTROL_FRAMES_H \
+ 0x11a5f /* 0x4697c */
/* PCIE Comphy Registers */
#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
@@ -857,7 +963,7 @@ enum {
0x3107e /* 0xc41f8 */
#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
0x3107f /* 0xc41fc */
-#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
/* Queue Registers
*
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index 4c4938eedd7b..c5f81f139e7e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -182,7 +182,7 @@ fbnic_flash_start(struct fbnic_dev *fbd, struct pldmfw_component *component)
else
err = -ETIMEDOUT;
- fbnic_fw_clear_cmpl(fbd, cmpl);
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
cmpl_free:
fbnic_fw_put_cmpl(cmpl);
@@ -300,7 +300,7 @@ err_no_msg:
component_name, 0, 0);
}
- fbnic_fw_clear_cmpl(fbd, cmpl);
+ fbnic_mbx_clear_cmpl(fbd, cmpl);
cmpl_free:
fbnic_fw_put_cmpl(cmpl);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 5c7556c8c4c5..588da02d6e22 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -531,20 +531,6 @@ static int fbnic_get_rss_hash_idx(u32 flow_type)
return -1;
}
-static int
-fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
-{
- int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
-
- if (hash_opt_idx < 0)
- return -EINVAL;
-
- /* Report options from rss_en table in fbn */
- cmd->data = fbn->rss_flow_hash[hash_opt_idx];
-
- return 0;
-}
-
static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -779,9 +765,6 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
cmd->data = fbn->num_rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
- ret = fbnic_get_rss_hash_opts(fbn, cmd);
- break;
case ETHTOOL_GRXCLSRULE:
ret = fbnic_get_cls_rule(fbn, cmd);
break;
@@ -803,41 +786,6 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
return ret;
}
-#define FBNIC_L2_HASH_OPTIONS \
- (RXH_L2DA | RXH_DISCARD)
-#define FBNIC_L3_HASH_OPTIONS \
- (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
-#define FBNIC_L4_HASH_OPTIONS \
- (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
-
-static int
-fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
-{
- int hash_opt_idx;
-
- /* Verify the type requested is correct */
- hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
- if (hash_opt_idx < 0)
- return -EINVAL;
-
- /* Verify the fields asked for can actually be assigned based on type */
- if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
- (hash_opt_idx > FBNIC_L4_HASH_OPT &&
- cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
- (hash_opt_idx > FBNIC_IP_HASH_OPT &&
- cmd->data & ~FBNIC_L2_HASH_OPTIONS))
- return -EINVAL;
-
- fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
-
- if (netif_running(fbn->netdev)) {
- fbnic_rss_reinit(fbn->fbd, fbn);
- fbnic_write_rules(fbn->fbd);
- }
-
- return 0;
-}
-
static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
{
int i;
@@ -1244,9 +1192,6 @@ static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = fbnic_set_rss_hash_opts(fbn, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = fbnic_set_cls_rule_ins(fbn, cmd);
break;
@@ -1347,6 +1292,60 @@ fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
}
static int
+fbnic_get_rss_hash_opts(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
+{
+ int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Report options from rss_en table in fbn */
+ cmd->data = fbn->rss_flow_hash[hash_opt_idx];
+
+ return 0;
+}
+
+#define FBNIC_L2_HASH_OPTIONS \
+ (RXH_L2DA | RXH_DISCARD)
+#define FBNIC_L3_HASH_OPTIONS \
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+#define FBNIC_L4_HASH_OPTIONS \
+ (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+
+static int
+fbnic_set_rss_hash_opts(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int hash_opt_idx;
+
+ /* Verify the type requested is correct */
+ hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
+ if (hash_opt_idx < 0)
+ return -EINVAL;
+
+ /* Verify the fields asked for can actually be assigned based on type */
+ if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
+ (hash_opt_idx > FBNIC_L4_HASH_OPT &&
+ cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
+ (hash_opt_idx > FBNIC_IP_HASH_OPT &&
+ cmd->data & ~FBNIC_L2_HASH_OPTIONS))
+ return -EINVAL;
+
+ fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_rss_reinit(fbn->fbd, fbn);
+ fbnic_write_rules(fbn->fbd);
+ }
+
+ return 0;
+}
+
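
The validation in fbnic_set_rss_hash_opts() above relies on nested allow-masks: the L4 options include the L3 options, which include the L2 options, and each flow type may only request fields from its own level. A standalone sketch of that check with made-up bit values, not the real RXH_* flags:

#include <stdio.h>

#define H_L2DA     (1u << 0)
#define H_DISCARD  (1u << 1)
#define H_IP_SRC   (1u << 2)
#define H_IP_DST   (1u << 3)
#define H_L4_SRC   (1u << 4)
#define H_L4_DST   (1u << 5)

#define L2_OPTIONS (H_L2DA | H_DISCARD)
#define L3_OPTIONS (L2_OPTIONS | H_IP_SRC | H_IP_DST)
#define L4_OPTIONS (L3_OPTIONS | H_L4_SRC | H_L4_DST)

enum flow_level { FLOW_L4, FLOW_L3, FLOW_L2 };

static int check_hash_fields(enum flow_level level, unsigned int fields)
{
	/* Never allow anything outside the largest mask. */
	if (fields & ~L4_OPTIONS)
		return -1;
	/* Flow types above L4 may not hash on L4 ports. */
	if (level > FLOW_L4 && (fields & ~L3_OPTIONS))
		return -1;
	/* Flow types above L3 may not hash on IP addresses. */
	if (level > FLOW_L3 && (fields & ~L2_OPTIONS))
		return -1;
	return 0;
}

int main(void)
{
	/* L4 hashing on an IP-only flow type is rejected */
	printf("%d\n", check_hash_fields(FLOW_L3, H_IP_SRC | H_L4_SRC));
	/* plain 2-tuple hashing on the same flow type is fine */
	printf("%d\n", check_hash_fields(FLOW_L3, H_IP_SRC | H_IP_DST));
	return 0;
}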
+static int
fbnic_modify_rxfh_context(struct net_device *netdev,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
@@ -1612,35 +1611,107 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
&mac_stats->eth_mac.FrameTooLongErrors);
}
+static void
+fbnic_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);
+
+ eth_ctrl_stats->MACControlFramesReceived =
+ mac_stats->eth_ctrl.MACControlFramesReceived.value;
+ eth_ctrl_stats->MACControlFramesTransmitted =
+ mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
+}
+
+static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 2047 },
+ { 2048, 4095 },
+ { 4096, 8191 },
+ { 8192, 9216 },
+ { 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
+ {}
+};
+
+static void
+fbnic_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);
+
+ rmon_stats->undersize_pkts =
+ mac_stats->rmon.undersize_pkts.value;
+ rmon_stats->oversize_pkts =
+ mac_stats->rmon.oversize_pkts.value;
+ rmon_stats->fragments =
+ mac_stats->rmon.fragments.value;
+ rmon_stats->jabbers =
+ mac_stats->rmon.jabbers.value;
+
+ for (i = 0; fbnic_rmon_ranges[i].high; i++) {
+ rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
+ rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
+ }
+
+ *ranges = fbnic_rmon_ranges;
+}
+
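
The rmon ranges above use the usual zero-terminated table convention, which is also what the copy loop keys off. A trivial standalone example of walking such a table; the last bucket's upper bound below is an assumed placeholder for FBNIC_MAX_JUMBO_FRAME_SIZE:

#include <stdio.h>

struct hist_range { unsigned int low, high; };

static const struct hist_range ranges[] = {
	{ 0, 64 }, { 65, 127 }, { 128, 255 }, { 256, 511 },
	{ 512, 1023 }, { 1024, 1518 }, { 1519, 2047 }, { 2048, 4095 },
	{ 4096, 8191 }, { 8192, 9216 },
	{ 9217, 9600 },  /* upper bound stands in for FBNIC_MAX_JUMBO_FRAME_SIZE */
	{ }              /* terminator: high == 0 ends the walk */
};

int main(void)
{
	int i;

	for (i = 0; ranges[i].high; i++)
		printf("bucket %2d: %u-%u bytes\n", i, ranges[i].low, ranges[i].high);
	return 0;
}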
static const struct ethtool_ops fbnic_ethtool_ops = {
- .supported_coalesce_params =
- ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_RX_MAX_FRAMES,
- .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
- .get_drvinfo = fbnic_get_drvinfo,
- .get_regs_len = fbnic_get_regs_len,
- .get_regs = fbnic_get_regs,
- .get_coalesce = fbnic_get_coalesce,
- .set_coalesce = fbnic_set_coalesce,
- .get_ringparam = fbnic_get_ringparam,
- .set_ringparam = fbnic_set_ringparam,
- .get_strings = fbnic_get_strings,
- .get_ethtool_stats = fbnic_get_ethtool_stats,
- .get_sset_count = fbnic_get_sset_count,
- .get_rxnfc = fbnic_get_rxnfc,
- .set_rxnfc = fbnic_set_rxnfc,
- .get_rxfh_key_size = fbnic_get_rxfh_key_size,
- .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
- .get_rxfh = fbnic_get_rxfh,
- .set_rxfh = fbnic_set_rxfh,
- .create_rxfh_context = fbnic_create_rxfh_context,
- .modify_rxfh_context = fbnic_modify_rxfh_context,
- .remove_rxfh_context = fbnic_remove_rxfh_context,
- .get_channels = fbnic_get_channels,
- .set_channels = fbnic_set_channels,
- .get_ts_info = fbnic_get_ts_info,
- .get_ts_stats = fbnic_get_ts_stats,
- .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
+ .get_drvinfo = fbnic_get_drvinfo,
+ .get_regs_len = fbnic_get_regs_len,
+ .get_regs = fbnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
+ .get_pauseparam = fbnic_phylink_get_pauseparam,
+ .set_pauseparam = fbnic_phylink_set_pauseparam,
+ .get_strings = fbnic_get_strings,
+ .get_ethtool_stats = fbnic_get_ethtool_stats,
+ .get_sset_count = fbnic_get_sset_count,
+ .get_rxnfc = fbnic_get_rxnfc,
+ .set_rxnfc = fbnic_set_rxnfc,
+ .get_rxfh_key_size = fbnic_get_rxfh_key_size,
+ .get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
+ .get_rxfh = fbnic_get_rxfh,
+ .set_rxfh = fbnic_set_rxfh,
+ .get_rxfh_fields = fbnic_get_rss_hash_opts,
+ .set_rxfh_fields = fbnic_set_rss_hash_opts,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
+ .get_channels = fbnic_get_channels,
+ .set_channels = fbnic_set_channels,
+ .get_ts_info = fbnic_get_ts_info,
+ .get_ts_stats = fbnic_get_ts_stats,
+ .get_link_ksettings = fbnic_phylink_ethtool_ksettings_get,
+ .get_fecparam = fbnic_phylink_get_fecparam,
+ .get_eth_mac_stats = fbnic_get_eth_mac_stats,
+ .get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_get_rmon_stats,
};
void fbnic_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 4521d0483d18..1d220d8369e7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -95,6 +95,9 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
/* Initialize lock to protect Tx ring */
spin_lock_init(&fbd->fw_tx_lock);
+ /* Reset FW Capabilities */
+ memset(&fbd->fw_cap, 0, sizeof(fbd->fw_cap));
+
/* Reinitialize mailbox memory */
for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
@@ -335,6 +338,16 @@ unlock_mbx:
return err;
}
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *fw_cmpl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+ fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+}
+
static void fbnic_fw_release_cmpl_data(struct kref *kref)
{
struct fbnic_fw_completion *cmpl_data;
@@ -373,11 +386,11 @@ fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
*
* Return:
 * One of the following values:
- * -EOPNOTSUPP: Is not ASIC so mailbox is not supported
- * -ENODEV: Device I/O error
- * -ENOMEM: Failed to allocate message
- * -EBUSY: No space in mailbox
- * -ENOSPC: DMA mapping failed
+ * -EOPNOTSUPP: Is not ASIC so mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
*
* This function sends a single TLV header indicating the host wants to take
* some action. However there are no other side effects which means that any
@@ -1117,6 +1130,7 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
unsigned long timeout = jiffies + 10 * HZ + 1;
int err, i;
@@ -1149,8 +1163,23 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
if (err)
goto clean_mbx;
- /* Use "1" to indicate we entered the state waiting for a response */
- fbd->fw_cap.running.mgmt.version = 1;
+ /* Poll until we get a current management firmware version, use "1"
+ * to indicate we entered the polling state waiting for a response
+ */
+ for (fbd->fw_cap.running.mgmt.version = 1;
+ fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE;) {
+ if (!tx_mbx->ready)
+ err = -ENODEV;
+ if (err)
+ goto clean_mbx;
+
+ msleep(20);
+ fbnic_mbx_poll(fbd);
+
+ /* set err, but wait till mgmt.version check to report it */
+ if (!time_is_after_jiffies(timeout))
+ err = -ETIMEDOUT;
+ }
return 0;
clean_mbx:
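
The new polling loop above waits for a valid management firmware version under a deadline, and deliberately records the timeout one iteration before reporting it so a reply that arrives on the final poll still counts. A userspace sketch of the same pattern; poll_mailbox() and the version values are stand-ins, not the driver's API:

#include <stdio.h>
#include <time.h>

#define MIN_VERSION 0x0100

static int fw_version;

static void poll_mailbox(void)
{
	static int ticks;

	if (++ticks >= 3)            /* pretend the reply arrives on tick 3 */
		fw_version = 0x0101;
}

int main(void)
{
	time_t deadline = time(NULL) + 10;
	int err = 0;

	while (fw_version < MIN_VERSION) {
		if (err) {
			fprintf(stderr, "timed out waiting for fw version\n");
			return 1;
		}

		/* the driver sleeps ~20 ms between polls here */
		poll_mailbox();

		/* record the timeout, but let the version check report it */
		if (time(NULL) > deadline)
			err = -1;
	}

	printf("fw version %#x\n", fw_version);
	return 0;
}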
@@ -1244,16 +1273,6 @@ struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
return cmpl;
}
-void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
- struct fbnic_fw_completion *fw_cmpl)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&fbd->fw_tx_lock, flags);
- fbnic_mbx_clear_cmpl_slot(fbd, fw_cmpl);
- spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
-}
-
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index 08bc4b918de7..555b231b38c1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -19,10 +19,10 @@ struct fbnic_fw_mbx {
};
// FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN
-#define FBNIC_FW_VER_MAX_SIZE 32
+#define FBNIC_FW_VER_MAX_SIZE 32
// Formatted version is in the format XX.YY.ZZ_RRR_COMMIT
#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13)
-#define FBNIC_FW_LOG_MAX_SIZE 256
+#define FBNIC_FW_LOG_MAX_SIZE 256
struct fbnic_fw_ver {
u32 version;
@@ -66,6 +66,8 @@ void fbnic_mbx_init(struct fbnic_dev *fbd);
void fbnic_mbx_clean(struct fbnic_dev *fbd);
int fbnic_mbx_set_cmpl(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
+void fbnic_mbx_clear_cmpl(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data);
void fbnic_mbx_poll(struct fbnic_dev *fbd);
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
@@ -81,8 +83,6 @@ int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
-void fbnic_fw_clear_cmpl(struct fbnic_dev *fbd,
- struct fbnic_fw_completion *cmpl_data);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str, _str_sz) \
@@ -155,10 +155,10 @@ enum {
};
enum {
- FBNIC_FW_LINK_SPEED_25R1 = 1,
- FBNIC_FW_LINK_SPEED_50R2 = 2,
- FBNIC_FW_LINK_SPEED_50R1 = 3,
- FBNIC_FW_LINK_SPEED_100R2 = 4,
+ FBNIC_FW_LINK_MODE_25CR = 1,
+ FBNIC_FW_LINK_MODE_50CR2 = 2,
+ FBNIC_FW_LINK_MODE_50CR = 3,
+ FBNIC_FW_LINK_MODE_100CR2 = 4,
};
enum {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 07e54bb75bf3..4fe239717497 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -22,6 +22,23 @@ struct fbnic_hw_stat {
struct fbnic_stat_counter bytes;
};
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_eth_ctrl_stats {
+ struct fbnic_stat_counter MACControlFramesTransmitted;
+ struct fbnic_stat_counter MACControlFramesReceived;
+};
+
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_rmon_stats {
+ struct fbnic_stat_counter undersize_pkts;
+ struct fbnic_stat_counter oversize_pkts;
+ struct fbnic_stat_counter fragments;
+ struct fbnic_stat_counter jabbers;
+
+ struct fbnic_stat_counter hist[ETHTOOL_RMON_HIST_MAX];
+ struct fbnic_stat_counter hist_tx[ETHTOOL_RMON_HIST_MAX];
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -40,6 +57,8 @@ struct fbnic_eth_mac_stats {
struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
+ struct fbnic_eth_ctrl_stats eth_ctrl;
+ struct fbnic_rmon_stats rmon;
};
struct fbnic_tmi_stats {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index 10e108c1fcd0..fd8d67f9048e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -452,7 +452,7 @@ static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;
/* Disable fault handling if no FEC is requested */
- if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
+ if (fbn->fec == FBNIC_FEC_OFF)
command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;
return command_config;
@@ -468,15 +468,15 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
return false;
/* Define the expected lane mask for the status bits we need to check */
- switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
- case FBNIC_LINK_100R2:
+ switch (fbn->aui) {
+ case FBNIC_AUI_100GAUI2:
lane_mask = 0xf;
break;
- case FBNIC_LINK_50R1:
+ case FBNIC_AUI_50GAUI1:
lane_mask = 3;
break;
- case FBNIC_LINK_50R2:
- switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_AUI_LAUI2:
+ switch (fbn->fec) {
case FBNIC_FEC_OFF:
lane_mask = 0x63;
break;
@@ -488,13 +488,13 @@ static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
break;
}
break;
- case FBNIC_LINK_25R1:
+ case FBNIC_AUI_25GAUI:
lane_mask = 1;
break;
}
/* Use an XOR to remove the bits we expect to see set */
- switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ switch (fbn->fec) {
case FBNIC_FEC_OFF:
lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
pcs_status);
@@ -540,53 +540,41 @@ static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
return link;
}
-static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec)
{
- struct fbnic_net *fbn = netdev_priv(fbd->netdev);
- u8 link_mode = fbn->link_mode;
- u8 fec = fbn->fec;
-
- /* Update FEC first to reflect FW current mode */
- if (fbn->fec & FBNIC_FEC_AUTO) {
- switch (fbd->fw_cap.link_fec) {
- case FBNIC_FW_LINK_FEC_NONE:
- fec = FBNIC_FEC_OFF;
- break;
- case FBNIC_FW_LINK_FEC_RS:
- fec = FBNIC_FEC_RS;
- break;
- case FBNIC_FW_LINK_FEC_BASER:
- fec = FBNIC_FEC_BASER;
- break;
- default:
- return;
- }
-
- fbn->fec = fec;
+ /* Retrieve default speed from FW */
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_MODE_25CR:
+ *aui = FBNIC_AUI_25GAUI;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR2:
+ *aui = FBNIC_AUI_LAUI2;
+ break;
+ case FBNIC_FW_LINK_MODE_50CR:
+ *aui = FBNIC_AUI_50GAUI1;
+ *fec = FBNIC_FEC_RS;
+ return;
+ case FBNIC_FW_LINK_MODE_100CR2:
+ *aui = FBNIC_AUI_100GAUI2;
+ *fec = FBNIC_FEC_RS;
+ return;
+ default:
+ *aui = FBNIC_AUI_UNKNOWN;
+ return;
}
- /* Do nothing if AUTO mode is not engaged */
- if (fbn->link_mode & FBNIC_LINK_AUTO) {
- switch (fbd->fw_cap.link_speed) {
- case FBNIC_FW_LINK_SPEED_25R1:
- link_mode = FBNIC_LINK_25R1;
- break;
- case FBNIC_FW_LINK_SPEED_50R2:
- link_mode = FBNIC_LINK_50R2;
- break;
- case FBNIC_FW_LINK_SPEED_50R1:
- link_mode = FBNIC_LINK_50R1;
- fec = FBNIC_FEC_RS;
- break;
- case FBNIC_FW_LINK_SPEED_100R2:
- link_mode = FBNIC_LINK_100R2;
- fec = FBNIC_FEC_RS;
- break;
- default:
- return;
- }
-
- fbn->link_mode = link_mode;
+ /* Update FEC first to reflect FW current mode */
+ switch (fbd->fw_cap.link_fec) {
+ case FBNIC_FW_LINK_FEC_NONE:
+ *fec = FBNIC_FEC_OFF;
+ break;
+ case FBNIC_FW_LINK_FEC_RS:
+ default:
+ *fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_FEC_BASER:
+ *fec = FBNIC_FEC_BASER;
+ break;
}
}
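
fbnic_mac_get_fw_settings() above maps the firmware-reported link mode to an AUI setting and, for the PAM4 modes, forces RS FEC, while the NRZ modes inherit the firmware's FEC choice. A condensed standalone sketch of that mapping; the enum names mirror the diff but everything else is illustrative:

#include <stdio.h>

enum { FW_25CR = 1, FW_50CR2, FW_50CR, FW_100CR2 };
enum { AUI_25GAUI, AUI_LAUI2, AUI_50GAUI1, AUI_100GAUI2, AUI_UNKNOWN };
enum { FEC_OFF, FEC_RS, FEC_BASER };

static void get_fw_settings(int fw_mode, int fw_fec, int *aui, int *fec)
{
	switch (fw_mode) {
	case FW_25CR:
		*aui = AUI_25GAUI;
		break;
	case FW_50CR2:
		*aui = AUI_LAUI2;
		break;
	case FW_50CR:           /* PAM4 single lane: RS FEC is mandatory */
		*aui = AUI_50GAUI1;
		*fec = FEC_RS;
		return;
	case FW_100CR2:         /* PAM4 dual lane: RS FEC is mandatory */
		*aui = AUI_100GAUI2;
		*fec = FEC_RS;
		return;
	default:
		*aui = AUI_UNKNOWN;
		return;
	}

	/* NRZ modes: take whatever FEC the firmware reports */
	*fec = fw_fec;
}

int main(void)
{
	int aui, fec = FEC_OFF;

	get_fw_settings(FW_50CR2, FEC_BASER, &aui, &fec);
	printf("aui=%d fec=%d\n", aui, fec);
	return 0;
}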
@@ -596,9 +584,6 @@ static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
- /* Pull in settings from FW */
- fbnic_pcs_get_fw_settings(fbd);
-
return 0;
}
@@ -680,6 +665,76 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
MAC_STAT_TX_BROADCAST);
}
+static void
+fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesReceived,
+ MAC_STAT_RX_CONTROL_FRAMES);
+ fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesTransmitted,
+ MAC_STAT_TX_CONTROL_FRAMES);
+}
+
+static void
+fbnic_mac_get_rmon_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->undersize_pkts,
+ MAC_STAT_RX_UNDERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->oversize_pkts,
+ MAC_STAT_RX_OVERSIZE);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->fragments,
+ MAC_STAT_RX_FRAGMENT);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->jabbers,
+ MAC_STAT_RX_JABBER);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[0],
+ MAC_STAT_RX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[1],
+ MAC_STAT_RX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[2],
+ MAC_STAT_RX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[3],
+ MAC_STAT_RX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[4],
+ MAC_STAT_RX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[5],
+ MAC_STAT_RX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[6],
+ RPC_STAT_RX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[7],
+ RPC_STAT_RX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[8],
+ RPC_STAT_RX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[9],
+ RPC_STAT_RX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[10],
+ RPC_STAT_RX_PACKET_9217_MAX_BYTES);
+
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[0],
+ MAC_STAT_TX_PACKET_64_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[1],
+ MAC_STAT_TX_PACKET_65_127_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[2],
+ MAC_STAT_TX_PACKET_128_255_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[3],
+ MAC_STAT_TX_PACKET_256_511_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[4],
+ MAC_STAT_TX_PACKET_512_1023_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[5],
+ MAC_STAT_TX_PACKET_1024_1518_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[6],
+ TMI_STAT_TX_PACKET_1519_2047_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[7],
+ TMI_STAT_TX_PACKET_2048_4095_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[8],
+ TMI_STAT_TX_PACKET_4096_8191_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[9],
+ TMI_STAT_TX_PACKET_8192_9216_BYTES);
+ fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[10],
+ TMI_STAT_TX_PACKET_9217_MAX_BYTES);
+}
+
static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
long *val)
{
@@ -741,7 +796,7 @@ static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
*val = *sensor;
exit_cleanup:
- fbnic_fw_clear_cmpl(fbd, fw_cmpl);
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
exit_free:
fbnic_fw_put_cmpl(fw_cmpl);
@@ -755,6 +810,8 @@ static const struct fbnic_mac fbnic_mac_asic = {
.pcs_get_link = fbnic_pcs_get_link_asic,
.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
+ .get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats,
+ .get_rmon_stats = fbnic_mac_get_rmon_stats,
.link_down = fbnic_mac_link_down_asic,
.link_up = fbnic_mac_link_up_asic,
.get_sensor = fbnic_mac_get_sensor_asic,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 05a591653e09..86fa06da2b3e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -25,27 +25,23 @@ enum {
FBNIC_FEC_OFF = 0,
FBNIC_FEC_RS = 1,
FBNIC_FEC_BASER = 2,
- FBNIC_FEC_AUTO = 4,
};
-#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1)
-
-/* Treat the link modes as a set of modulation/lanes bitmask:
+/* Treat the AUI modes as a modulation/lanes bitmask:
* Bit 0: Lane Count, 0 = R1, 1 = R2
* Bit 1: Modulation, 0 = NRZ, 1 = PAM4
- * Bit 2: Retrieve link mode from FW
+ * Bit 2: Unknown Modulation/Lane Configuration
*/
enum {
- FBNIC_LINK_25R1 = 0,
- FBNIC_LINK_50R2 = 1,
- FBNIC_LINK_50R1 = 2,
- FBNIC_LINK_100R2 = 3,
- FBNIC_LINK_AUTO = 4,
+ FBNIC_AUI_25GAUI = 0, /* 25.78125GBd 25.78125 * 1 */
+ FBNIC_AUI_LAUI2 = 1, /* 51.5625GBd 25.78125 * 2 */
+ FBNIC_AUI_50GAUI1 = 2, /* 53.125GBd 53.125 * 1 */
+ FBNIC_AUI_100GAUI2 = 3, /* 106.25GBd 53.125 * 2 */
+ FBNIC_AUI_UNKNOWN = 4,
};
-#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2)
-#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
-#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
+#define FBNIC_AUI_MODE_R2 (FBNIC_AUI_LAUI2)
+#define FBNIC_AUI_MODE_PAM4 (FBNIC_AUI_50GAUI1)
enum fbnic_sensor_id {
FBNIC_SENSOR_TEMP, /* Temp in millidegrees Centigrade */
@@ -85,6 +81,10 @@ struct fbnic_mac {
void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats);
+ void (*get_eth_ctrl_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_eth_ctrl_stats *ctrl_stats);
+ void (*get_rmon_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_rmon_stats *rmon_stats);
void (*link_down)(struct fbnic_dev *fbd);
void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
@@ -93,4 +93,5 @@ struct fbnic_mac {
};
int fbnic_mac_init(struct fbnic_dev *fbd);
+void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec);
#endif /* _FBNIC_MAC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index aa812c63d5af..7bd7812d9c06 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -736,8 +736,6 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
*/
netdev->ethtool->wol_enabled = true;
- fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
- fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 561837e80ec8..86576ae04262 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -4,8 +4,8 @@
#ifndef _FBNIC_NETDEV_H_
#define _FBNIC_NETDEV_H_
-#include <linux/types.h>
#include <linux/phylink.h>
+#include <linux/types.h>
#include "fbnic_csr.h"
#include "fbnic_rpc.h"
@@ -42,9 +42,8 @@ struct fbnic_net {
struct phylink_config phylink_config;
struct phylink_pcs phylink_pcs;
- /* TBD: Remove these when phylink supports FEC and lane config */
+ u8 aui;
u8 fec;
- u8 link_mode;
/* Cached top bits of the HW time counter for 40b -> 64b conversion */
u32 time_high;
@@ -67,7 +66,7 @@ struct fbnic_net {
struct fbnic_queue_stats rx_stats;
u64 link_down_events;
- /* Time stampinn filter config */
+ /* Time stamping filter config */
struct kernel_hwtstamp_config hwtstamp_config;
};
@@ -82,6 +81,7 @@ int fbnic_netdev_register(struct net_device *netdev);
void fbnic_netdev_unregister(struct net_device *netdev);
void fbnic_reset_queues(struct fbnic_net *fbn,
unsigned int tx, unsigned int rx);
+
void fbnic_set_ethtool_ops(struct net_device *dev);
int fbnic_ptp_setup(struct fbnic_dev *fbd);
@@ -93,5 +93,13 @@ void fbnic_time_stop(struct fbnic_net *fbn);
void __fbnic_set_rx_mode(struct net_device *netdev);
void fbnic_clear_rx_mode(struct net_device *netdev);
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause);
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd);
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam);
int fbnic_phylink_init(struct net_device *netdev);
#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index 860b02b22c15..7ce3fdd25282 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -8,6 +8,99 @@
#include "fbnic_mac.h"
#include "fbnic_netdev.h"
+static phy_interface_t fbnic_phylink_select_interface(u8 aui)
+{
+ switch (aui) {
+ case FBNIC_AUI_100GAUI2:
+ return PHY_INTERFACE_MODE_100GBASEP;
+ case FBNIC_AUI_50GAUI1:
+ return PHY_INTERFACE_MODE_50GBASER;
+ case FBNIC_AUI_LAUI2:
+ return PHY_INTERFACE_MODE_LAUI;
+ case FBNIC_AUI_25GAUI:
+ return PHY_INTERFACE_MODE_25GBASER;
+ }
+
+ return PHY_INTERFACE_MODE_NA;
+}
+
+void fbnic_phylink_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ phylink_ethtool_get_pauseparam(fbn->phylink, pause);
+}
+
+int fbnic_phylink_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return phylink_ethtool_set_pauseparam(fbn->phylink, pause);
+}
+
+static void
+fbnic_phylink_get_supported_fec_modes(unsigned long *supported)
+{
+ /* The NIC can support up to 8 combinations:
+  * either 50G-CR or 100G-CR2,
+  * with RS FEC only; or
+  * either 25G-CR or 50G-CR2,
+  * with no FEC, RS, or Base-R.
+  */
+ if (phylink_test(supported, 100000baseCR2_Full) ||
+ phylink_test(supported, 50000baseCR_Full))
+ phylink_set(supported, FEC_RS);
+ if (phylink_test(supported, 50000baseCR2_Full) ||
+ phylink_test(supported, 25000baseCR_Full)) {
+ phylink_set(supported, FEC_BASER);
+ phylink_set(supported, FEC_NONE);
+ phylink_set(supported, FEC_RS);
+ }
+}
+
+int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ err = phylink_ethtool_ksettings_get(fbn->phylink, cmd);
+ if (!err) {
+ unsigned long *supp = cmd->link_modes.supported;
+
+ cmd->base.port = PORT_DA;
+ cmd->lanes = (fbn->aui & FBNIC_AUI_MODE_R2) ? 2 : 1;
+
+ fbnic_phylink_get_supported_fec_modes(supp);
+ }
+
+ return err;
+}
+
+int fbnic_phylink_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (fbn->fec & FBNIC_FEC_RS) {
+ fecparam->active_fec = ETHTOOL_FEC_RS;
+ fecparam->fec = ETHTOOL_FEC_RS;
+ } else if (fbn->fec & FBNIC_FEC_BASER) {
+ fecparam->active_fec = ETHTOOL_FEC_BASER;
+ fecparam->fec = ETHTOOL_FEC_BASER;
+ } else {
+ fecparam->active_fec = ETHTOOL_FEC_OFF;
+ fecparam->fec = ETHTOOL_FEC_OFF;
+ }
+
+ if (fbn->aui & FBNIC_AUI_MODE_PAM4)
+ fecparam->fec |= ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
static struct fbnic_net *
fbnic_pcs_to_net(struct phylink_pcs *pcs)
{
@@ -21,23 +114,20 @@ fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
struct fbnic_dev *fbd = fbn->fbd;
- /* For now we use hard-coded defaults and FW config to determine
- * the current values. In future patches we will add support for
- * reconfiguring these values and changing link settings.
- */
- switch (fbd->fw_cap.link_speed) {
- case FBNIC_FW_LINK_SPEED_25R1:
+ switch (fbn->aui) {
+ case FBNIC_AUI_25GAUI:
state->speed = SPEED_25000;
break;
- case FBNIC_FW_LINK_SPEED_50R2:
+ case FBNIC_AUI_LAUI2:
+ case FBNIC_AUI_50GAUI1:
state->speed = SPEED_50000;
break;
- case FBNIC_FW_LINK_SPEED_100R2:
+ case FBNIC_AUI_100GAUI2:
state->speed = SPEED_100000;
break;
default:
- state->speed = SPEED_UNKNOWN;
- break;
+ state->link = 0;
+ return;
}
state->duplex = DUPLEX_FULL;
@@ -131,6 +221,7 @@ static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
int fbnic_phylink_init(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
struct phylink *phylink;
fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
@@ -138,18 +229,23 @@ int fbnic_phylink_init(struct net_device *netdev)
fbn->phylink_config.dev = &netdev->dev;
fbn->phylink_config.type = PHYLINK_NETDEV;
fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
- MAC_10000FD | MAC_25000FD |
- MAC_40000FD | MAC_50000FD |
+ MAC_25000FD | MAC_50000FD |
MAC_100000FD;
fbn->phylink_config.default_an_inband = true;
- __set_bit(PHY_INTERFACE_MODE_XGMII,
+ __set_bit(PHY_INTERFACE_MODE_100GBASEP,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_50GBASER,
fbn->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_XLGMII,
+ __set_bit(PHY_INTERFACE_MODE_LAUI,
fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_25GBASER,
+ fbn->phylink_config.supported_interfaces);
+
+ fbnic_mac_get_fw_settings(fbd, &fbn->aui, &fbn->fec);
phylink = phylink_create(&fbn->phylink_config, NULL,
- PHY_INTERFACE_MODE_XLGMII,
+ fbnic_phylink_select_interface(fbn->aui),
&fbnic_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
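
The fbnic_phylink_* helpers added above are thin wrappers so the ethtool code can defer pause, link-settings and FEC reporting to phylink plus the cached aui/fec state. The actual fbnic_ethtool.c hookup is outside this diff; a hypothetical sketch of how such wrappers are typically plugged into an ethtool_ops table:

/* Hypothetical wiring, for illustration only; the real fbnic
 * ethtool_ops table lives in fbnic_ethtool.c, not in this diff.
 */
static const struct ethtool_ops example_fbnic_ethtool_ops = {
	.get_pauseparam		= fbnic_phylink_get_pauseparam,
	.set_pauseparam		= fbnic_phylink_set_pauseparam,
	.get_link_ksettings	= fbnic_phylink_ethtool_ksettings_get,
	.get_fecparam		= fbnic_phylink_get_fecparam,
};
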
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index f46616af41ea..2e361d6f03ff 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -141,9 +141,6 @@ struct fbnic_napi_vector {
struct fbnic_q_triad qt[];
};
-#define FBNIC_MAX_TXQS 128u
-#define FBNIC_MAX_RXQS 128u
-
netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 64a3b953cc17..40002d9fe274 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -913,23 +913,29 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int lan743x_ethtool_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *fields)
+{
+ fields->data = 0;
+
+ switch (fields->flow_type) {
+ case TCP_V4_FLOW: case UDP_V4_FLOW:
+ case TCP_V6_FLOW: case UDP_V6_FLOW:
+ fields->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case IPV4_FLOW: case IPV6_FLOW:
+ fields->data |= RXH_IP_SRC | RXH_IP_DST;
+ return 0;
+ }
+
+ return 0;
+}
+
static int lan743x_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *rxnfc,
u32 *rule_locs)
{
switch (rxnfc->cmd) {
- case ETHTOOL_GRXFH:
- rxnfc->data = 0;
- switch (rxnfc->flow_type) {
- case TCP_V4_FLOW:case UDP_V4_FLOW:
- case TCP_V6_FLOW:case UDP_V6_FLOW:
- rxnfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case IPV4_FLOW: case IPV6_FLOW:
- rxnfc->data |= RXH_IP_SRC | RXH_IP_DST;
- return 0;
- }
- break;
case ETHTOOL_GRXRINGS:
rxnfc->data = LAN743X_USED_RX_CHANNELS;
return 0;
@@ -1368,6 +1374,7 @@ const struct ethtool_ops lan743x_ethtool_ops = {
.get_rxfh_indir_size = lan743x_ethtool_get_rxfh_indir_size,
.get_rxfh = lan743x_ethtool_get_rxfh,
.set_rxfh = lan743x_ethtool_set_rxfh,
+ .get_rxfh_fields = lan743x_ethtool_get_rxfh_fields,
.get_ts_info = lan743x_ethtool_get_ts_info,
.get_eee = lan743x_ethtool_get_eee,
.set_eee = lan743x_ethtool_set_eee,
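
This hunk is part of the tree-wide move of ETHTOOL_GRXFH/ETHTOOL_SRXFH handling out of .get_rxnfc/.set_rxnfc and into the dedicated .get_rxfh_fields/.set_rxfh_fields callbacks, which take a struct ethtool_rxfh_fields. A minimal sketch of a new-style callback follows; the flow types and hash fields in it are illustrative assumptions, not taken from lan743x:

/* Sketch of a .get_rxfh_fields callback under the new API; flow
 * types and hash fields here are assumptions for illustration.
 */
static int example_get_rxfh_fields(struct net_device *netdev,
				   struct ethtool_rxfh_fields *fields)
{
	fields->data = 0;

	switch (fields->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		/* hash on the full 4-tuple for L4 flows */
		fields->data = RXH_IP_SRC | RXH_IP_DST |
			       RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
		/* other IPv4 traffic hashes on addresses only */
		fields->data = RXH_IP_SRC | RXH_IP_DST;
		break;
	}

	return 0;
}
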
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 52cf7112762c..55dd7dee718c 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -6,6 +6,8 @@
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
#include <net/mana/mana.h>
@@ -83,8 +85,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- if (gc->num_msix_usable > resp.max_msix)
- gc->num_msix_usable = resp.max_msix;
+ if (!pci_msix_can_alloc_dyn(pdev)) {
+ if (gc->num_msix_usable > resp.max_msix)
+ gc->num_msix_usable = resp.max_msix;
+ } else {
+ /* If dynamic allocation is enabled, the HWC MSI-X vector
+  * has already been allocated
+  */
+ gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
+ }
if (gc->num_msix_usable <= 1)
return -ENOSPC;
@@ -355,11 +364,59 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
}
EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
+#define MANA_SERVICE_PERIOD 10
+
+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+};
+
+static void mana_serv_func(struct work_struct *w)
+{
+ struct mana_serv_work *mns_wk;
+ struct pci_bus *bus, *parent;
+ struct pci_dev *pdev;
+
+ mns_wk = container_of(w, struct mana_serv_work, serv_work);
+ pdev = mns_wk->pdev;
+
+ pci_lock_rescan_remove();
+
+ if (!pdev)
+ goto out;
+
+ bus = pdev->bus;
+ if (!bus) {
+ dev_err(&pdev->dev, "MANA service: no bus\n");
+ goto out;
+ }
+
+ parent = bus->parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "MANA service: no parent bus\n");
+ goto out;
+ }
+
+ pci_stop_and_remove_bus_device(bus->self);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ pci_rescan_bus(parent);
+
+out:
+ pci_unlock_rescan_remove();
+
+ pci_dev_put(pdev);
+ kfree(mns_wk);
+ module_put(THIS_MODULE);
+}
+
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+ struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
@@ -404,6 +461,33 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
eq->eq.callback(eq->eq.context, eq, &event);
break;
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ dev_info(gc->dev, "Recv MANA service type:%d\n", type);
+
+ if (gc->in_service) {
+ dev_info(gc->dev, "Already in service\n");
+ break;
+ }
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_info(gc->dev, "Module is unloading\n");
+ break;
+ }
+
+ mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+ if (!mns_wk) {
+ module_put(THIS_MODULE);
+ break;
+ }
+
+ dev_info(gc->dev, "Start MANA service type:%d\n", type);
+ gc->in_service = true;
+ mns_wk->pdev = to_pci_dev(gc->dev);
+ pci_dev_get(mns_wk->pdev);
+ INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+ schedule_work(&mns_wk->serv_work);
+ break;
+
default:
break;
}
@@ -486,7 +570,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
}
queue->eq.msix_index = msi_index;
- gic = &gc->irq_contexts[msi_index];
+ gic = xa_load(&gc->irq_contexts, msi_index);
+ if (WARN_ON(!gic))
+ return -EINVAL;
spin_lock_irqsave(&gic->lock, flags);
list_add_rcu(&queue->entry, &gic->eq_list);
@@ -511,7 +597,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
if (WARN_ON(msix_index >= gc->num_msix_usable))
return;
- gic = &gc->irq_contexts[msix_index];
+ gic = xa_load(&gc->irq_contexts, msix_index);
+ if (WARN_ON(!gic))
+ return;
+
spin_lock_irqsave(&gic->lock, flags);
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
if (queue == eq) {
@@ -1291,7 +1380,49 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
-static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+/*
+ * Spread on CPUs with the following heuristics:
+ *
+ * 1. No more than one IRQ per CPU, if possible;
+ * 2. NUMA locality is the second priority;
+ * 3. Sibling dislocality is the last priority.
+ *
+ * Let's consider this topology:
+ *
+ * Node 0 1
+ * Core 0 1 2 3
+ * CPU 0 1 2 3 4 5 6 7
+ *
+ * The most performant IRQ distribution based on the above topology
+ * and heuristics may look like this:
+ *
+ * IRQ Nodes Cores CPUs
+ * 0 1 0 0-1
+ * 1 1 1 2-3
+ * 2 1 0 0-1
+ * 3 1 1 2-3
+ * 4 2 2 4-5
+ * 5 2 3 6-7
+ * 6 2 2 4-5
+ * 7 2 3 6-7
+ *
+ * The heuristic is implemented as follows.
+ *
+ * The outer for_each() loop resets 'weight' to the actual number
+ * of CPUs in the hop. The inner for_each() loop then decrements it
+ * by the number of sibling groups (cores) while assigning the first
+ * set of IRQs to each group. IRQs 0 and 1 above are distributed
+ * this way.
+ *
+ * Because NUMA locality matters more than sibling dislocality, we
+ * then walk the same set of siblings again and assign the second set
+ * of IRQs (2 and 3); this is the middle while() loop. We keep going
+ * until the number of IRQs assigned on this hop equals the number of
+ * CPUs in the hop (weight == 0), then switch to the next hop and repeat.
+ */
+
+static int irq_setup(unsigned int *irqs, unsigned int len, int node,
+ bool skip_first_cpu)
{
const struct cpumask *next, *prev = cpu_none_mask;
cpumask_var_t cpus __free(free_cpumask_var);
@@ -1306,11 +1437,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node)
while (weight > 0) {
cpumask_andnot(cpus, next, prev);
for_each_cpu(cpu, cpus) {
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+
+ if (unlikely(skip_first_cpu)) {
+ skip_first_cpu = false;
+ continue;
+ }
+
if (len-- == 0)
goto done;
+
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
- cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
- --weight;
}
}
prev = next;
@@ -1320,47 +1458,108 @@ done:
return 0;
}
-static int mana_gd_setup_irqs(struct pci_dev *pdev)
+static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
- unsigned int max_irqs, cpu;
- int start_irq_index = 1;
- int nvec, *irqs, irq;
- int err, i = 0, j;
+ bool skip_first_cpu = false;
+ int *irqs, irq, err, i;
- cpus_read_lock();
- max_queues_per_port = num_online_cpus();
- if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
- max_queues_per_port = MANA_MAX_NUM_QUEUES;
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
- /* Need 1 interrupt for the Hardware communication Channel (HWC) */
- max_irqs = max_queues_per_port + 1;
+ /*
+  * When walking the PCI IRQ vectors we start at index 1, because
+  * the vector at index 0 is already in use by the HWC.
+  * The irqs[] array, however, is populated from index 0, since it
+  * is later handed to irq_setup().
+  */
+ for (i = 1; i <= nvec; i++) {
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
- nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0) {
- cpus_read_unlock();
- return nvec;
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
+
+ /* one pci vector is already allocated for HWC */
+ irqs[i - 1] = pci_irq_vector(pdev, i);
+ if (irqs[i - 1] < 0) {
+ err = irqs[i - 1];
+ goto free_current_gic;
+ }
+
+ err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- if (nvec <= num_online_cpus())
- start_irq_index = 0;
- irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
- if (!irqs) {
- err = -ENOMEM;
- goto free_irq_vector;
+ /*
+  * When calling irq_setup() for dynamically added IRQs, if the number of
+  * CPUs is greater than or equal to the number of allocated MSI-X vectors,
+  * skip the first CPU sibling group: it is already affinitized to the HWC IRQ.
+  */
+ cpus_read_lock();
+ if (gc->num_msix_usable <= num_online_cpus())
+ skip_first_cpu = true;
+
+ err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
+ if (err) {
+ cpus_read_unlock();
+ goto free_irq;
}
- gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
- GFP_KERNEL);
- if (!gc->irq_contexts) {
- err = -ENOMEM;
- goto free_irq_array;
+ cpus_read_unlock();
+ kfree(irqs);
+ return 0;
+
+free_current_gic:
+ kfree(gic);
+free_irq:
+ for (i -= 1; i > 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
+
+ irq_update_affinity_hint(irq, NULL);
+ free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
+ kfree(irqs);
+ return err;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ int *irqs, *start_irqs, irq;
+ unsigned int cpu;
+ int err, i;
+
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ start_irqs = irqs;
for (i = 0; i < nvec; i++) {
- gic = &gc->irq_contexts[i];
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
gic->handler = mana_gd_process_eq_events;
INIT_LIST_HEAD(&gic->eq_list);
spin_lock_init(&gic->lock);
@@ -1372,69 +1571,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
i - 1, pci_name(pdev));
- irq = pci_irq_vector(pdev, i);
- if (irq < 0) {
- err = irq;
- goto free_irq;
+ irqs[i] = pci_irq_vector(pdev, i);
+ if (irqs[i] < 0) {
+ err = irqs[i];
+ goto free_current_gic;
}
- if (!i) {
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- /* If number of IRQ is one extra than number of online CPUs,
- * then we need to assign IRQ0 (hwc irq) and IRQ1 to
- * same CPU.
- * Else we will use different CPUs for IRQ0 and IRQ1.
- * Also we are using cpumask_local_spread instead of
- * cpumask_first for the node, because the node can be
- * mem only.
- */
- if (start_irq_index) {
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
- } else {
- irqs[start_irq_index] = irq;
- }
- } else {
- irqs[i - start_irq_index] = irq;
- err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
- gic->name, gic);
- if (err)
- goto free_irq;
- }
+ err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
- if (err)
+ /* If the number of IRQs is one more than the number of online
+  * CPUs, then IRQ0 (the HWC IRQ) and IRQ1 must be assigned to
+  * the same CPU.
+  * Otherwise IRQ0 and IRQ1 are placed on different CPUs.
+  * We also use cpumask_local_spread() instead of
+  * cpumask_first() for the node, because the node can be
+  * memory-only.
+  */
+ cpus_read_lock();
+ if (nvec > num_online_cpus()) {
+ cpu = cpumask_local_spread(0, gc->numa_node);
+ irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
+ irqs++;
+ nvec -= 1;
+ }
+
+ err = irq_setup(irqs, nvec, gc->numa_node, false);
+ if (err) {
+ cpus_read_unlock();
goto free_irq;
+ }
- gc->max_num_msix = nvec;
- gc->num_msix_usable = nvec;
cpus_read_unlock();
- kfree(irqs);
+ kfree(start_irqs);
return 0;
+free_current_gic:
+ kfree(gic);
free_irq:
- for (j = i - 1; j >= 0; j--) {
- irq = pci_irq_vector(pdev, j);
- gic = &gc->irq_contexts[j];
+ for (i -= 1; i >= 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
-free_irq_array:
- kfree(irqs);
-free_irq_vector:
- cpus_read_unlock();
- pci_free_irq_vectors(pdev);
+ kfree(start_irqs);
return err;
}
+static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_irqs, min_irqs;
+ int nvec, err;
+
+ if (pci_msix_can_alloc_dyn(pdev)) {
+ max_irqs = 1;
+ min_irqs = 1;
+ } else {
+ /* Need 1 interrupt for HWC */
+ max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
+ min_irqs = 2;
+ }
+
+ nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
+ if (nvec < 0)
+ return nvec;
+
+ err = mana_gd_setup_irqs(pdev, nvec);
+ if (err) {
+ pci_free_irq_vectors(pdev);
+ return err;
+ }
+
+ gc->num_msix_usable = nvec;
+ gc->max_num_msix = nvec;
+
+ return 0;
+}
+
+static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct msi_map irq_map;
+ int max_irqs, i, err;
+
+ if (!pci_msix_can_alloc_dyn(pdev))
+ /* remaining IRQs were already allocated along with the HWC IRQ */
+ return 0;
+
+ /* allocate only the remaining IRQs */
+ max_irqs = gc->num_msix_usable - 1;
+
+ for (i = 1; i <= max_irqs; i++) {
+ irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
+ if (!irq_map.virq) {
+ err = irq_map.index;
+ /* the caller cleans up all allocated
+  * IRQs after the HWC is destroyed
+  */
+ return err;
+ }
+ }
+
+ err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
+ if (err)
+ return err;
+
+ gc->max_num_msix = gc->max_num_msix + max_irqs;
+
+ return 0;
+}
+
static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1449,19 +1707,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (irq < 0)
continue;
- gic = &gc->irq_contexts[i];
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
/* Need to clear the hint before free_irq */
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
pci_free_irq_vectors(pdev);
gc->max_num_msix = 0;
gc->num_msix_usable = 0;
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
}
static int mana_gd_setup(struct pci_dev *pdev)
@@ -1476,9 +1736,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (!gc->service_wq)
return -ENOMEM;
- err = mana_gd_setup_irqs(pdev);
+ err = mana_gd_setup_hwc_irqs(pdev);
if (err) {
- dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
+ dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
+ err);
goto free_workqueue;
}
@@ -1494,6 +1755,12 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ err = mana_gd_setup_remaining_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup remaining IRQs: %d\n", err);
+ goto destroy_hwc;
+ }
+
err = mana_gd_detect_devices(pdev);
if (err)
goto destroy_hwc;
@@ -1574,6 +1841,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
+ xa_init(&gc->irq_contexts);
if (gc->is_pf)
gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
@@ -1608,6 +1876,7 @@ unmap_bar:
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
gc->mana_pci_debugfs = NULL;
+ xa_destroy(&gc->irq_contexts);
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1633,6 +1902,8 @@ static void mana_gd_remove(struct pci_dev *pdev)
gc->mana_pci_debugfs = NULL;
+ xa_destroy(&gc->irq_contexts);
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
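
gdma_main.c replaces the flat gc->irq_contexts array with an xarray keyed by MSI-X index, so per-vector contexts can be added and removed one at a time as vectors are allocated dynamically. A minimal sketch of that add/remove lifecycle, using a stand-in context struct and with locking, error handling and the real struct gdma_irq_context layout omitted:

/* Stand-in sketch of the xarray lifecycle now used for IRQ contexts;
 * this is illustrative, not driver code.
 */
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_irq_ctx {
	char name[32];
};

static DEFINE_XARRAY(demo_irq_contexts);

static int demo_add_irq_context(unsigned long msi_index)
{
	struct demo_irq_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	/* keyed by MSI-X index, so no dense array sizing up front */
	xa_store(&demo_irq_contexts, msi_index, ctx, GFP_KERNEL);
	return 0;
}

static void demo_remove_irq_context(unsigned long msi_index)
{
	struct demo_irq_ctx *ctx = xa_load(&demo_irq_contexts, msi_index);

	if (!ctx)
		return;

	xa_erase(&demo_irq_contexts, msi_index);
	kfree(ctx);
}
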
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a8c4d8db75a5..650d22654d49 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, Microsoft Corporation. */
#include <net/mana/gdma.h>
+#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>
@@ -890,8 +891,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
- dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
- ctx->status_code);
+ if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
+ dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+ ctx->status_code);
err = -EPROTO;
goto out;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index ccd2885c939e..016fd808ccad 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -251,10 +251,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct netdev_queue *net_txq;
struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
+ int err, len, num_gso_seg;
unsigned int csum_type;
struct mana_txq *txq;
struct mana_cq *cq;
- int err, len;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -407,6 +407,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_queue_tail(&txq->pending_skbs, skb);
len = skb->len;
+ num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
net_txq = netdev_get_tx_queue(ndev, txq_idx);
err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
@@ -431,10 +432,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* skb may be freed after mana_gd_post_work_request. Do not use it. */
skb = NULL;
+ /* Update the packet and byte counters based on the post-GSO
+  * segment count
+  */
tx_stats = &txq->stats;
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += len;
+ tx_stats->packets += num_gso_seg;
+ tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
tx_busy:
@@ -719,6 +723,78 @@ out:
return err;
}
+static int mana_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ u32 old_speed, rate;
+ int err;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
+ return -EINVAL;
+ }
+
+ if (apc->handle.id && shaper->handle.id != apc->handle.id) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
+ return -EOPNOTSUPP;
+ }
+
+ if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
+ return -EINVAL;
+ }
+
+ rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
+ rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */
+
+ /* Get current speed */
+ err = mana_query_link_cfg(apc);
+ old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
+
+ if (!err) {
+ err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
+ apc->speed = (err) ? old_speed : rate;
+ apc->handle = (err) ? apc->handle : shaper->handle;
+ }
+
+ return err;
+}
+
+static int mana_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ int err;
+
+ err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
+
+ if (!err) {
+ /* Reset mana port context parameters */
+ apc->handle.id = 0;
+ apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
+ apc->speed = 0;
+ }
+
+ return err;
+}
+
+static void mana_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops mana_shaper_ops = {
+ .set = mana_shaper_set,
+ .delete = mana_shaper_del,
+ .capabilities = mana_shaper_cap,
+};
+
static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
@@ -729,6 +805,7 @@ static const struct net_device_ops mana_devops = {
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .net_shaper_ops = &mana_shaper_ops,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
@@ -774,8 +851,12 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
err = mana_gd_send_request(gc, in_len, in_buf, out_len,
out_buf);
if (err || resp->status) {
- dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
- err, resp->status);
+ if (err == -EOPNOTSUPP)
+ return err;
+
+ if (req->req.msg_type != MANA_QUERY_PHY_STAT)
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
return err ? err : -EPROTO;
}
@@ -1161,6 +1242,95 @@ out:
return err;
}
+int mana_query_link_cfg(struct mana_port_context *apc)
+{
+ struct net_device *ndev = apc->ndev;
+ struct mana_query_link_config_resp resp = {};
+ struct mana_query_link_config_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport = apc->port_handle;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to query link config: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured) {
+ err = -EINVAL;
+ return err;
+ }
+ apc->speed = resp.link_speed_mbps;
+ apc->max_speed = resp.qos_speed_mbps;
+ return 0;
+}
+
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping)
+{
+ struct mana_set_bw_clamp_resp resp = {};
+ struct mana_set_bw_clamp_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.link_speed_mbps = speed;
+ req.enable_clamping = enable_clamping;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d\n",
+ speed, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured)
+ netdev_info(ndev, "QoS is unconfigured\n");
+
+ return 0;
+}
+
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
@@ -1911,8 +2081,10 @@ static void mana_destroy_txq(struct mana_port_context *apc)
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
apc->tx_qp[i].txq.napi_initialized = false;
}
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2064,8 +2236,11 @@ static int mana_create_txq(struct mana_port_context *apc,
mana_create_txq_debugfs(apc, i);
- netif_napi_add_tx(net, &cq->napi, mana_poll);
- napi_enable(&cq->napi);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
+ netdev_lock_ops_to_full(net);
+ netif_napi_add_locked(net, &cq->napi, mana_poll);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(net);
txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2101,9 +2276,10 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
-
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
}
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2354,14 +2530,18 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+ netdev_lock_ops_to_full(ndev);
+ netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
+ netdev_unlock_full_to_ops(ndev);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- napi_enable(&cq->napi);
+ netdev_lock_ops_to_full(ndev);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(ndev);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
@@ -2611,6 +2791,88 @@ void mana_query_gf_stats(struct mana_port_context *apc)
apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}
+void mana_query_phy_stats(struct mana_port_context *apc)
+{
+ struct mana_query_phy_stat_resp resp = {};
+ struct mana_query_phy_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(req), sizeof(resp));
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev,
+ "Failed to query PHY stats: %d, resp:0x%x\n",
+ err, resp.hdr.status);
+ return;
+ }
+
+ /* Aggregate drop counters */
+ apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
+ apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
+ apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
+ apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
+ apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
+ apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
+ apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
+ apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
+ apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
+ apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
+ apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
+ apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
+ apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
+ apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
+ apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
+ apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
+ apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
+
+ /* Per TC byte Counters */
+ apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
+ apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
+ apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
+ apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
+ apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
+ apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
+ apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
+ apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
+ apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
+ apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
+ apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
+ apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
+ apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
+ apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
+ apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
+ apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
+ apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
+ apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
+ apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
+ apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
+ apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
+ apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
+ apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
+ apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
+ apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
+ apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
+ apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
+ apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
+ apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
+ apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
+ apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
+}
+
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
@@ -2918,6 +3180,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
goto free_indir;
}
+ debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
+
return 0;
free_indir:
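
Two small pieces of arithmetic in this file are easy to miss: mana_shaper_set() converts the net-shaper bw_max (bits per second, required to be a non-zero multiple of 100 Mbps) to whole Mbps before programming the clamp, and mana_start_xmit() now counts one packet per GSO segment while charging the replicated header bytes of every segment after the first to the byte counter. A worked example with made-up numbers, where gso_hs stands in for the header size computed earlier in mana_start_xmit():

/* Standalone userspace C; values are illustrative only. */
#include <stdio.h>

int main(void)
{
	/* shaper: 2 Gbit/s requested via bw_max (bps) -> 2000 Mbps */
	unsigned long long bw_max = 2000000000ULL;
	unsigned int rate_mbps = bw_max / 1000 / 1000;

	/* TX stats: one GSO skb of 65536 bytes, 54-byte headers,
	 * split into 46 segments on the wire
	 */
	unsigned int len = 65536, gso_hs = 54, num_gso_seg = 46;
	unsigned long long wire_bytes = len + (num_gso_seg - 1ULL) * gso_hs;

	printf("rate=%u Mbps packets+=%u bytes+=%llu\n",
	       rate_mbps, num_gso_seg, wire_bytes);
	return 0;
}
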
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c419626073f5..a1afa75a9463 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -7,10 +7,12 @@
#include <net/mana/mana.h>
-static const struct {
+struct mana_stats_desc {
char name[ETH_GSTRING_LEN];
u16 offset;
-} mana_eth_stats[] = {
+};
+
+static const struct mana_stats_desc mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
@@ -75,6 +77,59 @@ static const struct {
rx_cqe_unknown_type)},
};
+static const struct mana_stats_desc mana_phy_stats[] = {
+ { "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
+ { "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
+ { "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
+ { "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
+ { "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
+ { "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
+ { "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
+ { "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
+ { "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
+ { "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
+ { "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
+ { "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
+ { "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
+ { "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
+ { "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
+ { "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
+ { "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
+ { "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
+ { "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
+ { "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
+ { "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
+ { "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
+ { "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
+ { "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
+ { "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
+ { "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
+ { "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
+ { "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
+ { "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
+ { "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
+ { "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
+ { "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
+ { "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
+ { "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
+ { "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
+ { "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
+ { "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
+ { "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
+ { "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
+ { "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
+ { "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
+ { "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
+ { "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
+ { "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
+ { "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
+ { "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
+ { "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
+ { "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
+ { "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
+ { "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
+};
+
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
struct mana_port_context *apc = netdev_priv(ndev);
@@ -83,8 +138,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues *
- (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
+ return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+ num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -99,6 +154,9 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
ethtool_puts(&data, mana_eth_stats[i].name);
+ for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
+ ethtool_puts(&data, mana_phy_stats[i].name);
+
for (i = 0; i < num_queues; i++) {
ethtool_sprintf(&data, "rx_%d_packets", i);
ethtool_sprintf(&data, "rx_%d_bytes", i);
@@ -128,6 +186,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
+ void *phy_stats = &apc->phy_stats;
struct mana_stats_rx *rx_stats;
struct mana_stats_tx *tx_stats;
unsigned int start;
@@ -151,9 +210,18 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
/* we call mana function to update stats from GDMA */
mana_query_gf_stats(apc);
+ /* Fetch the phy stats from GDMA; these include aggregate tx/rx drop
+  * counters and per-TC (traffic channel) tx/rx packet, byte and pause
+  * counters.
+  */
+ mana_query_phy_stats(apc);
+
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+ for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
+ data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -427,6 +495,12 @@ out:
static int mana_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ err = mana_query_link_cfg(apc);
+ cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed;
+
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_OTHER;
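
The new mana_phy_stats table reuses the descriptor pattern already used for mana_eth_stats: each entry pairs a counter name with an offsetof() into the stats structure, so mana_get_ethtool_stats() can copy every counter out with one generic loop. A small standalone illustration of the pattern (userspace C, demo names only):

/* Illustration of the name/offsetof descriptor pattern; not driver code. */
#include <stddef.h>
#include <stdio.h>

struct demo_stats {
	unsigned long long rx_drops;
	unsigned long long tx_drops;
};

static const struct {
	const char *name;
	size_t offset;
} demo_desc[] = {
	{ "rx_drops", offsetof(struct demo_stats, rx_drops) },
	{ "tx_drops", offsetof(struct demo_stats, tx_drops) },
};

int main(void)
{
	struct demo_stats stats = { .rx_drops = 3, .tx_drops = 7 };
	const char *base = (const char *)&stats;
	size_t i;

	/* one generic loop walks every counter via its offset */
	for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
		printf("%s: %llu\n", demo_desc[i].name,
		       *(const unsigned long long *)(base + demo_desc[i].offset));
	return 0;
}
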
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 932f59d70f41..132626a3f9f7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2394,8 +2394,7 @@ static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
.sync_table = nfp_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+ .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {
{
.n_entries = NFP_NET_N_VXLAN_PORTS,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index fbca8d0efd85..a36215195923 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1303,9 +1303,10 @@ static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
return xlate_ethtool_to_nfp[flow_type];
}
-static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
- struct ethtool_rxnfc *cmd)
+static int nfp_net_get_rxfh_fields(struct net_device *netdev,
+ struct ethtool_rxfh_fields *cmd)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 nfp_rss_flag;
cmd->data = 0;
@@ -1451,16 +1452,16 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXCLSRLALL:
cmd->data = NFP_FS_MAX_ENTRY;
return nfp_net_get_fs_loc(nn, rule_locs);
- case ETHTOOL_GRXFH:
- return nfp_net_get_rss_hash_opts(nn, cmd);
default:
return -EOPNOTSUPP;
}
}
-static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
- struct ethtool_rxnfc *nfc)
+static int nfp_net_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct nfp_net *nn = netdev_priv(netdev);
u32 new_rss_cfg = nn->rss_cfg;
u32 nfp_rss_flag;
int err;
@@ -1763,8 +1764,6 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- return nfp_net_set_rss_hash_opt(nn, cmd);
case ETHTOOL_SRXCLSRLINS:
return nfp_net_fs_add(nn, cmd);
case ETHTOOL_SRXCLSRLDEL:
@@ -2506,6 +2505,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
.get_rxfh = nfp_net_get_rxfh,
.set_rxfh = nfp_net_set_rxfh,
+ .get_rxfh_fields = nfp_net_get_rxfh_fields,
+ .set_rxfh_fields = nfp_net_set_rxfh_fields,
.get_regs_len = nfp_net_get_regs_len,
.get_regs = nfp_net_get_regs,
.set_dump = nfp_app_set_dump,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 4c377bdc62c8..136bfa3516d0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -409,6 +409,7 @@ static void ionic_remove(struct pci_dev *pdev)
timer_shutdown_sync(&ionic->watchdog_timer);
if (ionic->lif) {
+ cancel_work_sync(&ionic->lif->deferred.work);
/* prevent adminq cmds if already known as down */
if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 18b9c8a810ae..093c5358b6e8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -424,9 +424,9 @@ do_check_time:
if (fw_hb_ready != idev->fw_hb_ready) {
idev->fw_hb_ready = fw_hb_ready;
if (!fw_hb_ready)
- dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat stalled at %u\n", fw_hb);
else
- dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb);
+ dev_info(ionic->dev, "FW heartbeat restored at %u\n", fw_hb);
}
if (!fw_hb_ready)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7707a9e53c43..48cb5d30b5f6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3526,10 +3526,6 @@ void ionic_lif_free(struct ionic_lif *lif)
lif->info = NULL;
lif->info_pa = 0;
- /* unmap doorbell page */
- ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
- lif->kern_dbpage = NULL;
-
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
@@ -3555,6 +3551,9 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
+ lif->kern_dbpage = NULL;
+
ionic_lif_reset(lif);
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index e50e1df0a433..23982704273c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1168,8 +1168,11 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
-static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct qede_dev *edev = netdev_priv(dev);
+
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
@@ -1206,9 +1209,6 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = QEDE_RSS_COUNT(edev);
break;
- case ETHTOOL_GRXFH:
- rc = qede_get_rss_flags(edev, info);
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = qede_get_arfs_filter_count(edev);
info->data = QEDE_RFS_MAX_FLTR;
@@ -1227,14 +1227,17 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return rc;
}
-static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+static int qede_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
struct qed_update_vport_params *vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
u8 set_caps = 0, clr_caps = 0;
int rc = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ "Set rss flags command parameters: flow type = %d, data = %u\n",
info->flow_type, info->data);
switch (info->flow_type) {
@@ -1337,9 +1340,6 @@ static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
int rc;
switch (info->cmd) {
- case ETHTOOL_SRXFH:
- rc = qede_set_rss_flags(edev, info);
- break;
case ETHTOOL_SRXCLSRLINS:
rc = qede_add_cls_rule(edev, info);
break;
@@ -2293,6 +2293,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_ts_info = qede_get_ts_info,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
@@ -2335,6 +2337,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
+ .get_rxfh_fields = qede_get_rxfh_fields,
+ .set_rxfh_fields = qede_set_rxfh_fields,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.get_per_queue_coalesce = qede_get_per_coalesce,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 985026dd816f..7e341e026489 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -987,20 +987,17 @@ static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
}, qede_udp_tunnels_vxlan = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
}, qede_udp_tunnels_geneve = {
.sync_table = qede_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
},
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index eb69121df726..53cdd36c4123 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -486,7 +486,6 @@ static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table)
static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = {
.sync_table = qlcnic_udp_tunnel_sync,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 43170500d566..9c601f271c02 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -216,8 +216,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },
{ PCI_VDEVICE(REALTEK, 0x8169) },
- { PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
{ PCI_VDEVICE(DLINK, 0x4300) },
{ PCI_VDEVICE(DLINK, 0x4302) },
{ PCI_VDEVICE(AT, 0xc107) },
@@ -5262,7 +5260,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_61)
phy_disable_eee_mode(tp->phydev,
ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
- phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 498cfe4d0cac..20decdeb9fdb 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -288,6 +288,7 @@ struct rtase_ring {
u32 cur_idx;
u32 dirty_idx;
u16 index;
+ u8 type;
struct sk_buff *skbuff[RTASE_NUM_DESC];
void *data_buf[RTASE_NUM_DESC];
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 4d37217e9a14..ef13109c49cf 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -326,6 +326,7 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_TX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++) {
@@ -345,6 +346,9 @@ static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
ring->ivec = &tp->int_vector[0];
list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
}
+
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
}
static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
@@ -590,6 +594,7 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->cur_idx = 0;
ring->dirty_idx = 0;
ring->index = idx;
+ ring->type = NETDEV_QUEUE_TYPE_RX;
ring->alloc_fail = 0;
for (i = 0; i < RTASE_NUM_DESC; i++)
@@ -597,6 +602,8 @@ static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
ring->ring_handler = rx_handler;
ring->ivec = &tp->int_vector[idx];
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, &ring->ivec->napi);
list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
}
@@ -1161,8 +1168,12 @@ static void rtase_down(struct net_device *dev)
ivec = &tp->int_vector[i];
napi_disable(&ivec->napi);
list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
- ring_entry)
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
list_del(&ring->ring_entry);
+ }
}
netif_tx_disable(dev);
@@ -1518,8 +1529,12 @@ static void rtase_sw_reset(struct net_device *dev)
for (i = 0; i < tp->int_nums; i++) {
ivec = &tp->int_vector[i];
list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
- ring_entry)
+ ring_entry) {
+ netif_queue_set_napi(tp->dev, ring->index,
+ ring->type, NULL);
+
list_del(&ring->ring_entry);
+ }
}
ret = rtase_init_ring(dev);
@@ -1871,6 +1886,18 @@ static void rtase_init_netdev_ops(struct net_device *dev)
dev->ethtool_ops = &rtase_ethtool_ops;
}
+static void rtase_init_napi(struct rtase_private *tp)
+{
+ u16 i;
+
+ for (i = 0; i < tp->int_nums; i++) {
+ netif_napi_add_config(tp->dev, &tp->int_vector[i].napi,
+ tp->int_vector[i].poll, i);
+ netif_napi_set_irq(&tp->int_vector[i].napi,
+ tp->int_vector[i].irq);
+ }
+}
+
static void rtase_reset_interrupt(struct pci_dev *pdev,
const struct rtase_private *tp)
{
@@ -1956,9 +1983,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[0].name, 0x0, sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[0].ring_list);
- netif_napi_add(tp->dev, &tp->int_vector[0].napi,
- tp->int_vector[0].poll);
-
/* interrupt vector 1 ~ 3 */
for (i = 1; i < tp->int_nums; i++) {
tp->int_vector[i].tp = tp;
@@ -1972,9 +1996,6 @@ static void rtase_init_int_vector(struct rtase_private *tp)
memset(tp->int_vector[i].name, 0x0,
sizeof(tp->int_vector[0].name));
INIT_LIST_HEAD(&tp->int_vector[i].ring_list);
-
- netif_napi_add(tp->dev, &tp->int_vector[i].napi,
- tp->int_vector[i].poll);
}
}
@@ -2206,6 +2227,8 @@ static int rtase_init_one(struct pci_dev *pdev,
goto err_out_del_napi;
}
+ rtase_init_napi(tp);
+
rtase_init_netdev_ops(dev);
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 4a439b34114d..ad73733644f9 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -308,8 +308,8 @@ static int sxgbe_set_coalesce(struct net_device *dev,
return 0;
}
-static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
+static int sxgbe_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd)
{
cmd->data = 0;
@@ -344,26 +344,11 @@ static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+static int sxgbe_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *cmd,
+ struct netlink_ext_ack *extack)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = sxgbe_get_rss_hash_opts(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
- struct ethtool_rxnfc *cmd)
-{
u32 reg_val = 0;
/* RSS does not support anything other than hashing
@@ -421,22 +406,6 @@ static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
return 0;
}
-static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = sxgbe_set_rss_hash_opt(priv, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static void sxgbe_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
@@ -489,8 +458,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_channels = sxgbe_get_channels,
.get_coalesce = sxgbe_get_coalesce,
.set_coalesce = sxgbe_set_coalesce,
- .get_rxnfc = sxgbe_get_rxnfc,
- .set_rxnfc = sxgbe_set_rxnfc,
+ .get_rxfh_fields = sxgbe_get_rxfh_fields,
+ .set_rxfh_fields = sxgbe_set_rxfh_fields,
.get_regs = sxgbe_get_regs,
.get_regs_len = sxgbe_get_regs_len,
.get_eee = sxgbe_get_eee,
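
This sxgbe conversion, like the sfc/falcon/siena ones that follow, moves RX flow-hash configuration out of the multiplexed ETHTOOL_GRXFH/ETHTOOL_SRXFH cases of .get_rxnfc/.set_rxnfc and into the dedicated .get_rxfh_fields/.set_rxfh_fields ethtool_ops callbacks, which receive a struct ethtool_rxfh_fields (flow_type, data, rss_context) directly. A hedged sketch of the callback shape, using hypothetical "bar" names; the hardware programming is elided.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int bar_get_rxfh_fields(struct net_device *dev,
			       struct ethtool_rxfh_fields *fields)
{
	switch (fields->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		fields->data = RXH_IP_SRC | RXH_IP_DST |
			       RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	default:
		fields->data = 0;
		break;
	}
	return 0;
}

static int bar_set_rxfh_fields(struct net_device *dev,
			       const struct ethtool_rxfh_fields *fields,
			       struct netlink_ext_ack *extack)
{
	/* Reject combinations the hardware cannot hash on, then program
	 * the RSS hash-control register (omitted in this sketch).
	 */
	if (fields->data & ~(RXH_IP_SRC | RXH_IP_DST |
			     RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;
	return 0;
}

static const struct ethtool_ops bar_ethtool_ops = {
	.get_rxfh_fields = bar_get_rxfh_fields,
	.set_rxfh_fields = bar_set_rxfh_fields,
};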
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 47349c148c0c..fcec81f862ec 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3985,7 +3985,6 @@ static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
.set_port = efx_ef10_udp_tnl_set_port,
.unset_port = efx_ef10_udp_tnl_unset_port,
- .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
.tables = {
{
.n_entries = 16,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 83d715544f7f..23c6a7df78d0 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -262,11 +262,13 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_per_ctx_fields = true,
.rxfh_per_ctx_key = true,
.cap_rss_rxnfc_adds = true,
.rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_ethtool_get_rxfh_fields,
.create_rxfh_context = efx_ethtool_create_rxfh_context,
.modify_rxfh_context = efx_ethtool_modify_rxfh_context,
.remove_rxfh_context = efx_ethtool_remove_rxfh_context,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 2d734496733f..823263969f92 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -800,66 +800,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
-int efx_ethtool_get_rxnfc(struct net_device *net_dev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- u32 rss_context = 0;
- s32 rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = efx->n_rx_channels;
- return 0;
+ struct efx_rss_context_priv *ctx;
+ __u64 data;
+ int rc = 0;
- case ETHTOOL_GRXFH: {
- struct efx_rss_context_priv *ctx = &efx->rss_context.priv;
- __u64 data;
+ ctx = &efx->rss_context.priv;
- mutex_lock(&net_dev->ethtool->rss_lock);
- if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_find_rss_context_entry(efx, info->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
+ mutex_lock(&net_dev->ethtool->rss_lock);
+ if (info->rss_context) {
+ ctx = efx_find_rss_context_entry(efx, info->rss_context);
+ if (!ctx) {
+ rc = -ENOENT;
+ goto out_unlock;
}
+ }
- data = 0;
- if (!efx_rss_active(ctx)) /* No RSS */
- goto out_setdata_unlock;
+ data = 0;
+ if (!efx_rss_active(ctx)) /* No RSS */
+ goto out_setdata_unlock;
- switch (info->flow_type & ~FLOW_RSS) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (ctx->rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (ctx->rx_hash_udp_4tuple)
data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
+ else
data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
out_setdata_unlock:
- info->data = data;
+ info->data = data;
out_unlock:
- mutex_unlock(&net_dev->ethtool->rss_lock);
- return rc;
- }
+ mutex_unlock(&net_dev->ethtool->rss_lock);
+ return rc;
+}
+
+int efx_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 rss_context = 0;
+ s32 rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index fc52e891637d..24db4fccbe78 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -49,6 +49,8 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
int efx_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 04766448a545..6685e71ab13f 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -944,6 +944,33 @@ static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
}
static int
+ef4_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct ef4_nic *efx = netdev_priv(net_dev);
+ unsigned int min_revision = 0;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EF4_REV_FALCON_B0;
+ break;
+ default:
+ break;
+ }
+ if (ef4_nic_rev(efx) < min_revision)
+ info->data = 0;
+ return 0;
+}
+
+static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -954,29 +981,6 @@ ef4_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- unsigned min_revision = 0;
-
- info->data = 0;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- fallthrough;
- case UDP_V4_FLOW:
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case IPV4_FLOW:
- info->data |= RXH_IP_SRC | RXH_IP_DST;
- min_revision = EF4_REV_FALCON_B0;
- break;
- default:
- break;
- }
- if (ef4_nic_rev(efx) < min_revision)
- info->data = 0;
- return 0;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = ef4_filter_get_rx_id_limit(efx);
if (info->data == 0)
@@ -1343,6 +1347,7 @@ const struct ethtool_ops ef4_ethtool_ops = {
.get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
.get_rxfh = ef4_ethtool_get_rxfh,
.set_rxfh = ef4_ethtool_set_rxfh,
+ .get_rxfh_fields = ef4_ethtool_get_rxfh_fields,
.get_module_info = ef4_ethtool_get_module_info,
.get_module_eeprom = ef4_ethtool_get_module_eeprom,
.get_link_ksettings = ef4_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index c5ad84db9613..994909789bfe 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -264,6 +264,7 @@ const struct ethtool_ops efx_siena_ethtool_ops = {
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
.get_rxfh = efx_siena_ethtool_get_rxfh,
.set_rxfh = efx_siena_ethtool_set_rxfh,
+ .get_rxfh_fields = efx_siena_ethtool_get_rxfh_fields,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_siena_ethtool_get_module_info,
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index eeee676fdca7..47cd16a113cf 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -801,6 +801,46 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
return rc;
}
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ __u64 data;
+
+ data = 0;
+ if (!efx_rss_active(&efx->rss_context)) /* No RSS */
+ goto out_setdata;
+
+ switch (info->flow_type) {
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (efx->rss_context.rx_hash_udp_4tuple)
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ else
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+ RXH_IP_SRC | RXH_IP_DST);
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ break;
+ }
+out_setdata:
+ info->data = data;
+ return 0;
+}
+
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
@@ -813,43 +853,6 @@ int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
info->data = efx->n_rx_channels;
return 0;
- case ETHTOOL_GRXFH: {
- __u64 data;
-
- data = 0;
- if (!efx_rss_active(&efx->rss_context)) /* No RSS */
- goto out_setdata;
-
- switch (info->flow_type) {
- case UDP_V4_FLOW:
- case UDP_V6_FLOW:
- if (efx->rss_context.rx_hash_udp_4tuple)
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- else
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- case TCP_V4_FLOW:
- case TCP_V6_FLOW:
- data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
- RXH_IP_SRC | RXH_IP_DST);
- break;
- case SCTP_V4_FLOW:
- case SCTP_V6_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_ESP_V6_FLOW:
- case IPV4_FLOW:
- case IPV6_FLOW:
- data = RXH_IP_SRC | RXH_IP_DST;
- break;
- default:
- break;
- }
-out_setdata:
- info->data = data;
- return rc;
- }
-
case ETHTOOL_GRXCLSRLCNT:
info->data = efx_filter_get_rx_id_limit(efx);
if (info->data == 0)
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
index d674bab0f65b..278d69e920d9 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -46,6 +46,8 @@ int efx_siena_ethtool_get_rxfh(struct net_device *net_dev,
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_siena_ethtool_get_rxfh_fields(struct net_device *net_dev,
+ struct ethtool_rxfh_fields *info);
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
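
Across the ef10, falcon and siena hunks the reported mask is composed from the same RXH_* bits: RXH_IP_SRC | RXH_IP_DST for 2-tuple hashing, plus RXH_L4_B_0_1 | RXH_L4_B_2_3 when the L4 ports are hashed as well (for UDP only when the context's rx_hash_udp_4tuple flag is set). Purely as an illustration, the repeated switch statements are equivalent to a small lookup table along these lines; the table contents below are an example, not the drivers' exact policy.

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct flow_hash_default {
	u32 flow_type;
	u64 fields;
};

static const struct flow_hash_default example_defaults[] = {
	{ TCP_V4_FLOW,  RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 },
	{ TCP_V6_FLOW,  RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 },
	{ UDP_V4_FLOW,  RXH_IP_SRC | RXH_IP_DST },	/* 4-tuple only if enabled */
	{ UDP_V6_FLOW,  RXH_IP_SRC | RXH_IP_DST },
	{ SCTP_V4_FLOW, RXH_IP_SRC | RXH_IP_DST },
	{ SCTP_V6_FLOW, RXH_IP_SRC | RXH_IP_DST },
};

static u64 example_default_fields(u32 flow_type)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_defaults); i++)
		if (example_defaults[i].flow_type == flow_type)
			return example_defaults[i].fields;
	return 0;
}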
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ea5da5793362..cbffccb3b9af 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -396,17 +396,6 @@ enum request_irq_err {
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
-/* Physical Coding Sublayer */
-struct rgmii_adv {
- unsigned int pause;
- unsigned int duplex;
- unsigned int lp_pause;
- unsigned int lp_duplex;
-};
-
-#define STMMAC_PCS_PAUSE 1
-#define STMMAC_PCS_ASYM_PAUSE 2
-
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
index 3e86810717d3..32b5d1492e2e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -44,24 +44,50 @@
struct ls1x_dwmac {
struct plat_stmmacenet_data *plat_dat;
struct regmap *regmap;
+ unsigned int id;
};
-static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+struct ls1x_data {
+ int (*setup)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat);
+ int (*init)(struct platform_device *pdev, void *bsp_priv);
+};
+
+static int ls1b_dwmac_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
- struct ls1x_dwmac *dwmac = priv;
- struct plat_stmmacenet_data *plat = dwmac->plat_dat;
- struct regmap *regmap = dwmac->regmap;
+ struct ls1x_dwmac *dwmac = plat_dat->bsp_priv;
struct resource *res;
- unsigned long reg_base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
+ /* This shouldn't fail - stmmac_get_platform_resources()
+ * already mapped this resource.
+ */
dev_err(&pdev->dev, "Could not get IO_MEM resources\n");
return -EINVAL;
}
- reg_base = (unsigned long)res->start;
- if (reg_base == LS1B_GMAC0_BASE) {
+ if (res->start == LS1B_GMAC0_BASE) {
+ dwmac->id = 0;
+ } else if (res->start == LS1B_GMAC1_BASE) {
+ dwmac->id = 1;
+ } else {
+ dev_err(&pdev->dev, "Invalid Ethernet MAC base address %pR",
+ res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+
+ if (dwmac->id == 0) {
switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII_ID:
regmap_update_bits(regmap, LS1X_SYSCON0,
@@ -80,7 +106,7 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
}
regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
- } else if (reg_base == LS1B_GMAC1_BASE) {
+ } else if (dwmac->id == 1) {
regmap_update_bits(regmap, LS1X_SYSCON0,
GMAC1_USE_UART1 | GMAC1_USE_UART0,
GMAC1_USE_UART1 | GMAC1_USE_UART0);
@@ -104,10 +130,6 @@ static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
}
regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0);
- } else {
- dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx",
- reg_base);
- return -EINVAL;
}
return 0;
@@ -143,9 +165,9 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
+ const struct ls1x_data *data;
struct regmap *regmap;
struct ls1x_dwmac *dwmac;
- int (*init)(struct platform_device *pdev, void *priv);
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -159,8 +181,8 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
"Unable to find syscon\n");
- init = of_device_get_match_data(&pdev->dev);
- if (!init) {
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
dev_err(&pdev->dev, "No of match data provided\n");
return -EINVAL;
}
@@ -175,21 +197,36 @@ static int ls1x_dwmac_probe(struct platform_device *pdev)
"dt configuration failed\n");
plat_dat->bsp_priv = dwmac;
- plat_dat->init = init;
+ plat_dat->init = data->init;
dwmac->plat_dat = plat_dat;
dwmac->regmap = regmap;
+ if (data->setup) {
+ ret = data->setup(pdev, plat_dat);
+ if (ret)
+ return ret;
+ }
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
+static const struct ls1x_data ls1b_dwmac_data = {
+ .setup = ls1b_dwmac_setup,
+ .init = ls1b_dwmac_syscon_init,
+};
+
+static const struct ls1x_data ls1c_dwmac_data = {
+ .init = ls1c_dwmac_syscon_init,
+};
+
static const struct of_device_id ls1x_dwmac_match[] = {
{
.compatible = "loongson,ls1b-gmac",
- .data = &ls1b_dwmac_syscon_init,
+ .data = &ls1b_dwmac_data,
},
{
.compatible = "loongson,ls1c-emac",
- .data = &ls1c_dwmac_syscon_init,
+ .data = &ls1c_dwmac_data,
},
{ }
};
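
The dwmac-loongson1 rework replaces the bare init-function pointer in the OF match data with a struct ls1x_data that carries an optional setup() hook alongside init(), letting the LS1B variant decode which GMAC instance it drives once at probe time instead of re-reading the MMIO base in the syscon hook. The per-compatible ops-struct pattern, reduced to a sketch (names outside the hunks above are illustrative):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct variant_data {
	int (*setup)(struct platform_device *pdev, void *priv);
	int (*init)(struct platform_device *pdev, void *priv);
};

static int example_probe(struct platform_device *pdev, void *priv)
{
	const struct variant_data *data;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;

	if (data->setup) {		/* optional, variant-specific */
		ret = data->setup(pdev, priv);
		if (ret)
			return ret;
	}
	return data->init ? data->init(pdev, priv) : 0;
}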
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 22653ffd2a04..c0c44916f849 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -41,6 +41,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ plat_dat->mac_interface = PHY_INTERFACE_MODE_NA;
plat_dat->has_gmac = true;
reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
@@ -49,9 +50,9 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(reg);
}
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e30bdf72331a..d8fd4d8f6ced 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -622,6 +622,11 @@ static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
}
}
+static void ethqos_pcs_set_inband(struct stmmac_priv *priv, bool enable)
+{
+ stmmac_pcs_ctrl_ane(priv, enable, 0, 0);
+}
+
/* On interface toggle MAC registers gets reset.
* Configure MAC block for SGMII on ethernet phy link up
*/
@@ -640,7 +645,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_2500);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ ethqos_pcs_set_inband(priv, false);
break;
case SPEED_1000:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
@@ -648,12 +653,12 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
@@ -663,7 +668,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos, int speed)
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
ethqos_set_serdes_speed(ethqos, SPEED_1000);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
+ ethqos_pcs_set_inband(priv, true);
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 700858ff6f7c..79b92130a03f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -24,12 +24,21 @@
#include "stmmac_platform.h"
struct rk_priv_data;
+
+struct rk_reg_speed_data {
+ unsigned int rgmii_10;
+ unsigned int rgmii_100;
+ unsigned int rgmii_1000;
+ unsigned int rmii_10;
+ unsigned int rmii_100;
+};
+
struct rk_gmac_ops {
void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay);
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
- void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
- void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ int (*set_speed)(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed);
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
@@ -58,7 +67,7 @@ enum rk_clocks_index {
};
struct rk_priv_data {
- struct platform_device *pdev;
+ struct device *dev;
phy_interface_t phy_iface;
int id;
struct regulator *regulator;
@@ -71,7 +80,6 @@ struct rk_priv_data {
struct clk_bulk_data *clks;
int num_clks;
- struct clk *clk_mac;
struct clk *clk_phy;
struct reset_control *phy_reset;
@@ -83,6 +91,64 @@ struct rk_priv_data {
struct regmap *php_grf;
};
+static int rk_set_reg_speed(struct rk_priv_data *bsp_priv,
+ const struct rk_reg_speed_data *rsd,
+ unsigned int reg, phy_interface_t interface,
+ int speed)
+{
+ unsigned int val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ if (speed == SPEED_10) {
+ val = rsd->rgmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rgmii_100;
+ } else if (speed == SPEED_1000) {
+ val = rsd->rgmii_1000;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ if (speed == SPEED_10) {
+ val = rsd->rmii_10;
+ } else if (speed == SPEED_100) {
+ val = rsd->rmii_100;
+ } else {
+ /* Phylink will not allow inappropriate speeds for
+ * interface modes, so this should never happen.
+ */
+ return -EINVAL;
+ }
+ } else {
+ /* This should never happen, as .get_interfaces() limits
+ * the interface modes that are supported to RGMII and/or
+ * RMII.
+ */
+ return -EINVAL;
+ }
+
+ regmap_write(bsp_priv->grf, reg, val);
+
+ return 0;
+
+}
+
+static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ long rate;
+
+ rate = rgmii_clock(speed);
+ if (rate < 0)
+ return rate;
+
+ return clk_set_rate(clk_mac_speed, rate);
+}
+
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
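
HIWORD_UPDATE() in the context above encodes the usual Rockchip GRF write convention: the upper 16 bits of these registers act as a per-bit write-enable mask, so a single regmap_write() can update a few low bits without a read-modify-write. The rk3*_reg_speed_data values passed to rk_set_reg_speed() are all built this way. A tiny worked example (the field position is made up):

/* Hi-word write-enable idiom: update a hypothetical 2-bit speed field
 * at bits [3:2] of a GRF register with a single write.
 */
#define HIWORD_UPDATE(val, mask, shift) \
	((val) << (shift) | (mask) << ((shift) + 16))

#define EXAMPLE_SPEED(x)	HIWORD_UPDATE(x, 0x3, 2)

/*
 * EXAMPLE_SPEED(1) == 0x000c0004: bits 19:18 enable writing bits 3:2,
 * which receive the value 1; all other register bits are untouched.
 *
 *	regmap_write(bsp_priv->grf, EXAMPLE_REG, EXAMPLE_SPEED(1));
 */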
@@ -177,42 +243,38 @@ static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
PX30_GMAC_PHY_INTF_SEL_RMII);
}
-static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int px30_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- int ret;
+ struct device *dev = bsp_priv->dev;
+ unsigned int con1;
+ long rate;
if (!clk_mac_speed) {
dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
- return;
+ return -EINVAL;
}
if (speed == 10) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_10M);
-
- ret = clk_set_rate(clk_mac_speed, 2500000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
- __func__, ret);
+ con1 = PX30_GMAC_SPEED_10M;
+ rate = 2500000;
} else if (speed == 100) {
- regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
- PX30_GMAC_SPEED_100M);
-
- ret = clk_set_rate(clk_mac_speed, 25000000);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
- __func__, ret);
-
+ con1 = PX30_GMAC_SPEED_100M;
+ rate = 25000000;
} else {
dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return -EINVAL;
}
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, con1);
+
+ return clk_set_rate(clk_mac_speed, rate);
}
static const struct rk_gmac_ops px30_ops = {
.set_to_rmii = px30_set_to_rmii,
- .set_rmii_speed = px30_set_rmii_speed,
+ .set_speed = px30_set_speed,
};
#define RK3128_GRF_MAC_CON0 0x0168
@@ -261,45 +323,25 @@ static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
}
-static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3128_reg_speed_data = {
+ .rgmii_10 = RK3128_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3128_GMAC_CLK_25M,
+ .rgmii_1000 = RK3128_GMAC_CLK_125M,
+ .rmii_10 = RK3128_GMAC_RMII_CLK_2_5M | RK3128_GMAC_SPEED_10M,
+ .rmii_100 = RK3128_GMAC_RMII_CLK_25M | RK3128_GMAC_SPEED_100M,
+};
-static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3128_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_2_5M |
- RK3128_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
- RK3128_GMAC_RMII_CLK_25M |
- RK3128_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3128_reg_speed_data,
+ RK3128_GRF_MAC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3128_ops = {
.set_to_rgmii = rk3128_set_to_rgmii,
.set_to_rmii = rk3128_set_to_rmii,
- .set_rgmii_speed = rk3128_set_rgmii_speed,
- .set_rmii_speed = rk3128_set_rmii_speed,
+ .set_speed = rk3128_set_speed,
};
#define RK3228_GRF_MAC_CON0 0x0900
@@ -358,37 +400,19 @@ static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
}
-static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3228_reg_speed_data = {
+ .rgmii_10 = RK3228_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3228_GMAC_CLK_25M,
+ .rgmii_1000 = RK3228_GMAC_CLK_125M,
+ .rmii_10 = RK3228_GMAC_RMII_CLK_2_5M | RK3228_GMAC_SPEED_10M,
+ .rmii_100 = RK3228_GMAC_RMII_CLK_25M | RK3228_GMAC_SPEED_100M,
+};
-static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3228_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_2_5M |
- RK3228_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
- RK3228_GMAC_RMII_CLK_25M |
- RK3228_GMAC_SPEED_100M);
- else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return rk_set_reg_speed(bsp_priv, &rk3228_reg_speed_data,
+ RK3228_GRF_MAC_CON1, interface, speed);
}
static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
@@ -402,8 +426,7 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
static const struct rk_gmac_ops rk3228_ops = {
.set_to_rgmii = rk3228_set_to_rgmii,
.set_to_rmii = rk3228_set_to_rmii,
- .set_rgmii_speed = rk3228_set_rgmii_speed,
- .set_rmii_speed = rk3228_set_rmii_speed,
+ .set_speed = rk3228_set_speed,
.integrated_phy_powerup = rk3228_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
@@ -454,45 +477,25 @@ static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
}
-static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3288_reg_speed_data = {
+ .rgmii_10 = RK3288_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3288_GMAC_CLK_25M,
+ .rgmii_1000 = RK3288_GMAC_CLK_125M,
+ .rmii_10 = RK3288_GMAC_RMII_CLK_2_5M | RK3288_GMAC_SPEED_10M,
+ .rmii_100 = RK3288_GMAC_RMII_CLK_25M | RK3288_GMAC_SPEED_100M,
+};
-static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3288_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_2_5M |
- RK3288_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
- RK3288_GMAC_RMII_CLK_25M |
- RK3288_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3288_reg_speed_data,
+ RK3288_GRF_SOC_CON1, interface, speed);
}
static const struct rk_gmac_ops rk3288_ops = {
.set_to_rgmii = rk3288_set_to_rgmii,
.set_to_rmii = rk3288_set_to_rmii,
- .set_rgmii_speed = rk3288_set_rgmii_speed,
- .set_rmii_speed = rk3288_set_rmii_speed,
+ .set_speed = rk3288_set_speed,
};
#define RK3308_GRF_MAC_CON0 0x04a0
@@ -511,24 +514,21 @@ static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3308_GMAC_PHY_INTF_SEL_RMII);
}
-static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
+static const struct rk_reg_speed_data rk3308_reg_speed_data = {
+ .rmii_10 = RK3308_GMAC_SPEED_10M,
+ .rmii_100 = RK3308_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
- RK3308_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rk3308_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rk3308_reg_speed_data,
+ RK3308_GRF_MAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rk3308_ops = {
.set_to_rmii = rk3308_set_to_rmii,
- .set_rmii_speed = rk3308_set_rmii_speed,
+ .set_speed = rk3308_set_speed,
};
#define RK3328_GRF_MAC_CON0 0x0900
@@ -590,41 +590,26 @@ static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3328_GMAC_RMII_MODE);
}
-static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
- RK3328_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3328_reg_speed_data = {
+ .rgmii_10 = RK3328_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3328_GMAC_CLK_25M,
+ .rgmii_1000 = RK3328_GMAC_CLK_125M,
+ .rmii_10 = RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M,
+ .rmii_100 = RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M,
+};
-static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3328_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
- RK3328_GRF_MAC_CON1;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_2_5M |
- RK3328_GMAC_SPEED_10M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, reg,
- RK3328_GMAC_RMII_CLK_25M |
- RK3328_GMAC_SPEED_100M);
+ if (interface == PHY_INTERFACE_MODE_RMII && bsp_priv->integrated_phy)
+ reg = RK3328_GRF_MAC_CON2;
else
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ reg = RK3328_GRF_MAC_CON1;
+
+ return rk_set_reg_speed(bsp_priv, &rk3328_reg_speed_data, reg,
+ interface, speed);
}
static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
@@ -638,8 +623,7 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
static const struct rk_gmac_ops rk3328_ops = {
.set_to_rgmii = rk3328_set_to_rgmii,
.set_to_rmii = rk3328_set_to_rmii,
- .set_rgmii_speed = rk3328_set_rgmii_speed,
- .set_rmii_speed = rk3328_set_rmii_speed,
+ .set_speed = rk3328_set_speed,
.integrated_phy_powerup = rk3328_integrated_phy_powerup,
.integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
@@ -690,45 +674,25 @@ static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
}
-static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3366_reg_speed_data = {
+ .rgmii_10 = RK3366_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3366_GMAC_CLK_25M,
+ .rgmii_1000 = RK3366_GMAC_CLK_125M,
+ .rmii_10 = RK3366_GMAC_RMII_CLK_2_5M | RK3366_GMAC_SPEED_10M,
+ .rmii_100 = RK3366_GMAC_RMII_CLK_25M | RK3366_GMAC_SPEED_100M,
+};
-static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3366_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_2_5M |
- RK3366_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
- RK3366_GMAC_RMII_CLK_25M |
- RK3366_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3366_reg_speed_data,
+ RK3366_GRF_SOC_CON6, interface, speed);
}
static const struct rk_gmac_ops rk3366_ops = {
.set_to_rgmii = rk3366_set_to_rgmii,
.set_to_rmii = rk3366_set_to_rmii,
- .set_rgmii_speed = rk3366_set_rgmii_speed,
- .set_rmii_speed = rk3366_set_rmii_speed,
+ .set_speed = rk3366_set_speed,
};
#define RK3368_GRF_SOC_CON15 0x043c
@@ -777,45 +741,25 @@ static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
}
-static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3368_reg_speed_data = {
+ .rgmii_10 = RK3368_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3368_GMAC_CLK_25M,
+ .rgmii_1000 = RK3368_GMAC_CLK_125M,
+ .rmii_10 = RK3368_GMAC_RMII_CLK_2_5M | RK3368_GMAC_SPEED_10M,
+ .rmii_100 = RK3368_GMAC_RMII_CLK_25M | RK3368_GMAC_SPEED_100M,
+};
-static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3368_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_2_5M |
- RK3368_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
- RK3368_GMAC_RMII_CLK_25M |
- RK3368_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3368_reg_speed_data,
+ RK3368_GRF_SOC_CON15, interface, speed);
}
static const struct rk_gmac_ops rk3368_ops = {
.set_to_rgmii = rk3368_set_to_rgmii,
.set_to_rmii = rk3368_set_to_rmii,
- .set_rgmii_speed = rk3368_set_rgmii_speed,
- .set_rmii_speed = rk3368_set_rmii_speed,
+ .set_speed = rk3368_set_speed,
};
#define RK3399_GRF_SOC_CON5 0xc214
@@ -864,45 +808,25 @@ static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
}
-static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_2_5M);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_25M);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_CLK_125M);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3399_reg_speed_data = {
+ .rgmii_10 = RK3399_GMAC_CLK_2_5M,
+ .rgmii_100 = RK3399_GMAC_CLK_25M,
+ .rgmii_1000 = RK3399_GMAC_CLK_125M,
+ .rmii_10 = RK3399_GMAC_RMII_CLK_2_5M | RK3399_GMAC_SPEED_10M,
+ .rmii_100 = RK3399_GMAC_RMII_CLK_25M | RK3399_GMAC_SPEED_100M,
+};
-static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3399_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_2_5M |
- RK3399_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
- RK3399_GMAC_RMII_CLK_25M |
- RK3399_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+ return rk_set_reg_speed(bsp_priv, &rk3399_reg_speed_data,
+ RK3399_GRF_SOC_CON5, interface, speed);
}
static const struct rk_gmac_ops rk3399_ops = {
.set_to_rgmii = rk3399_set_to_rgmii,
.set_to_rmii = rk3399_set_to_rmii,
- .set_rgmii_speed = rk3399_set_rgmii_speed,
- .set_rmii_speed = rk3399_set_rmii_speed,
+ .set_speed = rk3399_set_speed,
};
#define RK3528_VO_GRF_GMAC_CON 0x0018
@@ -965,43 +889,34 @@ static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3528_GMAC0_CLK_RMII_DIV2);
}
-static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
+static const struct rk_reg_speed_data rk3528_gmac0_reg_speed_data = {
+ .rmii_10 = RK3528_GMAC0_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC0_CLK_RMII_DIV2,
+};
- if (speed == 10)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_CLK_RGMII_DIV50);
- else if (speed == 100)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_CLK_RGMII_DIV5);
- else if (speed == 1000)
- regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
- RK3528_GMAC1_CLK_RGMII_DIV1);
- else
- dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
-}
+static const struct rk_reg_speed_data rk3528_gmac1_reg_speed_data = {
+ .rgmii_10 = RK3528_GMAC1_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3528_GMAC1_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3528_GMAC1_CLK_RGMII_DIV1,
+ .rmii_10 = RK3528_GMAC1_CLK_RMII_DIV20,
+ .rmii_100 = RK3528_GMAC1_CLK_RMII_DIV2,
+};
-static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3528_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned int reg, val;
+ const struct rk_reg_speed_data *rsd;
+ unsigned int reg;
- if (speed == 10)
- val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 :
- RK3528_GMAC0_CLK_RMII_DIV20;
- else if (speed == 100)
- val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 :
- RK3528_GMAC0_CLK_RMII_DIV2;
- else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- return;
+ if (bsp_priv->id == 1) {
+ rsd = &rk3528_gmac1_reg_speed_data;
+ reg = RK3528_VPU_GRF_GMAC_CON5;
+ } else {
+ rsd = &rk3528_gmac0_reg_speed_data;
+ reg = RK3528_VO_GRF_GMAC_CON;
}
- reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 :
- RK3528_VO_GRF_GMAC_CON;
-
- regmap_write(bsp_priv->grf, reg, val);
+ return rk_set_reg_speed(bsp_priv, rsd, reg, interface, speed);
}
static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
@@ -1035,8 +950,7 @@ static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
static const struct rk_gmac_ops rk3528_ops = {
.set_to_rgmii = rk3528_set_to_rgmii,
.set_to_rmii = rk3528_set_to_rmii,
- .set_rgmii_speed = rk3528_set_rgmii_speed,
- .set_rmii_speed = rk3528_set_rmii_speed,
+ .set_speed = rk3528_set_speed,
.set_clock_selection = rk3528_set_clock_selection,
.integrated_phy_powerup = rk3528_integrated_phy_powerup,
.integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
@@ -1098,30 +1012,10 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
}
-static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- long rate;
- int ret;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
static const struct rk_gmac_ops rk3568_ops = {
.set_to_rgmii = rk3568_set_to_rgmii,
.set_to_rmii = rk3568_set_to_rmii,
- .set_rgmii_speed = rk3568_set_gmac_speed,
- .set_rmii_speed = rk3568_set_gmac_speed,
+ .set_speed = rk_set_clk_mac_speed,
.regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
@@ -1205,42 +1099,24 @@ static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
regmap_write(bsp_priv->grf, offset_con, RK3576_GMAC_RMII_MODE);
}
-static void rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned int val = 0, offset_con;
+static const struct rk_reg_speed_data rk3578_reg_speed_data = {
+ .rgmii_10 = RK3576_GMAC_CLK_RGMII_DIV50,
+ .rgmii_100 = RK3576_GMAC_CLK_RGMII_DIV5,
+ .rgmii_1000 = RK3576_GMAC_CLK_RGMII_DIV1,
+ .rmii_10 = RK3576_GMAC_CLK_RMII_DIV20,
+ .rmii_100 = RK3576_GMAC_CLK_RMII_DIV2,
+};
- switch (speed) {
- case 10:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RMII_DIV20;
- else
- val = RK3576_GMAC_CLK_RGMII_DIV50;
- break;
- case 100:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RMII_DIV2;
- else
- val = RK3576_GMAC_CLK_RGMII_DIV5;
- break;
- case 1000:
- if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
- val = RK3576_GMAC_CLK_RGMII_DIV1;
- else
- goto err;
- break;
- default:
- goto err;
- }
+static int rk3576_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ unsigned int offset_con;
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
- regmap_write(bsp_priv->grf, offset_con, val);
-
- return;
-err:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return rk_set_reg_speed(bsp_priv, &rk3578_reg_speed_data, offset_con,
+ interface, speed);
}
static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
@@ -1262,8 +1138,7 @@ static void rk3576_set_clock_selection(struct rk_priv_data *bsp_priv, bool input
static const struct rk_gmac_ops rk3576_ops = {
.set_to_rgmii = rk3576_set_to_rgmii,
.set_to_rmii = rk3576_set_to_rmii,
- .set_rgmii_speed = rk3576_set_gmac_speed,
- .set_rmii_speed = rk3576_set_gmac_speed,
+ .set_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
@@ -1347,26 +1222,26 @@ static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
}
-static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+static int rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int val = 0, id = bsp_priv->id;
switch (speed) {
case 10:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV20(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV50(id);
break;
case 100:
- if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ if (interface == PHY_INTERFACE_MODE_RMII)
val = RK3588_GMA_CLK_RMII_DIV2(id);
else
val = RK3588_GMAC_CLK_RGMII_DIV5(id);
break;
case 1000:
- if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ if (interface != PHY_INTERFACE_MODE_RMII)
val = RK3588_GMAC_CLK_RGMII_DIV1(id);
else
goto err;
@@ -1377,9 +1252,9 @@ static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
- return;
+ return 0;
err:
- dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return -EINVAL;
}
static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
@@ -1397,8 +1272,7 @@ static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input
static const struct rk_gmac_ops rk3588_ops = {
.set_to_rgmii = rk3588_set_to_rgmii,
.set_to_rmii = rk3588_set_to_rmii,
- .set_rgmii_speed = rk3588_set_gmac_speed,
- .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
.php_grf_required = true,
.regs_valid = true,
@@ -1427,26 +1301,21 @@ static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
RV1108_GMAC_PHY_INTF_SEL_RMII);
}
-static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct device *dev = &bsp_priv->pdev->dev;
+static const struct rk_reg_speed_data rv1108_reg_speed_data = {
+ .rmii_10 = RV1108_GMAC_RMII_CLK_2_5M | RV1108_GMAC_SPEED_10M,
+ .rmii_100 = RV1108_GMAC_RMII_CLK_25M | RV1108_GMAC_SPEED_100M,
+};
- if (speed == 10) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_2_5M |
- RV1108_GMAC_SPEED_10M);
- } else if (speed == 100) {
- regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
- RV1108_GMAC_RMII_CLK_25M |
- RV1108_GMAC_SPEED_100M);
- } else {
- dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
- }
+static int rv1108_set_speed(struct rk_priv_data *bsp_priv,
+ phy_interface_t interface, int speed)
+{
+ return rk_set_reg_speed(bsp_priv, &rv1108_reg_speed_data,
+ RV1108_GRF_GMAC_CON0, interface, speed);
}
static const struct rk_gmac_ops rv1108_ops = {
.set_to_rmii = rv1108_set_to_rmii,
- .set_rmii_speed = rv1108_set_rmii_speed,
+ .set_speed = rv1108_set_speed,
};
#define RV1126_GRF_GMAC_CON0 0X0070
@@ -1501,62 +1370,17 @@ static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
RV1126_GMAC_PHY_INTF_SEL_RMII);
}
-static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- long rate;
- int ret;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
-static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
-{
- struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
- struct device *dev = &bsp_priv->pdev->dev;
- unsigned long rate;
- int ret;
-
- switch (speed) {
- case 10:
- rate = 2500000;
- break;
- case 100:
- rate = 25000000;
- break;
- default:
- dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
- return;
- }
-
- ret = clk_set_rate(clk_mac_speed, rate);
- if (ret)
- dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
- __func__, rate, ret);
-}
-
static const struct rk_gmac_ops rv1126_ops = {
.set_to_rgmii = rv1126_set_to_rgmii,
.set_to_rmii = rv1126_set_to_rmii,
- .set_rgmii_speed = rv1126_set_rgmii_speed,
- .set_rmii_speed = rv1126_set_rmii_speed,
+ .set_speed = rk_set_clk_mac_speed,
};
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
- struct device *dev = &bsp_priv->pdev->dev;
int phy_iface = bsp_priv->phy_iface;
+ struct device *dev = bsp_priv->dev;
int i, j, ret;
bsp_priv->clk_enabled = false;
@@ -1583,16 +1407,10 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
if (ret)
return dev_err_probe(dev, ret, "Failed to get clocks\n");
- /* "stmmaceth" will be enabled by the core */
- bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
- ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
- if (ret)
- return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
-
if (bsp_priv->clock_input) {
dev_info(dev, "clock input from PHY\n");
} else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
- clk_set_rate(bsp_priv->clk_mac, 50000000);
+ clk_set_rate(plat->stmmac_clk, 50000000);
}
if (plat->phy_node && bsp_priv->integrated_phy) {
@@ -1648,8 +1466,8 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
{
struct regulator *ldo = bsp_priv->regulator;
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
if (enable) {
ret = regulator_enable(ldo);
@@ -1773,7 +1591,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
dev_info(dev, "integrated PHY? (%s).\n",
bsp_priv->integrated_phy ? "yes" : "no");
- bsp_priv->pdev = pdev;
+ bsp_priv->dev = dev;
return bsp_priv;
}
@@ -1793,7 +1611,7 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
return -EINVAL;
break;
default:
- dev_err(&bsp_priv->pdev->dev,
+ dev_err(bsp_priv->dev,
"unsupported interface %d", bsp_priv->phy_iface);
}
return 0;
@@ -1801,8 +1619,8 @@ static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
+ struct device *dev = bsp_priv->dev;
int ret;
- struct device *dev = &bsp_priv->pdev->dev;
ret = rk_gmac_check_ops(bsp_priv);
if (ret)
@@ -1858,35 +1676,34 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
gmac->ops->integrated_phy_powerdown(gmac);
- pm_runtime_put_sync(&gmac->pdev->dev);
+ pm_runtime_put_sync(gmac->dev);
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
+static void rk_get_interfaces(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces)
+{
+ struct rk_priv_data *rk = bsp_priv;
+
+ if (rk->ops->set_to_rgmii)
+ phy_interface_set_rgmii(interfaces);
+
+ if (rk->ops->set_to_rmii)
+ __set_bit(PHY_INTERFACE_MODE_RMII, interfaces);
+}
+
static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
phy_interface_t interface, int speed)
{
struct rk_priv_data *bsp_priv = bsp_priv_;
- struct device *dev = &bsp_priv->pdev->dev;
- switch (bsp_priv->phy_iface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (bsp_priv->ops->set_rgmii_speed)
- bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
- break;
- case PHY_INTERFACE_MODE_RMII:
- if (bsp_priv->ops->set_rmii_speed)
- bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
- break;
- default:
- dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
- }
+ if (bsp_priv->ops->set_speed)
+ return bsp_priv->ops->set_speed(bsp_priv, bsp_priv->phy_iface,
+ speed);
- return 0;
+ return -EINVAL;
}
static int rk_gmac_probe(struct platform_device *pdev)
@@ -1919,6 +1736,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->tx_fifo_size = 2048;
}
+ plat_dat->get_interfaces = rk_get_interfaces;
plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
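
Net effect of the dwmac-rk refactor above: each SoC's set_rgmii_speed/set_rmii_speed pair collapses into a single set_speed() op; most implementations delegate to the table-driven rk_set_reg_speed() with a const struct rk_reg_speed_data, SoCs that scale a clock instead use rk_set_clk_mac_speed(), and rk_set_clk_tx_rate() simply forwards the phylink-supplied interface and speed. Adding a further SoC would, roughly, look like this (the RKXXXX_* constants and set_to_* helpers are placeholders, not real registers):

static const struct rk_reg_speed_data rkxxxx_reg_speed_data = {
	.rgmii_10   = RKXXXX_GMAC_CLK_2_5M,
	.rgmii_100  = RKXXXX_GMAC_CLK_25M,
	.rgmii_1000 = RKXXXX_GMAC_CLK_125M,
	.rmii_10    = RKXXXX_GMAC_RMII_CLK_2_5M | RKXXXX_GMAC_SPEED_10M,
	.rmii_100   = RKXXXX_GMAC_RMII_CLK_25M | RKXXXX_GMAC_SPEED_100M,
};

static int rkxxxx_set_speed(struct rk_priv_data *bsp_priv,
			    phy_interface_t interface, int speed)
{
	return rk_set_reg_speed(bsp_priv, &rkxxxx_reg_speed_data,
				RKXXXX_GRF_MAC_CON, interface, speed);
}

static const struct rk_gmac_ops rkxxxx_ops = {
	.set_to_rgmii = rkxxxx_set_to_rgmii,	/* as on existing SoCs */
	.set_to_rmii  = rkxxxx_set_to_rmii,
	.set_speed    = rkxxxx_set_speed,
};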
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 5e6ac82a89b9..bd65d4239054 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -48,7 +48,6 @@
struct visconti_eth {
void __iomem *reg;
- u32 phy_intf_sel;
struct clk *phy_ref_clk;
struct device *dev;
};
@@ -57,42 +56,35 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed)
{
struct visconti_eth *dwmac = bsp_priv;
- struct net_device *netdev = dev_get_drvdata(dwmac->dev);
- unsigned int val, clk_sel_val = 0;
-
- switch (speed) {
- case SPEED_1000:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
- break;
- case SPEED_100:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
- break;
- case SPEED_10:
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
- clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
- if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
- clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
- break;
- default:
- /* No bit control */
- netdev_err(netdev, "Unsupported speed request (%d)", speed);
- return -EINVAL;
- }
-
- /* Stop internal clock */
- val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
- val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
- val |= ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ unsigned long clk_sel, val;
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ switch (speed) {
+ case SPEED_1000:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_125M;
+ break;
+
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_25M;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_FREQ_SEL_2P5M;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- /* Set Clock-Mux, Start clock, Set TX_O direction */
- switch (dwmac->phy_intf_sel) {
- case ETHER_CONFIG_INTF_RGMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
@@ -100,11 +92,32 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_RMII:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ } else if (interface == PHY_INTERFACE_MODE_RMII) {
+ switch (speed) {
+ case SPEED_100:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_2;
+ break;
+
+ case SPEED_10:
+ clk_sel = ETHER_CLK_SEL_DIV_SEL_20;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = clk_sel | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RMII_CLK_RST;
@@ -112,16 +125,22 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
- case ETHER_CONFIG_INTF_MII:
- default:
- val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ } else {
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN |
+ ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ val = ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC |
+ ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- break;
}
return 0;
@@ -130,28 +149,28 @@ static int visconti_eth_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
{
struct visconti_eth *dwmac = plat_dat->bsp_priv;
- unsigned int reg_val, clk_sel_val;
+ unsigned int clk_sel_val;
+ u32 phy_intf_sel;
switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
+ phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
break;
case PHY_INTERFACE_MODE_MII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII;
+ phy_intf_sel = ETHER_CONFIG_INTF_MII;
break;
case PHY_INTERFACE_MODE_RMII:
- dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII;
+ phy_intf_sel = ETHER_CONFIG_INTF_RMII;
break;
default:
dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface);
return -EOPNOTSUPP;
}
- reg_val = dwmac->phy_intf_sel;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
/* Enable TX/RX clock */
clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
@@ -161,8 +180,8 @@ static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmac
dwmac->reg + REG_ETHER_CLOCK_SEL);
/* release internal-reset */
- reg_val |= ETHER_ETH_CONTROL_RESET;
- writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+ phy_intf_sel |= ETHER_ETH_CONTROL_RESET;
+ writel(phy_intf_sel, dwmac->reg + REG_ETHER_CONTROL);
return 0;
}
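
To make the restructured visconti_eth_set_clk_tx_rate() above easier to review: the clock-select value is now derived directly from the phy_interface_t handed in by phylink instead of the cached dwmac->phy_intf_sel that the patch removes. A minimal user-space C sketch of that interface/speed mapping follows; the CLK_SEL_* constants and the clk_sel_for() helper are placeholders for illustration, not the driver's real register bits.

    #include <stdio.h>

    enum iface { IF_RGMII, IF_RMII, IF_MII };

    /* placeholder values standing in for the ETHER_CLK_SEL_* register bits */
    #define CLK_SEL_FREQ_125M  0x1UL
    #define CLK_SEL_FREQ_25M   0x2UL
    #define CLK_SEL_FREQ_2P5M  0x3UL
    #define CLK_SEL_DIV_2      0x4UL
    #define CLK_SEL_DIV_20     0x5UL

    static int clk_sel_for(enum iface iface, int speed, unsigned long *clk_sel)
    {
        if (iface == IF_RGMII) {
            switch (speed) {
            case 1000: *clk_sel = CLK_SEL_FREQ_125M; return 0;
            case 100:  *clk_sel = CLK_SEL_FREQ_25M;  return 0;
            case 10:   *clk_sel = CLK_SEL_FREQ_2P5M; return 0;
            default:   return -1; /* -EINVAL in the driver */
            }
        }
        if (iface == IF_RMII) {
            switch (speed) {
            case 100: *clk_sel = CLK_SEL_DIV_2;  return 0;
            case 10:  *clk_sel = CLK_SEL_DIV_20; return 0;
            default:  return -1;
            }
        }
        *clk_sel = 0; /* MII: no frequency select is programmed */
        return 0;
    }

    int main(void)
    {
        unsigned long sel;

        if (!clk_sel_for(IF_RGMII, 1000, &sel))
            printf("RGMII/1000 -> clk_sel 0x%lx\n", sel);
        if (clk_sel_for(IF_RMII, 1000, &sel))
            printf("RMII/1000 -> rejected\n");
        return 0;
    }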
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 56b76aaa58f0..fe776ddf6889 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -393,15 +393,10 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
-static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
- bool loopback)
+static void dwmac1000_ctrl_ane(struct stmmac_priv *priv, bool ane,
+ bool srgmi_ral, bool loopback)
{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
-{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
static void dwmac1000_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -508,7 +503,6 @@ const struct stmmac_ops dwmac1000_ops = {
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
- .pcs_get_adv_lp = dwmac1000_get_adv_lp,
.set_mac_loopback = dwmac1000_set_mac_loopback,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 9c2549d4100f..d85bc0bb5c3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -583,15 +583,10 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
}
-static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral,
bool loopback)
{
- dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
-}
-
-static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
-{
- dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+ dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
/* RGMII or SMII interface */
@@ -958,7 +953,6 @@ const struct stmmac_ops dwmac4_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.set_mac_loopback = dwmac4_set_mac_loopback,
@@ -993,7 +987,6 @@ const struct stmmac_ops dwmac410_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.flex_pps_config = dwmac5_flex_pps_config,
@@ -1030,7 +1023,6 @@ const struct stmmac_ops dwmac510_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
.safety_feat_config = dwmac5_safety_feat_config,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index ae4efffb785f..14dbe0685997 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -300,7 +300,6 @@ struct stmmac_dma_ops {
struct mac_device_info;
struct net_device;
-struct rgmii_adv;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
struct stmmac_rss;
@@ -375,9 +374,8 @@ struct stmmac_ops {
struct stmmac_extra_stats *x, u32 rx_queues,
u32 tx_queues);
/* PCS calls */
- void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ void (*pcs_ctrl_ane)(struct stmmac_priv *priv, bool ane, bool srgmi_ral,
bool loopback);
- void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
/* Safety Features */
int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_feature_cfg *safety_cfg);
@@ -466,9 +464,7 @@ struct stmmac_ops {
#define stmmac_mac_debug(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
-#define stmmac_pcs_get_adv_lp(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
+ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __priv, __args)
#define stmmac_safety_feat_config(__priv, __args...) \
stmmac_do_callback(__priv, mac, safety_feat_config, __args)
#define stmmac_safety_feat_irq_status(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f702f7b7bf9f..77758a7299b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -325,7 +325,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
if (!(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS) &&
(priv->hw->pcs & STMMAC_PCS_RGMII ||
priv->hw->pcs & STMMAC_PCS_SGMII)) {
- struct rgmii_adv adv;
u32 supported, advertising, lp_advertising;
if (!priv->xstats.pcs_link) {
@@ -337,10 +336,6 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
cmd->base.speed = priv->xstats.pcs_speed;
- /* Get and convert ADV/LP_ADV from the HW AN registers */
- if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
- return -EOPNOTSUPP; /* should never happen indeed */
-
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
ethtool_convert_link_mode_to_legacy_u32(
@@ -350,44 +345,12 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
ethtool_convert_link_mode_to_legacy_u32(
&lp_advertising, cmd->link_modes.lp_advertising);
- if (adv.pause & STMMAC_PCS_PAUSE)
- advertising |= ADVERTISED_Pause;
- if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
- advertising |= ADVERTISED_Asym_Pause;
- if (adv.lp_pause & STMMAC_PCS_PAUSE)
- lp_advertising |= ADVERTISED_Pause;
- if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
- lp_advertising |= ADVERTISED_Asym_Pause;
-
/* Reg49[3] always set because ANE is always supported */
cmd->base.autoneg = ADVERTISED_Autoneg;
supported |= SUPPORTED_Autoneg;
advertising |= ADVERTISED_Autoneg;
lp_advertising |= ADVERTISED_Autoneg;
- if (adv.duplex) {
- supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Full);
- advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- } else {
- supported |= (SUPPORTED_1000baseT_Half |
- SUPPORTED_100baseT_Half |
- SUPPORTED_10baseT_Half);
- advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
- }
- if (adv.lp_duplex)
- lp_advertising |= (ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full);
- else
- lp_advertising |= (ADVERTISED_1000baseT_Half |
- ADVERTISED_100baseT_Half |
- ADVERTISED_10baseT_Half);
cmd->base.port = PORT_OTHER;
ethtool_convert_legacy_u32_to_link_mode(
@@ -417,7 +380,7 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
return -EINVAL;
mutex_lock(&priv->lock);
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+ stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
mutex_unlock(&priv->lock);
return 0;
@@ -515,12 +478,9 @@ stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
+ if (priv->hw->pcs) {
pause->autoneg = 1;
- if (!adv_lp.pause)
- return;
} else {
phylink_ethtool_get_pauseparam(priv->phylink, pause);
}
@@ -531,12 +491,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- struct rgmii_adv adv_lp;
- if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
+ if (priv->hw->pcs) {
pause->autoneg = 1;
- if (!adv_lp.pause)
- return -EOPNOTSUPP;
return 0;
} else {
return phylink_ethtool_set_pauseparam(priv->phylink, pause);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b948df1bff9a..f350a6662880 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1062,8 +1062,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
interface, speed);
if (ret < 0)
netdev_err(priv->dev,
- "failed to configure transmit clock for %dMbps: %pe\n",
- speed, ERR_PTR(ret));
+ "failed to configure %s transmit clock for %dMbps: %pe\n",
+ phy_modes(interface), speed, ERR_PTR(ret));
}
stmmac_mac_set(priv, priv->ioaddr, true);
@@ -3586,7 +3586,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
}
if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+ stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index 1bdf87b237c4..4a684c97dfae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -16,6 +16,8 @@
/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
#define GMAC_AN_CTRL(x) (x) /* AN control */
#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+
+/* ADV, LPA and EXP are only available for the TBI and RTBI interfaces */
#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
#define GMAC_ANE_LPA(x)	(x + 0xc)	/* ANE link partner ability */
#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
@@ -107,34 +109,4 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
writel(value, ioaddr + GMAC_AN_CTRL(reg));
}
-
-/**
- * dwmac_get_adv_lp - Get ADV and LP cap
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @adv_lp: structure to store the adv,lp status
- * Description: this is to expose the ANE advertisement and Link partner ability
- * status to ethtool support.
- */
-static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
- struct rgmii_adv *adv_lp)
-{
- u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->duplex |= DUPLEX_HALF;
-
- adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
- value = readl(ioaddr + GMAC_ANE_LPA(reg));
-
- if (value & GMAC_ANE_FD)
- adv_lp->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv_lp->lp_duplex = DUPLEX_HALF;
-
- adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-}
#endif /* __STMMAC_PCS_H__ */
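
The hwif.h and stmmac_pcs.h changes above make pcs_ctrl_ane() take the stmmac private pointer and drop the separate advertisement/link-partner readout, so callers no longer pass priv->ioaddr explicitly. A stripped-down, compilable analogue of the variadic-macro pattern that injects the private pointer (the struct and function names here are invented for the sketch):

    #include <stdio.h>

    struct priv { const char *name; };

    static void ctrl_ane(struct priv *p, int ane, int srgmi_ral, int loopback)
    {
        printf("%s: ane=%d srgmi_ral=%d loopback=%d\n",
               p->name, ane, srgmi_ral, loopback);
    }

    /* the wrapper injects __priv as the callback's first argument */
    #define pcs_ctrl_ane(__priv, ...) ctrl_ane(__priv, __VA_ARGS__)

    int main(void)
    {
        struct priv p = { "gmac0" };

        /* mirrors stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0) in the patch */
        pcs_ctrl_ane(&p, 1, 0, 0);
        return 0;
    }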
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index b80c1efdb323..4164b3a580d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -579,6 +579,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
if (of_property_read_bool(np, "snps,tso"))
plat->flags |= STMMAC_FLAG_TSO_EN;
+ of_property_read_u32(np, "snps,multicast-filter-bins",
+ &plat->multicast_filter_bins);
}
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ddca8fc7883e..75d7e10944d4 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -7077,8 +7077,10 @@ static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
}
-static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
nfc->data = 0;
@@ -7290,9 +7292,6 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_GRXFH:
- ret = niu_get_hash_opts(np, cmd);
- break;
case ETHTOOL_GRXRINGS:
cmd->data = np->num_rx_rings;
break;
@@ -7313,8 +7312,11 @@ static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+static int niu_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct niu *np = netdev_priv(dev);
u64 class;
u64 flow_key = 0;
unsigned long flags;
@@ -7656,9 +7658,6 @@ static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
int ret = 0;
switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = niu_set_hash_opts(np, cmd);
- break;
case ETHTOOL_SRXCLSRLINS:
ret = niu_add_ethtool_tcam_entry(np, cmd);
break;
@@ -7912,6 +7911,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
.set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
+ .get_rxfh_fields = niu_get_rxfh_fields,
+ .set_rxfh_fields = niu_set_rxfh_fields,
.get_link_ksettings = niu_get_link_ksettings,
.set_link_ksettings = niu_set_link_ksettings,
};
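
The niu change above routes what used to be the ETHTOOL_GRXFH/ETHTOOL_SRXFH cases of .get_rxnfc/.set_rxnfc through the dedicated .get_rxfh_fields/.set_rxfh_fields ethtool operations; the field selection itself is still the RXH_* bitmask from the ethtool UAPI. A small user-space decoder for that bitmask, assuming the Linux <linux/ethtool.h> UAPI header is available to build against:

    #include <stdio.h>
    #include <linux/ethtool.h>

    static void print_rxfh_fields(unsigned long long data)
    {
        if (data & RXH_IP_SRC)
            printf("  hash on IP source address\n");
        if (data & RXH_IP_DST)
            printf("  hash on IP destination address\n");
        if (data & RXH_L4_B_0_1)
            printf("  hash on L4 bytes 0-1 (source port)\n");
        if (data & RXH_L4_B_2_3)
            printf("  hash on L4 bytes 2-3 (destination port)\n");
    }

    int main(void)
    {
        /* the usual 4-tuple selection a driver reports for TCP flows */
        print_rxfh_fields(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
        return 0;
    }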
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index f20d1ff192ef..519757e618ad 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2602,6 +2602,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return -ENOENT;
for_each_child_of_node(node, port_np) {
+ phy_interface_t phy_if;
struct am65_cpsw_port *port;
u32 port_id;
@@ -2667,14 +2668,36 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
port->slave.port_np = of_node_get(port_np);
- ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ ret = of_get_phy_mode(port_np, &phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
goto of_node_put;
}
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ /* CPSW controllers supported by this driver have a fixed
+ * internal TX delay in RGMII mode. Fix up PHY mode to account
+ * for this and warn about Device Trees that claim to have a TX
+ * delay on the PCB.
+ */
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_if = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_warn(dev,
+ "RGMII mode without internal TX delay unsupported; please fix your Device Tree\n");
+ break;
+ default:
+ break;
+ }
+
+ port->slave.phy_if = phy_if;
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, phy_if);
if (ret)
goto of_node_put;
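
For review, the phy-mode fixup added above can be summarised as a pure mapping: the CPSW MAC always inserts the TX delay itself, so the delay requested via the device tree is folded accordingly, and modes that claim no TX delay at all only trigger a warning. A plain user-space sketch with a local enum standing in for phy_interface_t:

    #include <stdio.h>

    enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

    static const char *name(enum rgmii_mode m)
    {
        static const char * const n[] = {
            "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid"
        };
        return n[m];
    }

    /* fold out the TX delay that the CPSW MAC already provides internally */
    static enum rgmii_mode cpsw_fixup(enum rgmii_mode dt_mode, int *warn)
    {
        *warn = 0;
        switch (dt_mode) {
        case RGMII_ID:
            return RGMII_RXID;  /* keep only the RX delay request */
        case RGMII_TXID:
            return RGMII;       /* TX delay is already internal */
        case RGMII:
        case RGMII_RXID:
            *warn = 1;          /* DT claims no TX delay: warn, keep mode */
            return dt_mode;
        }
        return dt_mode;
    }

    int main(void)
    {
        int warn;

        for (int m = RGMII; m <= RGMII_TXID; m++) {
            enum rgmii_mode fixed = cpsw_fixup((enum rgmii_mode)m, &warn);
            printf("%s -> %s%s\n", name((enum rgmii_mode)m), name(fixed),
                   warn ? " (warn: fix your Device Tree)" : "");
        }
        return 0;
    }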
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 86fc1278127c..2aa812cbab92 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -125,45 +125,6 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct icssg_firmwares icssg_hsr_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-pruhsr-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-pruhsr-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-pruhsr-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-pruhsr-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-pruhsr-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-pruhsr-fw.elf",
- }
-};
-
-static struct icssg_firmwares icssg_switch_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
- }
-};
-
-static struct icssg_firmwares icssg_emac_firmwares[] = {
- {
- .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu0-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru0-prueth-fw.elf",
- },
- {
- .pru = "ti-pruss/am65x-sr2-pru1-prueth-fw.elf",
- .rtu = "ti-pruss/am65x-sr2-rtu1-prueth-fw.elf",
- .txpru = "ti-pruss/am65x-sr2-txpru1-prueth-fw.elf",
- }
-};
-
static int prueth_start(struct rproc *rproc, const char *fw_name)
{
int ret;
@@ -186,11 +147,13 @@ static int prueth_emac_start(struct prueth *prueth)
int ret, slice;
if (prueth->is_switch_mode)
- firmwares = icssg_switch_firmwares;
- else if (prueth->is_hsr_offload_mode)
- firmwares = icssg_hsr_firmwares;
+ firmwares = prueth->icssg_switch_firmwares;
+ else if (prueth->is_hsr_offload_mode && prueth->hsr_prp_version == HSR_V1)
+ firmwares = prueth->icssg_hsr_firmwares;
+ else if (prueth->is_hsr_offload_mode && prueth->hsr_prp_version == PRP_V1)
+ firmwares = prueth->icssg_prp_firmwares;
else
- firmwares = icssg_emac_firmwares;
+ firmwares = prueth->icssg_emac_firmwares;
for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
@@ -1566,6 +1529,7 @@ static int prueth_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info;
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ enum hsr_version hsr_ndev_version;
int ret = NOTIFY_DONE;
if (ndev->netdev_ops != &emac_netdev_ops)
@@ -1577,6 +1541,11 @@ static int prueth_netdevice_event(struct notifier_block *unused,
if ((ndev->features & NETIF_PRUETH_HSR_OFFLOAD_FEATURES) &&
is_hsr_master(info->upper_dev)) {
+ hsr_get_version(info->upper_dev, &hsr_ndev_version);
+ if (hsr_ndev_version != HSR_V1 && hsr_ndev_version != PRP_V1)
+ return -EOPNOTSUPP;
+ prueth->hsr_prp_version = hsr_ndev_version;
+
if (info->linking) {
if (!prueth->hsr_dev) {
prueth->hsr_dev = info->upper_dev;
@@ -1632,6 +1601,87 @@ static void prueth_unregister_notifiers(struct prueth *prueth)
unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}
+static void icssg_read_firmware_names(struct device_node *np,
+ struct icssg_firmwares *fw)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ of_property_read_string_index(np, "firmware-name", i * 3 + 0,
+ &fw[i].pru);
+ of_property_read_string_index(np, "firmware-name", i * 3 + 1,
+ &fw[i].rtu);
+ of_property_read_string_index(np, "firmware-name", i * 3 + 2,
+ &fw[i].txpru);
+ }
+}
+
+/**
+ * icssg_firmware_name_replace - Replace a substring in a firmware name
+ * @dev: device pointer for memory allocation
+ * @src: source firmware name string
+ * @from: substring to replace
+ * @to: replacement substring
+ *
+ * Return: a newly allocated string with the replacement, or the original
+ * string if replacement is not possible.
+ */
+static const char *icssg_firmware_name_replace(struct device *dev,
+ const char *src,
+ const char *from,
+ const char *to)
+{
+ size_t prefix, from_len, to_len, total;
+ const char *p = strstr(src, from);
+ char *buf;
+
+ if (!p)
+ return src; /* fallback: no replacement, use original */
+
+ prefix = p - src;
+ from_len = strlen(from);
+ to_len = strlen(to);
+ total = strlen(src) - from_len + to_len + 1;
+
+ buf = devm_kzalloc(dev, total, GFP_KERNEL);
+ if (!buf)
+ return src; /* fallback: allocation failed, use original */
+
+ strscpy(buf, src, prefix + 1);
+ strscpy(buf + prefix, to, to_len + 1);
+ strscpy(buf + prefix + to_len, p + from_len, total - prefix - to_len);
+
+ return buf;
+}
+
+/**
+ * icssg_mode_firmware_names - Generate firmware names for a specific mode
+ * @dev: device pointer for logging and context
+ * @src: source array of firmware name structures
+ * @dst: destination array to store updated firmware name structures
+ * @from: substring in firmware names to be replaced
+ * @to: substring to replace @from in firmware names
+ *
+ * Iterates over all MACs and replaces occurrences of the @from substring
+ * with @to in the firmware names (pru, rtu, txpru) for each MAC. The
+ * updated firmware names are stored in the @dst array.
+ */
+static void icssg_mode_firmware_names(struct device *dev,
+ struct icssg_firmwares *src,
+ struct icssg_firmwares *dst,
+ const char *from, const char *to)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ dst[i].pru = icssg_firmware_name_replace(dev, src[i].pru,
+ from, to);
+ dst[i].rtu = icssg_firmware_name_replace(dev, src[i].rtu,
+ from, to);
+ dst[i].txpru = icssg_firmware_name_replace(dev, src[i].txpru,
+ from, to);
+ }
+}
+
static int prueth_probe(struct platform_device *pdev)
{
struct device_node *eth_node, *eth_ports_node;
@@ -1808,6 +1858,17 @@ static int prueth_probe(struct platform_device *pdev)
icss_iep_init_fw(prueth->iep1);
}
+ /* Read EMAC firmware names from device tree */
+ icssg_read_firmware_names(np, prueth->icssg_emac_firmwares);
+
+ /* Generate other mode firmware names based on EMAC firmware names */
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_switch_firmwares, "eth", "sw");
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_hsr_firmwares, "eth", "hsr");
+ icssg_mode_firmware_names(dev, prueth->icssg_emac_firmwares,
+ prueth->icssg_prp_firmwares, "eth", "prp");
+
spin_lock_init(&prueth->vtbl_lock);
spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
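
For context on the probe changes above: the switch, HSR and PRP firmware names are no longer hard-coded but derived from the EMAC names read via "firmware-name", by swapping the "eth" substring for "sw", "hsr" or "prp". A user-space sketch of that substitution (not the driver's devm-based implementation), applied to one of the EMAC names the patch removes from the static tables:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* replace the first occurrence of "from" with "to"; NULL when absent */
    static char *replace_once(const char *src, const char *from, const char *to)
    {
        const char *p = strstr(src, from);
        size_t prefix, total;
        char *buf;

        if (!p)
            return NULL; /* the driver keeps the original name instead */

        prefix = p - src;
        total = strlen(src) - strlen(from) + strlen(to) + 1;
        buf = malloc(total);
        if (!buf)
            return NULL;

        memcpy(buf, src, prefix);
        strcpy(buf + prefix, to);
        strcat(buf, p + strlen(from));
        return buf;
    }

    int main(void)
    {
        const char *emac = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf";
        char *sw = replace_once(emac, "eth", "sw");
        char *hsr = replace_once(emac, "eth", "hsr");

        /* prints ...pru0-prusw-fw.elf and ...pru0-pruhsr-fw.elf */
        printf("switch: %s\nhsr:    %s\n", sw, hsr);
        free(sw);
        free(hsr);
        return 0;
    }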
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 23c465f1ce7f..9ca2e7fdefbd 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -13,6 +13,7 @@
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
+#include <linux/if_hsr.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -259,9 +260,9 @@ struct prueth_pdata {
};
struct icssg_firmwares {
- char *pru;
- char *rtu;
- char *txpru;
+ const char *pru;
+ const char *rtu;
+ const char *txpru;
};
/**
@@ -290,6 +291,7 @@ struct icssg_firmwares {
* @vlan_tbl: VLAN-FID table pointer
* @hw_bridge_dev: pointer to HW bridge net device
* @hsr_dev: pointer to the HSR net device
+ * @hsr_prp_version: protocol version (HSR or PRP) of the attached hsr master
* @br_members: bitmask of bridge member ports
* @hsr_members: bitmask of hsr member ports
* @prueth_netdevice_nb: netdevice notifier block
@@ -300,6 +302,10 @@ struct icssg_firmwares {
* @is_switchmode_supported: indicates platform support for switch mode
* @switch_id: ID for mapping switch ports to bridge
* @default_vlan: Default VLAN for host
+ * @icssg_emac_firmwares: Firmware names for EMAC mode, indexed per MAC
+ * @icssg_switch_firmwares: Firmware names for SWITCH mode, indexed per MAC
+ * @icssg_hsr_firmwares: Firmware names for HSR mode, indexed per MAC
+ * @icssg_prp_firmwares: Firmware names for PRP mode, indexed per MAC
*/
struct prueth {
struct device *dev;
@@ -329,6 +335,7 @@ struct prueth {
struct net_device *hw_bridge_dev;
struct net_device *hsr_dev;
+ enum hsr_version hsr_prp_version;
u8 br_members;
u8 hsr_members;
struct notifier_block prueth_netdevice_nb;
@@ -343,6 +350,10 @@ struct prueth {
spinlock_t vtbl_lock;
/** @stats_lock: Lock for reading icssg stats */
spinlock_t stats_lock;
+ struct icssg_firmwares icssg_emac_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_switch_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_hsr_firmwares[PRUETH_NUM_MACS];
+ struct icssg_firmwares icssg_prp_firmwares[PRUETH_NUM_MACS];
};
struct emac_tx_ts_response {
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index ffc15a432689..54384f9b3872 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -41,6 +41,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
/* per-network namespace private data for this module */
struct geneve_net {
struct list_head geneve_list;
+ /* sock_list is protected by rtnl lock */
struct list_head sock_list;
};
@@ -921,8 +922,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
!net_eq(geneve->net, dev_net(geneve->dev)),
- !test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ !test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags),
+ 0);
return 0;
}
@@ -1014,7 +1015,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
&saddr, &key->u.ipv6.dst, prio, ttl,
info->key.label, sport, geneve->cfg.info.key.tp_dst,
!test_bit(IP_TUNNEL_CSUM_BIT,
- info->key.tun_flags));
+ info->key.tun_flags),
+ 0);
return 0;
}
#endif
@@ -1179,8 +1181,9 @@ static void geneve_offload_rx_ports(struct net_device *dev, bool push)
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
- rcu_read_lock();
- list_for_each_entry_rcu(gs, &gn->sock_list, list) {
+ ASSERT_RTNL();
+
+ list_for_each_entry(gs, &gn->sock_list, list) {
if (push) {
udp_tunnel_push_rx_port(dev, gs->sock,
UDP_TUNNEL_TYPE_GENEVE);
@@ -1189,7 +1192,6 @@ static void geneve_offload_rx_ports(struct net_device *dev, bool push)
UDP_TUNNEL_TYPE_GENEVE);
}
}
- rcu_read_unlock();
}
/* Initialize the device structure. */
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index d4dec741c7f4..4b668ebaa0f7 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -446,7 +446,8 @@ static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
htons(GTP0_PORT), htons(GTP0_PORT),
!net_eq(sock_net(gtp->sk1u),
dev_net(gtp->dev)),
- false);
+ false,
+ 0);
return 0;
}
@@ -704,7 +705,8 @@ static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
htons(GTP1U_PORT), htons(GTP1U_PORT),
!net_eq(sock_net(gtp->sk1u),
dev_net(gtp->dev)),
- false);
+ false,
+ 0);
return 0;
}
@@ -1304,7 +1306,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
pktinfo.gtph_port, pktinfo.gtph_port,
!net_eq(sock_net(pktinfo.pctx->sk),
dev_net(dev)),
- false);
+ false, 0);
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
@@ -1314,7 +1316,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
ip6_dst_hoplimit(&pktinfo.rt->dst),
0,
pktinfo.gtph_port, pktinfo.gtph_port,
- false);
+ false, 0);
#else
goto tx_err;
#endif
@@ -2405,7 +2407,7 @@ static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
port, port,
!net_eq(sock_net(sk),
dev_net(gtp->dev)),
- false);
+ false, 0);
return 0;
}
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c41a025c66f0..42d98e99566e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1580,9 +1580,10 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
static int
-netvsc_get_rss_hash_opts(struct net_device_context *ndc,
- struct ethtool_rxnfc *info)
+netvsc_get_rxfh_fields(struct net_device *ndev,
+ struct ethtool_rxfh_fields *info)
{
+ struct net_device_context *ndc = netdev_priv(ndev);
const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
info->data = RXH_IP_SRC | RXH_IP_DST;
@@ -1637,16 +1638,17 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = nvdev->num_chn;
return 0;
-
- case ETHTOOL_GRXFH:
- return netvsc_get_rss_hash_opts(ndc, info);
}
return -EOPNOTSUPP;
}
-static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
- struct ethtool_rxnfc *info)
+static int
+netvsc_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct net_device_context *ndc = netdev_priv(dev);
+
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
switch (info->flow_type) {
@@ -1701,17 +1703,6 @@ static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
return -EOPNOTSUPP;
}
-static int
-netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
-{
- struct net_device_context *ndc = netdev_priv(ndev);
-
- if (info->cmd == ETHTOOL_SRXFH)
- return netvsc_set_rss_hash_opts(ndc, info);
-
- return -EOPNOTSUPP;
-}
-
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
return NETVSC_HASH_KEYLEN;
@@ -1979,11 +1970,12 @@ static const struct ethtool_ops ethtool_ops = {
.set_channels = netvsc_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = netvsc_get_rxnfc,
- .set_rxnfc = netvsc_set_rxnfc,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
.set_rxfh = netvsc_set_rxfh,
+ .get_rxfh_fields = netvsc_get_rxfh_fields,
+ .set_rxfh_fields = netvsc_set_rxfh_fields,
.get_link_ksettings = netvsc_get_link_ksettings,
.set_link_ksettings = netvsc_set_link_ksettings,
.get_ringparam = netvsc_get_ringparam,
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index aea0f0357568..9b41d4697a40 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -18,7 +18,8 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors");
static struct pse_control *
-fwnode_find_pse_control(struct fwnode_handle *fwnode)
+fwnode_find_pse_control(struct fwnode_handle *fwnode,
+ struct phy_device *phydev)
{
struct pse_control *psec;
struct device_node *np;
@@ -30,7 +31,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode)
if (!np)
return NULL;
- psec = of_pse_control_get(np);
+ psec = of_pse_control_get(np, phydev);
if (PTR_ERR(psec) == -ENOENT)
return NULL;
@@ -128,15 +129,9 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
u32 phy_id;
int rc;
- psec = fwnode_find_pse_control(child);
- if (IS_ERR(psec))
- return PTR_ERR(psec);
-
mii_ts = fwnode_find_mii_timestamper(child);
- if (IS_ERR(mii_ts)) {
- rc = PTR_ERR(mii_ts);
- goto clean_pse;
- }
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
is_c45 = fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45");
if (is_c45 || fwnode_get_phy_id(child, &phy_id))
@@ -169,6 +164,12 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
goto clean_phy;
}
+ psec = fwnode_find_pse_control(child, phy);
+ if (IS_ERR(psec)) {
+ rc = PTR_ERR(psec);
+ goto unregister_phy;
+ }
+
phy->psec = psec;
/* phy->mii_ts may already be defined by the PHY driver. A
@@ -180,12 +181,13 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
return 0;
+unregister_phy:
+ if (is_acpi_node(child) || is_of_node(child))
+ phy_device_remove(phy);
clean_phy:
phy_device_free(phy);
clean_mii_ts:
unregister_mii_timestamper(mii_ts);
-clean_pse:
- pse_control_put(psec);
return rc;
}
diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index ef77bd1abae9..fefa40ea5227 100644
--- a/drivers/net/mdio/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
@@ -30,8 +30,7 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
values[0] = desired_child;
- gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
- s->gpios->info, values);
+ gpiod_multi_set_value_cansleep(s->gpios, values);
return 0;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 176935a8645f..e3722de08ea9 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -86,10 +86,10 @@ static DEFINE_SPINLOCK(target_list_lock);
static DEFINE_MUTEX(target_cleanup_list_lock);
/*
- * Console driver for extended netconsoles. Registered on the first use to
- * avoid unnecessarily enabling ext message formatting.
+ * Console drivers for netconsole. Each console is registered only while
+ * at least one target of the matching type exists.
*/
-static struct console netconsole_ext;
+static struct console netconsole_ext, netconsole;
struct netconsole_target_stats {
u64_stats_t xmit_drop_count;
@@ -97,6 +97,11 @@ struct netconsole_target_stats {
struct u64_stats_sync syncp;
};
+enum console_type {
+ CONS_BASIC = BIT(0),
+ CONS_EXTENDED = BIT(1),
+};
+
/* Features enabled in sysdata. Contrary to userdata, this data is populated by
* the kernel. The fields are designed as bitwise flags, allowing multiple
* features to be set in sysdata_fields.
@@ -108,6 +113,8 @@ enum sysdata_feature {
SYSDATA_TASKNAME = BIT(1),
/* Kernel release/version as part of sysdata */
SYSDATA_RELEASE = BIT(2),
+ /* Include a per-target message ID as part of sysdata */
+ SYSDATA_MSGID = BIT(3),
};
/**
@@ -118,6 +125,7 @@ enum sysdata_feature {
* @extradata_complete: Cached, formatted string of append
* @userdata_length: String length of userdata in extradata_complete.
* @sysdata_fields: Sysdata features enabled.
+ * @msgcounter: Message sent counter.
* @stats: Packet send stats for the target. Used for debugging.
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
@@ -148,6 +156,8 @@ struct netconsole_target {
size_t userdata_length;
/* bit-wise with sysdata_feature bits */
u32 sysdata_fields;
+ /* protected by target_list_lock */
+ u32 msgcounter;
#endif
struct netconsole_target_stats stats;
bool enabled;
@@ -273,6 +283,23 @@ static void netconsole_process_cleanups_core(void)
mutex_unlock(&target_cleanup_list_lock);
}
+static void netconsole_print_banner(struct netpoll *np)
+{
+ np_info(np, "local port %d\n", np->local_port);
+ if (np->ipv6)
+ np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
+ else
+ np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
+ np_info(np, "interface name '%s'\n", np->dev_name);
+ np_info(np, "local ethernet address '%pM'\n", np->dev_mac);
+ np_info(np, "remote port %d\n", np->remote_port);
+ if (np->ipv6)
+ np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
+ else
+ np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
+ np_info(np, "remote ethernet address %pM\n", np->remote_mac);
+}
+
#ifdef CONFIG_NETCONSOLE_DYNAMIC
/*
@@ -455,6 +482,46 @@ static ssize_t sysdata_release_enabled_show(struct config_item *item,
return sysfs_emit(buf, "%d\n", release_enabled);
}
+/* Iterate over the list of targets and make sure we don't leave any console
+ * registered without a target of the same type.
+ */
+static void unregister_netcons_consoles(void)
+{
+ struct netconsole_target *nt;
+ u32 console_type_needed = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&target_list_lock, flags);
+ list_for_each_entry(nt, &target_list, list) {
+ if (nt->extended)
+ console_type_needed |= CONS_EXTENDED;
+ else
+ console_type_needed |= CONS_BASIC;
+ }
+ spin_unlock_irqrestore(&target_list_lock, flags);
+
+ if (!(console_type_needed & CONS_EXTENDED) &&
+ console_is_registered(&netconsole_ext))
+ unregister_console(&netconsole_ext);
+
+ if (!(console_type_needed & CONS_BASIC) &&
+ console_is_registered(&netconsole))
+ unregister_console(&netconsole);
+}
+
+static ssize_t sysdata_msgid_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool msgid_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ msgid_enabled = !!(nt->sysdata_fields & SYSDATA_MSGID);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", msgid_enabled);
+}
+
/*
* This one is special -- targets created through the configfs interface
* are not enabled (and the corresponding netpoll activated) by default.
@@ -488,14 +555,24 @@ static ssize_t enabled_store(struct config_item *item,
goto out_unlock;
}
- if (nt->extended && !console_is_registered(&netconsole_ext))
+ if (nt->extended && !console_is_registered(&netconsole_ext)) {
+ netconsole_ext.flags |= CON_ENABLED;
register_console(&netconsole_ext);
+ }
+
+ /* The user might be enabling the basic format target for the very
+ * first time; make sure the console is registered.
+ */
+ if (!nt->extended && !console_is_registered(&netconsole)) {
+ netconsole.flags |= CON_ENABLED;
+ register_console(&netconsole);
+ }
/*
- * Skip netpoll_parse_options() -- all the attributes are
+ * Skip netconsole_parser_cmdline() -- all the attributes are
* already configured via configfs. Just print them out.
*/
- netpoll_print_options(&nt->np);
+ netconsole_print_banner(&nt->np);
ret = netpoll_setup(&nt->np);
if (ret)
@@ -517,6 +594,10 @@ static ssize_t enabled_store(struct config_item *item,
list_move(&nt->list, &target_cleanup_list);
spin_unlock_irqrestore(&target_list_lock, flags);
mutex_unlock(&target_cleanup_list_lock);
+ /* Unregister consoles whose last target of the matching type got
+ * disabled.
+ */
+ unregister_netcons_consoles();
}
ret = strnlen(buf, count);
@@ -736,6 +817,8 @@ static size_t count_extradata_entries(struct netconsole_target *nt)
entries += 1;
if (nt->sysdata_fields & SYSDATA_RELEASE)
entries += 1;
+ if (nt->sysdata_fields & SYSDATA_MSGID)
+ entries += 1;
return entries;
}
@@ -872,6 +955,40 @@ static void disable_sysdata_feature(struct netconsole_target *nt,
nt->extradata_complete[nt->userdata_length] = 0;
}
+static ssize_t sysdata_msgid_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool msgid_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &msgid_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_MSGID);
+ if (msgid_enabled == curr)
+ goto unlock_ok;
+
+ if (msgid_enabled &&
+ count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (msgid_enabled)
+ nt->sysdata_fields |= SYSDATA_MSGID;
+ else
+ disable_sysdata_feature(nt, SYSDATA_MSGID);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
static ssize_t sysdata_release_enabled_store(struct config_item *item,
const char *buf, size_t count)
{
@@ -987,6 +1104,7 @@ CONFIGFS_ATTR(userdatum_, value);
CONFIGFS_ATTR(sysdata_, cpu_nr_enabled);
CONFIGFS_ATTR(sysdata_, taskname_enabled);
CONFIGFS_ATTR(sysdata_, release_enabled);
+CONFIGFS_ATTR(sysdata_, msgid_enabled);
static struct configfs_attribute *userdatum_attrs[] = {
&userdatum_attr_value,
@@ -1049,6 +1167,7 @@ static struct configfs_attribute *userdata_attrs[] = {
&sysdata_attr_cpu_nr_enabled,
&sysdata_attr_taskname_enabled,
&sysdata_attr_release_enabled,
+ &sysdata_attr_msgid_enabled,
NULL,
};
@@ -1246,6 +1365,14 @@ static int sysdata_append_release(struct netconsole_target *nt, int offset)
init_utsname()->release);
}
+static int sysdata_append_msgid(struct netconsole_target *nt, int offset)
+{
+ wrapping_assign_add(nt->msgcounter, 1);
+ return scnprintf(&nt->extradata_complete[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " msgid=%u\n",
+ nt->msgcounter);
+}
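
sysdata_append_msgid() above stamps each message with a per-target " msgid=<n>" entry, using a counter that simply wraps around at the 32-bit boundary. A tiny user-space sketch of that wrapping-counter formatting (snprintf stands in for the kernel's scnprintf and the buffer size is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t msgcounter = UINT32_MAX - 1; /* force a wrap for the demo */
        char entry[64];

        for (int i = 0; i < 3; i++) {
            msgcounter++; /* unsigned wrap-around is well defined */
            snprintf(entry, sizeof(entry), " msgid=%u\n", msgcounter);
            printf("%s", entry);
        }
        return 0;
    }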
+
/*
* prepare_extradata - append sysdata at extradata_complete in runtime
* @nt: target to send message to
@@ -1268,6 +1395,8 @@ static int prepare_extradata(struct netconsole_target *nt)
extradata_len += sysdata_append_taskname(nt, extradata_len);
if (nt->sysdata_fields & SYSDATA_RELEASE)
extradata_len += sysdata_append_release(nt, extradata_len);
+ if (nt->sysdata_fields & SYSDATA_MSGID)
+ extradata_len += sysdata_append_msgid(nt, extradata_len);
WARN_ON_ONCE(extradata_len >
MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS);
@@ -1613,6 +1742,120 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
spin_unlock_irqrestore(&target_list_lock, flags);
}
+static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
+{
+ const char *end;
+
+ if (!strchr(str, ':') &&
+ in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
+ if (!*end)
+ return 0;
+ }
+ if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!*end)
+ return 1;
+#else
+ return -1;
+#endif
+ }
+ return -1;
+}
+
+static int netconsole_parser_cmdline(struct netpoll *np, char *opt)
+{
+ bool ipversion_set = false;
+ char *cur = opt;
+ char *delim;
+ int ipv6;
+
+ if (*cur != '@') {
+ delim = strchr(cur, '@');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ if (kstrtou16(cur, 10, &np->local_port))
+ goto parse_failed;
+ cur = delim;
+ }
+ cur++;
+
+ if (*cur != '/') {
+ ipversion_set = true;
+ delim = strchr(cur, '/');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
+ if (ipv6 < 0)
+ goto parse_failed;
+ else
+ np->ipv6 = (bool)ipv6;
+ cur = delim;
+ }
+ cur++;
+
+ if (*cur != ',') {
+ /* parse out dev_name or dev_mac */
+ delim = strchr(cur, ',');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+
+ np->dev_name[0] = '\0';
+ eth_broadcast_addr(np->dev_mac);
+ if (!strchr(cur, ':'))
+ strscpy(np->dev_name, cur, sizeof(np->dev_name));
+ else if (!mac_pton(cur, np->dev_mac))
+ goto parse_failed;
+
+ cur = delim;
+ }
+ cur++;
+
+ if (*cur != '@') {
+ /* dst port */
+ delim = strchr(cur, '@');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ if (*cur == ' ' || *cur == '\t')
+ np_info(np, "warning: whitespace is not allowed\n");
+ if (kstrtou16(cur, 10, &np->remote_port))
+ goto parse_failed;
+ cur = delim;
+ }
+ cur++;
+
+ /* dst ip */
+ delim = strchr(cur, '/');
+ if (!delim)
+ goto parse_failed;
+ *delim = 0;
+ ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
+ if (ipv6 < 0)
+ goto parse_failed;
+ else if (ipversion_set && np->ipv6 != (bool)ipv6)
+ goto parse_failed;
+ else
+ np->ipv6 = (bool)ipv6;
+ cur = delim + 1;
+
+ if (*cur != 0) {
+ /* MAC address */
+ if (!mac_pton(cur, np->remote_mac))
+ goto parse_failed;
+ }
+
+ netconsole_print_banner(np);
+
+ return 0;
+
+ parse_failed:
+ np_info(np, "couldn't parse config at '%s'!\n", cur);
+ return -1;
+}
+
/* Allocate new target (from boot/module param) and setup netpoll for it */
static struct netconsole_target *alloc_param_target(char *target_config,
int cmdline_count)
@@ -1642,7 +1885,7 @@ static struct netconsole_target *alloc_param_target(char *target_config,
}
/* Parse parameters and setup netpoll */
- err = netpoll_parse_options(&nt->np, target_config);
+ err = netconsole_parser_cmdline(&nt->np, target_config);
if (err)
goto fail;
@@ -1690,8 +1933,8 @@ static int __init init_netconsole(void)
{
int err;
struct netconsole_target *nt, *tmp;
+ u32 console_type_needed = 0;
unsigned int count = 0;
- bool extended = false;
unsigned long flags;
char *target_config;
char *input = config;
@@ -1707,9 +1950,10 @@ static int __init init_netconsole(void)
}
/* Dump existing printks when we register */
if (nt->extended) {
- extended = true;
+ console_type_needed |= CONS_EXTENDED;
netconsole_ext.flags |= CON_PRINTBUFFER;
} else {
+ console_type_needed |= CONS_BASIC;
netconsole.flags |= CON_PRINTBUFFER;
}
@@ -1728,9 +1972,10 @@ static int __init init_netconsole(void)
if (err)
goto undonotifier;
- if (extended)
+ if (console_type_needed & CONS_EXTENDED)
register_console(&netconsole_ext);
- register_console(&netconsole);
+ if (console_type_needed & CONS_BASIC)
+ register_console(&netconsole);
pr_info("network logging started\n");
return err;
@@ -1760,7 +2005,8 @@ static void __exit cleanup_netconsole(void)
if (console_is_registered(&netconsole_ext))
unregister_console(&netconsole_ext);
- unregister_console(&netconsole);
+ if (console_is_registered(&netconsole))
+ unregister_console(&netconsole);
dynamic_netconsole_exit();
unregister_netdevice_notifier(&netconsole_netdev_notifier);
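
netconsole_parser_cmdline() above takes over boot-time parsing from netpoll_parse_options(); the target syntax stays [src-port]@[src-ip]/[dev],[dst-port]@<dst-ip>/[dst-mac], with the device field now accepting either an interface name or a MAC address. A rough user-space splitter for that grammar (the addresses and ports below are made up, and the in-kernel parser additionally validates every field):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char cfg[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/aa:bb:cc:dd:ee:ff";
        char *remote = strchr(cfg, ',');
        char *dev;

        if (!remote)
            return 1;
        *remote++ = '\0';

        printf("local  port: %s\n", strtok(cfg, "@"));
        printf("local  ip:   %s\n", strtok(NULL, "/"));
        dev = strtok(NULL, "");
        printf("local  dev:  %s (%s)\n", dev,
               strchr(dev, ':') ? "MAC address" : "interface name");

        printf("remote port: %s\n", strtok(remote, "@"));
        printf("remote ip:   %s\n", strtok(NULL, "/"));
        printf("remote mac:  %s\n", strtok(NULL, ""));
        return 0;
    }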
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index fa5fbd97ad69..e36d3e846c2d 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -93,19 +93,14 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);
rcu_read_unlock();
- u64_stats_update_begin(&ns->syncp);
- ns->tx_packets++;
- ns->tx_bytes += len;
- u64_stats_update_end(&ns->syncp);
+ dev_dstats_tx_add(dev, len);
return NETDEV_TX_OK;
out_drop_free:
dev_kfree_skb(skb);
out_drop_cnt:
rcu_read_unlock();
- u64_stats_update_begin(&ns->syncp);
- ns->tx_dropped++;
- u64_stats_update_end(&ns->syncp);
+ dev_dstats_tx_dropped(dev);
return NETDEV_TX_OK;
}
@@ -126,20 +121,6 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static void
-nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
- struct netdevsim *ns = netdev_priv(dev);
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin(&ns->syncp);
- stats->tx_bytes = ns->tx_bytes;
- stats->tx_packets = ns->tx_packets;
- stats->tx_dropped = ns->tx_dropped;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
-}
-
static int
nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
@@ -350,16 +331,24 @@ static int nsim_get_iflink(const struct net_device *dev)
static int nsim_rcv(struct nsim_rq *rq, int budget)
{
+ struct net_device *dev = rq->napi.dev;
struct sk_buff *skb;
- int i;
+ unsigned int skblen;
+ int i, ret;
for (i = 0; i < budget; i++) {
if (skb_queue_empty(&rq->skb_queue))
break;
skb = skb_dequeue(&rq->skb_queue);
+ /* skb might be discarded by netif_receive_skb(), so save the len first */
+ skblen = skb->len;
skb_mark_napi_id(skb, &rq->napi);
- netif_receive_skb(skb);
+ ret = netif_receive_skb(skb);
+ if (ret == NET_RX_SUCCESS)
+ dev_dstats_rx_add(dev, skblen);
+ else
+ dev_dstats_rx_dropped(dev);
}
return i;
@@ -556,7 +545,6 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
- .ndo_get_stats64 = nsim_get_stats64,
.ndo_set_vf_mac = nsim_set_vf_mac,
.ndo_set_vf_vlan = nsim_set_vf_vlan,
.ndo_set_vf_rate = nsim_set_vf_rate,
@@ -580,7 +568,6 @@ static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
- .ndo_get_stats64 = nsim_get_stats64,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
};
@@ -594,7 +581,7 @@ static void nsim_get_queue_stats_rx(struct net_device *dev, int idx,
struct rtnl_link_stats64 rtstats = {};
if (!idx)
- nsim_get_stats64(dev, &rtstats);
+ dev_get_stats(dev, &rtstats);
stats->packets = rtstats.rx_packets - !!rtstats.rx_packets;
stats->bytes = rtstats.rx_bytes;
@@ -606,7 +593,7 @@ static void nsim_get_queue_stats_tx(struct net_device *dev, int idx,
struct rtnl_link_stats64 rtstats = {};
if (!idx)
- nsim_get_stats64(dev, &rtstats);
+ dev_get_stats(dev, &rtstats);
stats->packets = rtstats.tx_packets - !!rtstats.tx_packets;
stats->bytes = rtstats.tx_bytes;
@@ -618,7 +605,7 @@ static void nsim_get_base_stats(struct net_device *dev,
{
struct rtnl_link_stats64 rtstats = {};
- nsim_get_stats64(dev, &rtstats);
+ dev_get_stats(dev, &rtstats);
rx->packets = !!rtstats.rx_packets;
rx->bytes = 0;
@@ -645,9 +632,12 @@ static struct nsim_rq *nsim_queue_alloc(void)
return rq;
}
-static void nsim_queue_free(struct nsim_rq *rq)
+static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
{
hrtimer_cancel(&rq->napi_timer);
+ local_bh_disable();
+ dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
+ local_bh_enable();
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
kfree(rq);
}
@@ -694,7 +684,7 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
return 0;
err_free:
- nsim_queue_free(qmem->rq);
+ nsim_queue_free(dev, qmem->rq);
return err;
}
@@ -708,7 +698,7 @@ static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
if (!ns->rq_reset_mode)
netif_napi_del_locked(&qmem->rq->napi);
page_pool_destroy(qmem->rq->page_pool);
- nsim_queue_free(qmem->rq);
+ nsim_queue_free(dev, qmem->rq);
}
}
@@ -890,6 +880,7 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_HW_CSUM |
NETIF_F_LRO |
NETIF_F_TSO;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
dev->max_mtu = ETH_MAX_MTU;
dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
@@ -925,7 +916,7 @@ static void nsim_queue_uninit(struct netdevsim *ns)
int i;
for (i = 0; i < dev->num_rx_queues; i++)
- nsim_queue_free(ns->rq[i]);
+ nsim_queue_free(dev, ns->rq[i]);
kfree(ns->rq);
ns->rq = NULL;
@@ -1022,7 +1013,6 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
dev_net_set(dev, nsim_dev_net(nsim_dev));
ns = netdev_priv(dev);
ns->netdev = dev;
- u64_stats_init(&ns->syncp);
ns->nsim_dev = nsim_dev;
ns->nsim_dev_port = nsim_dev_port;
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
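
netdevsim above drops its private tx_packets/tx_bytes/tx_dropped fields and u64_stats_sync in favour of the stack's generic per-CPU dstats counters (selected through pcpu_stat_type), bumping them on the hot path and letting the core sum them when stats are read. A simplified, single-threaded user-space analogue of that accumulate-then-fold idea; NR_CPUS, the struct layout and the helper names are all invented for the sketch:

    #include <stdio.h>

    #define NR_CPUS 4

    struct dstats {
        unsigned long long tx_packets, tx_bytes, tx_drops;
    };

    static struct dstats percpu[NR_CPUS];

    /* hot path: bump only the "local" CPU's counters */
    static void dstats_tx_add(int cpu, unsigned int len)
    {
        percpu[cpu].tx_packets++;
        percpu[cpu].tx_bytes += len;
    }

    /* slow path (stats read): fold all per-CPU counters together */
    static struct dstats dstats_sum(void)
    {
        struct dstats sum = { 0 };

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            sum.tx_packets += percpu[cpu].tx_packets;
            sum.tx_bytes += percpu[cpu].tx_bytes;
            sum.tx_drops += percpu[cpu].tx_drops;
        }
        return sum;
    }

    int main(void)
    {
        dstats_tx_add(0, 1500);
        dstats_tx_add(1, 60);

        struct dstats s = dstats_sum();
        printf("tx: %llu packets, %llu bytes\n", s.tx_packets, s.tx_bytes);
        return 0;
    }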
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index d04401f0bdf7..4a0c48c7a384 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -108,11 +108,6 @@ struct netdevsim {
int rq_reset_mode;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_dropped;
- struct u64_stats_sync syncp;
-
struct nsim_bus_dev *nsim_bus_dev;
struct bpf_prog *bpf_offloaded;
@@ -131,7 +126,6 @@ struct netdevsim {
struct nsim_macsec macsec;
struct {
u32 inject_error;
- u32 sleep;
u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
struct dentry *ddir;
@@ -342,7 +336,6 @@ struct nsim_dev {
bool ipv4_only;
bool shared;
bool static_iana_vxlan;
- u32 sleep;
} udp_ports;
struct nsim_dev_psample *psample;
u16 esw_mode;
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 640b4983a9a0..89fff76e51cf 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -18,9 +18,6 @@ nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
ret = -ns->udp_ports.inject_error;
ns->udp_ports.inject_error = 0;
- if (ns->udp_ports.sleep)
- msleep(ns->udp_ports.sleep);
-
if (!ret) {
if (ns->udp_ports.ports[table][entry]) {
WARN(1, "entry already in use\n");
@@ -47,8 +44,6 @@ nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
ret = -ns->udp_ports.inject_error;
ns->udp_ports.inject_error = 0;
- if (ns->udp_ports.sleep)
- msleep(ns->udp_ports.sleep);
if (!ret) {
u32 val = be16_to_cpu(ti->port) << 16 | ti->type;
@@ -112,12 +107,10 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
struct net_device *dev = file->private_data;
struct netdevsim *ns = netdev_priv(dev);
- rtnl_lock();
if (dev->reg_state == NETREG_REGISTERED) {
memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
udp_tunnel_nic_reset_ntf(dev);
}
- rtnl_unlock();
return count;
}
@@ -172,7 +165,6 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
GFP_KERNEL);
if (!info)
return -ENOMEM;
- ns->udp_ports.sleep = nsim_dev->udp_ports.sleep;
if (nsim_dev->udp_ports.sync_all) {
info->set_port = NULL;
@@ -181,8 +173,6 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
info->sync_table = NULL;
}
- if (ns->udp_ports.sleep)
- info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
if (nsim_dev->udp_ports.open_only)
info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
if (nsim_dev->udp_ports.ipv4_only)
@@ -217,6 +207,4 @@ void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev)
&nsim_dev->udp_ports.shared);
debugfs_create_bool("udp_ports_static_iana_vxlan", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.static_iana_vxlan);
- debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir,
- &nsim_dev->udp_ports.sleep);
}
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
index bff00946eae2..254cc94c4617 100644
--- a/drivers/net/ovpn/udp.c
+++ b/drivers/net/ovpn/udp.c
@@ -199,7 +199,7 @@ static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
transmit:
udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
- fl.fl4_dport, false, sk->sk_no_check_tx);
+ fl.fl4_dport, false, sk->sk_no_check_tx, 0);
ret = 0;
err:
local_bh_enable();
@@ -274,7 +274,7 @@ transmit:
skb->ignore_df = 1;
udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
- fl.fl6_dport, udp_get_no_check6_tx(sk));
+ fl.fl6_dport, udp_get_no_check6_tx(sk), 0);
ret = 0;
err:
local_bh_enable();
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 53dad2482026..28acc6392cfc 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -25,6 +25,9 @@ if PHYLIB
config SWPHY
bool
+config PHY_PACKAGE
+ tristate
+
config LED_TRIGGER_PHY
bool "Support LED triggers for tracking link state"
depends on LEDS_TRIGGERS
@@ -157,6 +160,7 @@ config BCM54140_PHY
tristate "Broadcom BCM54140 PHY"
depends on HWMON || HWMON=n
select BCM_NET_PHYLIB
+ select PHY_PACKAGE
help
Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
@@ -292,6 +296,7 @@ source "drivers/net/phy/mediatek/Kconfig"
config MICREL_PHY
tristate "Micrel PHYs"
depends on PTP_1588_CLOCK_OPTIONAL
+ select PHY_PACKAGE
help
Supports the KSZ9021, VSC8201, KS8001 PHYs.
@@ -323,6 +328,7 @@ config MICROSEMI_PHY
depends on MACSEC || MACSEC=n
depends on PTP_1588_CLOCK_OPTIONAL || !NETWORK_PHY_TIMESTAMPING
select CRYPTO_LIB_AES if MACSEC
+ select PHY_PACKAGE
help
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 7827609e9032..b4795aaf9c1c 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,7 +3,7 @@
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
linkmode.o phy_link_topology.o \
- phy_package.o phy_caps.o mdio_bus_provider.o
+ phy_caps.o mdio_bus_provider.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_PHYLIB
@@ -19,6 +19,7 @@ obj-$(CONFIG_MDIO_BUS) += mdio-bus.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
obj-$(CONFIG_PHYLIB) += mdio_devres.o
+obj-$(CONFIG_PHY_PACKAGE) += phy_package.o
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 01255dada600..33db21251f2e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -516,7 +516,6 @@ static int dp83822_config_init_leds(struct phy_device *phydev)
static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
int rgmii_delay = 0;
s32 rx_int_delay;
s32 tx_int_delay;
@@ -549,15 +548,13 @@ static int dp83822_config_init(struct phy_device *phydev)
return err;
if (phy_interface_is_rgmii(phydev)) {
- rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
- true);
+ rx_int_delay = phy_get_internal_delay(phydev, NULL, 0, true);
/* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
if (rx_int_delay > 0)
rgmii_delay |= DP83822_RX_CLK_SHIFT;
- tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
- false);
+ tx_int_delay = phy_get_internal_delay(phydev, NULL, 0, false);
/* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
if (tx_int_delay <= 0)
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index a62cd838a9ea..a2cd1cc35cde 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -540,9 +540,8 @@ static const int dp83869_internal_delay[] = {250, 500, 750, 1000, 1250, 1500,
static int dp83869_of_init(struct phy_device *phydev)
{
+ struct device_node *of_node = phydev->mdio.dev.of_node;
struct dp83869_private *dp83869 = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct device_node *of_node = dev->of_node;
int delay_size = ARRAY_SIZE(dp83869_internal_delay);
int ret;
@@ -597,13 +596,13 @@ static int dp83869_of_init(struct phy_device *phydev)
&dp83869->tx_fifo_depth))
dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
- dp83869->rx_int_delay = phy_get_internal_delay(phydev, dev,
+ dp83869->rx_int_delay = phy_get_internal_delay(phydev,
&dp83869_internal_delay[0],
delay_size, true);
if (dp83869->rx_int_delay < 0)
dp83869->rx_int_delay = DP83869_CLK_DELAY_DEF;
- dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev,
+ dp83869->tx_int_delay = phy_get_internal_delay(phydev,
&dp83869_internal_delay[0],
delay_size, false);
if (dp83869->tx_int_delay < 0)
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 7e76323409c4..391c1d868808 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -13,21 +13,92 @@
#include "open_alliance_helpers.h"
/*
+ * DP83TG720 PHY Limitations and Workarounds
+ *
+ * The DP83TG720 1000BASE-T1 PHY has several limitations that require
+ * software-side mitigations. These workarounds are implemented throughout
+ * this driver. This section documents the known issues and their corresponding
+ * mitigation strategies.
+ *
+ * 1. Unreliable Link Detection and Synchronized Reset Deadlock
+ * ------------------------------------------------------------
+ * After a link loss or during link establishment, the DP83TG720 PHY may fail
+ * to detect or report link status correctly. As of June 2025, no public
+ * errata sheet for the DP83TG720 PHY documents this behavior.
+ * The "DP83TC81x, DP83TG72x Software Implementation Guide" application note
+ * (SNLA404, available at https://www.ti.com/lit/an/snla404/snla404.pdf)
+ * recommends performing a soft restart if polling for a link fails to establish
+ * a connection after 100ms. This procedure is adopted as the workaround for the
+ * observed link detection issue.
+ *
+ * However, in point-to-point setups where both link partners use the same
+ * driver (e.g. Linux on both sides), a synchronized reset pattern may emerge.
+ * This leads to a deadlock, where both PHYs reset at the same time and
+ * continuously miss each other during auto-negotiation.
+ *
+ * To address this, the reset procedure includes two components:
+ *
+ * - A **fixed minimum delay of 1ms** after a hardware reset. The datasheet
+ * "DP83TG720S-Q1 1000BASE-T1 Automotive Ethernet PHY with SGMII and RGMII"
+ * specifies this as the "Post reset stabilization-time prior to MDC preamble
+ * for register access" (T6.2), ensuring the PHY is ready for MDIO
+ * operations.
+ *
+ * - An **additional asymmetric delay**, empirically chosen based on
+ * master/slave role. This reduces the risk of synchronized resets on both
+ * link partners. Values are selected to avoid periodic overlap and ensure
+ * the link is re-established within a few cycles.
+ *
+ * The functions that implement this logic are:
+ * - dp83tg720_soft_reset()
+ * - dp83tg720_get_next_update_time()
+ *
+ * 2. Polling-Based Link Detection and IRQ Support
+ * -----------------------------------------------
+ * Due to the PHY-specific limitation described in section 1, link-up events
+ * cannot be reliably detected via interrupts on the DP83TG720. Therefore,
+ * polling is required to detect transitions from link-down to link-up.
+ *
+ * While link-down events *can* be detected via IRQs on this PHY, this driver
+ * currently does **not** implement interrupt support. As a result, all link
+ * state changes must be detected using polling.
+ *
+ * Polling behavior:
+ * - When the link is up: slow polling (e.g. 1s).
+ * - When the link just went down: fast polling for a short time.
+ * - When the link stays down: fallback to slow polling.
+ *
+ * This design balances responsiveness and CPU usage. It sacrifices fast link-up
+ * times in cases where the link is expected to remain down for extended periods,
+ * assuming that such systems do not require immediate reactivity.
+ */
+
+/*
* DP83TG720S_POLL_ACTIVE_LINK - Polling interval in milliseconds when the link
* is active.
- * DP83TG720S_POLL_NO_LINK_MIN - Minimum polling interval in milliseconds when
- * the link is down.
- * DP83TG720S_POLL_NO_LINK_MAX - Maximum polling interval in milliseconds when
- * the link is down.
+ * DP83TG720S_POLL_NO_LINK - Polling interval in milliseconds when the
+ * link is down.
+ * DP83TG720S_FAST_POLL_DURATION_MS - Timeout in milliseconds for no-link
+ * polling after which the polling
+ * interval is increased.
+ * DP83TG720S_POLL_SLOW - Slow polling interval when there is no
+ * link for a prolonged period.
+ * DP83TG720S_RESET_DELAY_MS_MASTER - Delay after a reset before attempting
+ * to establish a link again for master phy.
+ * DP83TG720S_RESET_DELAY_MS_SLAVE - Delay after a reset before attempting
+ * to establish a link again for slave phy.
*
* These values are not documented or officially recommended by the vendor but
* were determined through empirical testing. They achieve a good balance in
* minimizing the number of reset retries while ensuring reliable link recovery
* within a reasonable timeframe.
*/
-#define DP83TG720S_POLL_ACTIVE_LINK 1000
-#define DP83TG720S_POLL_NO_LINK_MIN 100
-#define DP83TG720S_POLL_NO_LINK_MAX 1000
+#define DP83TG720S_POLL_ACTIVE_LINK 421
+#define DP83TG720S_POLL_NO_LINK 149
+#define DP83TG720S_FAST_POLL_DURATION_MS 6000
+#define DP83TG720S_POLL_SLOW 1117
+#define DP83TG720S_RESET_DELAY_MS_MASTER 97
+#define DP83TG720S_RESET_DELAY_MS_SLAVE 149
#define DP83TG720S_PHY_ID 0x2000a284
@@ -124,6 +195,7 @@ struct dp83tg720_stats {
struct dp83tg720_priv {
struct dp83tg720_stats stats;
+ unsigned long last_link_down_jiffies;
};
/**
@@ -201,6 +273,26 @@ static int dp83tg720_update_stats(struct phy_device *phydev)
return 0;
}
+static int dp83tg720_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, DP83TG720S_PHY_RESET, DP83TG720S_HW_RESET);
+ if (ret)
+ return ret;
+
+ /* Include mandatory MDC-access delay (1ms) + extra asymmetric delay to
+ * avoid synchronized reset deadlock. See section 1 in the top-of-file
+ * comment block.
+ */
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_SLAVE)
+ msleep(DP83TG720S_RESET_DELAY_MS_SLAVE);
+ else
+ msleep(DP83TG720S_RESET_DELAY_MS_MASTER);
+
+ return ret;
+}
+
static void dp83tg720_get_link_stats(struct phy_device *phydev,
struct ethtool_link_ext_stats *link_stats)
{
@@ -382,21 +474,11 @@ static int dp83tg720_read_status(struct phy_device *phydev)
/* According to the "DP83TC81x, DP83TG72x Software
* Implementation Guide", the PHY needs to be reset after a
* link loss or if no link is created after at least 100ms.
- *
- * Currently we are polling with the PHY_STATE_TIME (1000ms)
- * interval, which is still enough for not automotive use cases.
*/
ret = phy_init_hw(phydev);
if (ret)
return ret;
- /* Sleep 600ms for PHY stabilization post-reset.
- * Empirically chosen value (not documented).
- * Helps reduce reset bounces with link partners having similar
- * issues.
- */
- msleep(600);
-
/* After HW reset we need to restore master/slave configuration.
* genphy_c45_pma_baset1_read_master_slave() call will be done
* by the dp83tg720_config_aneg() function.
@@ -477,19 +559,11 @@ static int dp83tg720_config_init(struct phy_device *phydev)
{
int ret;
- /* Software Restart is not enough to recover from a link failure.
- * Using Hardware Reset instead.
- */
- ret = phy_write(phydev, DP83TG720S_PHY_RESET, DP83TG720S_HW_RESET);
+ /* Reset the PHY to recover from a link failure */
+ ret = dp83tg720_soft_reset(phydev);
if (ret)
return ret;
- /* Wait until MDC can be used again.
- * The wait value of one 1ms is documented in "DP83TG720S-Q1 1000BASE-T1
- * Automotive Ethernet PHY with SGMII and RGMII" datasheet.
- */
- usleep_range(1000, 2000);
-
if (phy_interface_is_rgmii(phydev)) {
ret = dp83tg720_config_rgmii_delay(phydev);
if (ret)
@@ -525,50 +599,42 @@ static int dp83tg720_probe(struct phy_device *phydev)
}
/**
- * dp83tg720_get_next_update_time - Determine the next update time for PHY
- * state
+ * dp83tg720_get_next_update_time - Return next polling interval for PHY state
* @phydev: Pointer to the phy_device structure
*
- * This function addresses a limitation of the DP83TG720 PHY, which cannot
- * reliably detect or report a stable link state. To recover from such
- * scenarios, the PHY must be periodically reset when the link is down. However,
- * if the link partner also runs Linux with the same driver, synchronized reset
- * intervals can lead to a deadlock where the link never establishes due to
- * simultaneous resets on both sides.
- *
- * To avoid this, the function implements randomized polling intervals when the
- * link is down. It ensures that reset intervals are desynchronized by
- * introducing a random delay between a configured minimum and maximum range.
- * When the link is up, a fixed polling interval is used to minimize overhead.
- *
- * This mechanism guarantees that the link will reestablish within 10 seconds
- * in the worst-case scenario.
+ * Implements adaptive polling interval logic depending on link state and
+ * downtime duration. See the "2. Polling-Based Link Detection and IRQ Support"
+ * section at the top of this file for details.
*
- * Return: Time (in jiffies) until the next update event for the PHY state
- * machine.
+ * Return: Time (in jiffies) until the next poll
*/
static unsigned int dp83tg720_get_next_update_time(struct phy_device *phydev)
{
+ struct dp83tg720_priv *priv = phydev->priv;
unsigned int next_time_jiffies;
if (phydev->link) {
- /* When the link is up, use a fixed 1000ms interval
- * (in jiffies)
- */
+ priv->last_link_down_jiffies = 0;
+
+ /* When the link is up, use a slower interval (in jiffies) */
next_time_jiffies =
msecs_to_jiffies(DP83TG720S_POLL_ACTIVE_LINK);
} else {
- unsigned int min_jiffies, max_jiffies, rand_jiffies;
-
- /* When the link is down, randomize interval between min/max
- * (in jiffies)
- */
- min_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MIN);
- max_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MAX);
-
- rand_jiffies = min_jiffies +
- get_random_u32_below(max_jiffies - min_jiffies + 1);
- next_time_jiffies = rand_jiffies;
+ unsigned long now = jiffies;
+
+ if (!priv->last_link_down_jiffies)
+ priv->last_link_down_jiffies = now;
+
+ if (time_before(now, priv->last_link_down_jiffies +
+ msecs_to_jiffies(DP83TG720S_FAST_POLL_DURATION_MS))) {
+ /* Link recently went down: fast polling */
+ next_time_jiffies =
+ msecs_to_jiffies(DP83TG720S_POLL_NO_LINK);
+ } else {
+ /* Link has been down for a while: slow polling */
+ next_time_jiffies =
+ msecs_to_jiffies(DP83TG720S_POLL_SLOW);
+ }
}
/* Ensure the polling time is at least one jiffy */
@@ -582,6 +648,7 @@ static struct phy_driver dp83tg720_driver[] = {
.flags = PHY_POLL_CABLE_TEST,
.probe = dp83tg720_probe,
+ .soft_reset = dp83tg720_soft_reset,
.config_aneg = dp83tg720_config_aneg,
.read_status = dp83tg720_read_status,
.get_features = genphy_c45_pma_read_ext_abilities,
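Editorial note: the asymmetric reset delays above (97 ms for the master, 149 ms for the slave) together with the fixed no-link poll interval are what break the synchronized-reset deadlock described in the top-of-file comment. The standalone illustration below is not driver code; it is a deliberately simplified model (the periods only approximate the driver's reset-plus-poll cycle) showing how unequal retry periods drift apart even when both link partners start resetting at the same instant:

/* Illustration only: two peers that reset simultaneously drift apart
 * when their retry periods differ. Periods approximate the driver's
 * reset delay plus no-link poll interval.
 */
#include <stdio.h>

int main(void)
{
	const int master_period = 97 + 149;	/* RESET_DELAY_MS_MASTER + POLL_NO_LINK */
	const int slave_period = 149 + 149;	/* RESET_DELAY_MS_SLAVE + POLL_NO_LINK */
	int master_next = 0, slave_next = 0;

	for (int i = 0; i < 5; i++) {
		printf("cycle %d: master %d ms, slave %d ms, offset %d ms\n",
		       i, master_next, slave_next, slave_next - master_next);
		master_next += master_period;
		slave_next += slave_period;
	}
	return 0;
}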
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index a44771e8acdc..9766dd99afaa 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -174,7 +174,6 @@ static const int xway_internal_delay[] = {0, 500, 1000, 1500, 2000, 2500,
static int xway_gphy_rgmii_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->mdio.dev;
unsigned int delay_size = ARRAY_SIZE(xway_internal_delay);
s32 int_delay;
int val = 0;
@@ -207,8 +206,7 @@ static int xway_gphy_rgmii_init(struct phy_device *phydev)
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- int_delay = phy_get_internal_delay(phydev, dev,
- xway_internal_delay,
+ int_delay = phy_get_internal_delay(phydev, xway_internal_delay,
delay_size, true);
/* if rx-internal-delay-ps is missing, use default of 2.0 ns */
@@ -220,8 +218,7 @@ static int xway_gphy_rgmii_init(struct phy_device *phydev)
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- int_delay = phy_get_internal_delay(phydev, dev,
- xway_internal_delay,
+ int_delay = phy_get_internal_delay(phydev, xway_internal_delay,
delay_size, false);
/* if tx-internal-delay-ps is missing, use default of 2.0 ns */
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
index 2de679a68115..d3184e8f12ec 100644
--- a/drivers/net/phy/mdio-boardinfo.c
+++ b/drivers/net/phy/mdio-boardinfo.c
@@ -3,17 +3,23 @@
* mdio-boardinfo - Collect pre-declarations for MDIO devices
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/mutex.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
#include "mdio-boardinfo.h"
static LIST_HEAD(mdio_board_list);
static DEFINE_MUTEX(mdio_board_lock);
+struct mdio_board_entry {
+ struct list_head list;
+ struct mdio_board_info board_info;
+};
+
/**
* mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices
* from pre-collected board specific MDIO information
@@ -26,24 +32,18 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
(struct mii_bus *bus,
struct mdio_board_info *bi))
{
- struct mdio_board_entry *be;
- struct mdio_board_entry *tmp;
- struct mdio_board_info *bi;
- int ret;
+ struct mdio_board_entry *be, *tmp;
mutex_lock(&mdio_board_lock);
list_for_each_entry_safe(be, tmp, &mdio_board_list, list) {
- bi = &be->board_info;
+ struct mdio_board_info *bi = &be->board_info;
if (strcmp(bus->id, bi->bus_id))
continue;
mutex_unlock(&mdio_board_lock);
- ret = cb(bus, bi);
+ cb(bus, bi);
mutex_lock(&mdio_board_lock);
- if (ret)
- continue;
-
}
mutex_unlock(&mdio_board_lock);
}
@@ -62,14 +62,13 @@ int mdiobus_register_board_info(const struct mdio_board_info *info,
unsigned int n)
{
struct mdio_board_entry *be;
- unsigned int i;
be = kcalloc(n, sizeof(*be), GFP_KERNEL);
if (!be)
return -ENOMEM;
- for (i = 0; i < n; i++, be++, info++) {
- memcpy(&be->board_info, info, sizeof(*info));
+ for (int i = 0; i < n; i++, be++) {
+ be->board_info = info[i];
mutex_lock(&mdio_board_lock);
list_add_tail(&be->list, &mdio_board_list);
mutex_unlock(&mdio_board_lock);
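Editorial note: consumers of this code register their pre-declarations with mdiobus_register_board_info(), roughly as in the hypothetical board-file sketch below. This is not part of this patch, and the struct mdio_board_info field names are quoted from memory of include/linux/phy.h; verify them against the tree before reuse.

/* Hypothetical sketch: pre-declare an MDIO device at address 3 on the bus
 * named "mdio-gpio.0" so mdiobus_setup_mdiodev_from_board_info() can create
 * it once that bus registers. Bus name, modalias and address are made up.
 */
#include <linux/kernel.h>
#include <linux/phy.h>

static const struct mdio_board_info board_mdio_info[] = {
	{
		.bus_id		= "mdio-gpio.0",	/* must match mii_bus->id */
		.modalias	= "example-mdio-dev",	/* used for driver matching */
		.mdio_addr	= 3,
		.platform_data	= NULL,
	},
};

static int __init board_mdio_init(void)
{
	return mdiobus_register_board_info(board_mdio_info,
					   ARRAY_SIZE(board_mdio_info));
}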
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
index 773bb51399be..0878b77878d4 100644
--- a/drivers/net/phy/mdio-boardinfo.h
+++ b/drivers/net/phy/mdio-boardinfo.h
@@ -7,13 +7,8 @@
#ifndef __MDIO_BOARD_INFO_H
#define __MDIO_BOARD_INFO_H
-#include <linux/phy.h>
-#include <linux/mutex.h>
-
-struct mdio_board_entry {
- struct list_head list;
- struct mdio_board_info board_info;
-};
+struct mii_bus;
+struct mdio_board_info;
void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
int (*cb)
diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c
index 65850e36284d..48dc4bf85125 100644
--- a/drivers/net/phy/mdio_bus_provider.c
+++ b/drivers/net/phy/mdio_bus_provider.c
@@ -152,7 +152,6 @@ static int mdiobus_create_device(struct mii_bus *bus,
strscpy(mdiodev->modalias, bi->modalias,
sizeof(mdiodev->modalias));
- mdiodev->bus_match = mdio_device_bus_match;
mdiodev->dev.platform_data = (void *)bi->platform_data;
ret = mdio_device_register(mdiodev);
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index cce3f405d1a4..f64176e0e197 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -35,7 +35,8 @@ static void mdio_device_release(struct device *dev)
kfree(to_mdio_device(dev));
}
-int mdio_device_bus_match(struct device *dev, const struct device_driver *drv)
+static int mdio_device_bus_match(struct device *dev,
+ const struct device_driver *drv)
{
struct mdio_device *mdiodev = to_mdio_device(dev);
const struct mdio_driver *mdiodrv = to_mdio_driver(drv);
@@ -45,7 +46,6 @@ int mdio_device_bus_match(struct device *dev, const struct device_driver *drv)
return strcmp(mdiodev->modalias, drv->name) == 0;
}
-EXPORT_SYMBOL_GPL(mdio_device_bus_match);
struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
{
@@ -59,6 +59,7 @@ struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
mdiodev->dev.release = mdio_device_release;
mdiodev->dev.parent = &bus->dev;
mdiodev->dev.bus = &mdio_bus_type;
+ mdiodev->bus_match = mdio_device_bus_match;
mdiodev->device_free = mdio_device_free;
mdiodev->device_remove = mdio_device_remove;
mdiodev->bus = bus;
diff --git a/drivers/net/phy/mediatek/Kconfig b/drivers/net/phy/mediatek/Kconfig
index 9f30a91be8dd..bb7dc876271e 100644
--- a/drivers/net/phy/mediatek/Kconfig
+++ b/drivers/net/phy/mediatek/Kconfig
@@ -27,6 +27,7 @@ config MEDIATEK_GE_SOC_PHY
depends on ARCH_AIROHA || (ARCH_MEDIATEK && NVMEM_MTK_EFUSE) || \
COMPILE_TEST
select MTK_NET_PHYLIB
+ select PHY_PACKAGE
help
Supports MediaTek SoC built-in Gigabit Ethernet PHYs.
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 64aa03aed770..d0429dc8f561 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -431,6 +431,10 @@ struct kszphy_ptp_priv {
spinlock_t seconds_lock;
};
+struct kszphy_phy_stats {
+ u64 rx_err_pkt_cnt;
+};
+
struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
@@ -441,6 +445,7 @@ struct kszphy_priv {
bool rmii_ref_clk_sel_val;
bool clk_enable;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
+ struct kszphy_phy_stats phy_stats;
};
static const struct kszphy_type lan8814_type = {
@@ -1718,7 +1723,8 @@ static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat)
*
* distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity
*/
- if (phydev_id_compare(phydev, PHY_ID_KSZ9131))
+ if (phydev_id_compare(phydev, PHY_ID_KSZ9131) ||
+ phydev_id_compare(phydev, PHY_ID_KSZ9477))
dt = clamp(dt - 22, 0, 255);
return (dt * 400) / 10;
@@ -1792,12 +1798,20 @@ static int ksz9x31_cable_test_get_status(struct phy_device *phydev,
bool *finished)
{
struct kszphy_priv *priv = phydev->priv;
- unsigned long pair_mask = 0xf;
+ unsigned long pair_mask;
int retries = 20;
int pair, ret, rv;
*finished = false;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported))
+ pair_mask = 0xf; /* All pairs */
+ else
+ pair_mask = 0x3; /* Pairs A and B only */
+
/* Try harder if link partner is active */
while (pair_mask && retries--) {
for_each_set_bit(pair, &pair_mask, 4) {
@@ -1948,6 +1962,56 @@ static int ksz886x_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static int ksz9477_mdix_update(struct phy_device *phydev)
+{
+ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO)
+ phydev->mdix = phydev->mdix_ctrl;
+ else
+ phydev->mdix = ETH_TP_MDI_INVALID;
+
+ return 0;
+}
+
+static int ksz9477_read_mdix_ctrl(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
+ if (val < 0)
+ return val;
+
+ if (!(val & MII_KSZ9131_AUTO_MDIX_SWAP_OFF))
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ else if (val & MII_KSZ9131_AUTO_MDI_SET)
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+
+ return 0;
+}
+
+static int ksz9477_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9477_mdix_update(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
+static int ksz9477_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9131_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
struct ksz9477_errata_write {
u8 dev_addr;
u8 reg_addr;
@@ -2029,6 +2093,13 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ /* Read the initial MDI-X config state so we do not need to poll it
+ * later on.
+ */
+ err = ksz9477_read_mdix_ctrl(phydev);
+ if (err)
+ return err;
+
return kszphy_config_init(phydev);
}
@@ -2073,6 +2144,35 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
+/* KSZ9477 PHY RXER Counter. Probably supported by other PHYs like KSZ9313,
+ * etc. The counter is incremented when the PHY receives a frame with one or
+ * more symbol errors. The counter is cleared when the register is read.
+ */
+#define MII_KSZ9477_PHY_RXER_COUNTER 0x15
+
+static int kszphy_update_stats(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ int ret;
+
+ ret = phy_read(phydev, MII_KSZ9477_PHY_RXER_COUNTER);
+ if (ret < 0)
+ return ret;
+
+ priv->phy_stats.rx_err_pkt_cnt += ret;
+
+ return 0;
+}
+
+static void kszphy_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ stats->rx_errors = priv->phy_stats.rx_err_pkt_cnt;
+}
+
static void kszphy_enable_clk(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
@@ -5688,12 +5788,19 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9477,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9477",
+ .probe = kszphy_probe,
/* PHY_GBIT_FEATURES */
.config_init = ksz9477_config_init,
.config_intr = kszphy_config_intr,
+ .config_aneg = ksz9477_config_aneg,
+ .read_status = ksz9477_read_status,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = ksz9477_resume,
+ .get_phy_stats = kszphy_get_phy_stats,
+ .update_stats = kszphy_update_stats,
+ .cable_test_start = ksz9x31_cable_test_start,
+ .cable_test_get_status = ksz9x31_cable_test_get_status,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 7ff975efd8e7..7ed6522fb0ef 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -530,7 +530,6 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
int delay_size = ARRAY_SIZE(vsc85xx_internal_delay);
- struct device *dev = &phydev->mdio.dev;
u16 reg_val = 0;
u16 mask = 0;
s32 rx_delay;
@@ -549,7 +548,7 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
if (phy_interface_is_rgmii(phydev))
mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
- rx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay,
+ rx_delay = phy_get_internal_delay(phydev, vsc85xx_internal_delay,
delay_size, true);
if (rx_delay < 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
@@ -559,7 +558,7 @@ static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
rx_delay = RGMII_CLK_DELAY_0_2_NS;
}
- tx_delay = phy_get_internal_delay(phydev, dev, vsc85xx_internal_delay,
+ tx_delay = phy_get_internal_delay(phydev, vsc85xx_internal_delay,
delay_size, false);
if (tx_delay < 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index bdd70d424491..61670be0f095 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -1573,10 +1573,3 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
return ret;
}
EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
-
-struct phy_driver genphy_c45_driver = {
- .phy_id = 0xffffffff,
- .phy_id_mask = 0xffffffff,
- .name = "Generic Clause 45 PHY",
- .read_status = genphy_c45_read_status,
-};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index e177037f9110..c480bb40fa73 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -142,6 +142,9 @@ int phy_interface_num_ports(phy_interface_t interface)
case PHY_INTERFACE_MODE_RXAUI:
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ case PHY_INTERFACE_MODE_100GBASEP:
return 1;
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
@@ -375,8 +378,8 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
devad | MII_MMD_CTRL_NOINCR);
}
-static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
- int devad, u32 regnum)
+int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum)
{
if (is_c45)
return __mdiobus_c45_read(bus, phy_addr, devad, regnum);
@@ -385,9 +388,10 @@ static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
/* Read the content of the MMD's selected register */
return __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
}
+EXPORT_SYMBOL_GPL(mmd_phy_read);
-static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
- int devad, u32 regnum, u16 val)
+int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum, u16 val)
{
if (is_c45)
return __mdiobus_c45_write(bus, phy_addr, devad, regnum, val);
@@ -396,6 +400,7 @@ static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
/* Write the data into MMD's selected register */
return __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
}
+EXPORT_SYMBOL_GPL(mmd_phy_write);
/**
* __phy_read_mmd - Convenience function for reading a register
@@ -486,71 +491,6 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
EXPORT_SYMBOL(phy_write_mmd);
/**
- * __phy_package_read_mmd - read MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Convenience helper for reading a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for __phy_read();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int __phy_package_read_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum);
-}
-EXPORT_SYMBOL(__phy_package_read_mmd);
-
-/**
- * __phy_package_write_mmd - write MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to write
- * @val: value to write to @regnum
- *
- * Convenience helper for writing a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for __phy_write();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int __phy_package_write_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum, u16 val)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum, val);
-}
-EXPORT_SYMBOL(__phy_package_write_mmd);
-
-/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 38417e288611..d11ce1c7e712 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -351,6 +351,15 @@ unsigned long phy_caps_from_interface(phy_interface_t interface)
link_caps |= BIT(LINK_CAPA_40000FD);
break;
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ link_caps |= BIT(LINK_CAPA_50000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_100GBASEP:
+ link_caps |= BIT(LINK_CAPA_100000FD);
+ break;
+
case PHY_INTERFACE_MODE_INTERNAL:
link_caps |= LINK_CAPA_ALL;
break;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 73f9cb2e2844..90951681523c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -59,6 +59,13 @@ struct phy_fixup {
int (*run)(struct phy_device *phydev);
};
+static struct phy_driver genphy_c45_driver = {
+ .phy_id = 0xffffffff,
+ .phy_id_mask = 0xffffffff,
+ .name = "Generic Clause 45 PHY",
+ .read_status = genphy_c45_read_status,
+};
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_features);
@@ -645,11 +652,119 @@ static struct attribute *phy_dev_attrs[] = {
&dev_attr_phy_dev_flags.attr,
NULL,
};
-ATTRIBUTE_GROUPS(phy_dev);
+
+static const struct attribute_group phy_dev_group = {
+ .attrs = phy_dev_attrs,
+};
+
+#define MMD_DEVICE_ID_ATTR(n) \
+static ssize_t mmd##n##_device_id_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct phy_device *phydev = to_phy_device(dev); \
+ return sysfs_emit(buf, "0x%.8lx\n", \
+ (unsigned long)phydev->c45_ids.device_ids[n]); \
+} \
+static DEVICE_ATTR_RO(mmd##n##_device_id)
+
+MMD_DEVICE_ID_ATTR(1);
+MMD_DEVICE_ID_ATTR(2);
+MMD_DEVICE_ID_ATTR(3);
+MMD_DEVICE_ID_ATTR(4);
+MMD_DEVICE_ID_ATTR(5);
+MMD_DEVICE_ID_ATTR(6);
+MMD_DEVICE_ID_ATTR(7);
+MMD_DEVICE_ID_ATTR(8);
+MMD_DEVICE_ID_ATTR(9);
+MMD_DEVICE_ID_ATTR(10);
+MMD_DEVICE_ID_ATTR(11);
+MMD_DEVICE_ID_ATTR(12);
+MMD_DEVICE_ID_ATTR(13);
+MMD_DEVICE_ID_ATTR(14);
+MMD_DEVICE_ID_ATTR(15);
+MMD_DEVICE_ID_ATTR(16);
+MMD_DEVICE_ID_ATTR(17);
+MMD_DEVICE_ID_ATTR(18);
+MMD_DEVICE_ID_ATTR(19);
+MMD_DEVICE_ID_ATTR(20);
+MMD_DEVICE_ID_ATTR(21);
+MMD_DEVICE_ID_ATTR(22);
+MMD_DEVICE_ID_ATTR(23);
+MMD_DEVICE_ID_ATTR(24);
+MMD_DEVICE_ID_ATTR(25);
+MMD_DEVICE_ID_ATTR(26);
+MMD_DEVICE_ID_ATTR(27);
+MMD_DEVICE_ID_ATTR(28);
+MMD_DEVICE_ID_ATTR(29);
+MMD_DEVICE_ID_ATTR(30);
+MMD_DEVICE_ID_ATTR(31);
+
+static struct attribute *phy_mmd_attrs[] = {
+ &dev_attr_mmd1_device_id.attr,
+ &dev_attr_mmd2_device_id.attr,
+ &dev_attr_mmd3_device_id.attr,
+ &dev_attr_mmd4_device_id.attr,
+ &dev_attr_mmd5_device_id.attr,
+ &dev_attr_mmd6_device_id.attr,
+ &dev_attr_mmd7_device_id.attr,
+ &dev_attr_mmd8_device_id.attr,
+ &dev_attr_mmd9_device_id.attr,
+ &dev_attr_mmd10_device_id.attr,
+ &dev_attr_mmd11_device_id.attr,
+ &dev_attr_mmd12_device_id.attr,
+ &dev_attr_mmd13_device_id.attr,
+ &dev_attr_mmd14_device_id.attr,
+ &dev_attr_mmd15_device_id.attr,
+ &dev_attr_mmd16_device_id.attr,
+ &dev_attr_mmd17_device_id.attr,
+ &dev_attr_mmd18_device_id.attr,
+ &dev_attr_mmd19_device_id.attr,
+ &dev_attr_mmd20_device_id.attr,
+ &dev_attr_mmd21_device_id.attr,
+ &dev_attr_mmd22_device_id.attr,
+ &dev_attr_mmd23_device_id.attr,
+ &dev_attr_mmd24_device_id.attr,
+ &dev_attr_mmd25_device_id.attr,
+ &dev_attr_mmd26_device_id.attr,
+ &dev_attr_mmd27_device_id.attr,
+ &dev_attr_mmd28_device_id.attr,
+ &dev_attr_mmd29_device_id.attr,
+ &dev_attr_mmd30_device_id.attr,
+ &dev_attr_mmd31_device_id.attr,
+ NULL
+};
+
+static umode_t phy_mmd_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct phy_device *phydev = to_phy_device(dev);
+ const int i = index + 1;
+
+ if (!phydev->is_c45)
+ return 0;
+ if (i >= ARRAY_SIZE(phydev->c45_ids.device_ids) ||
+ phydev->c45_ids.device_ids[i] == 0xffffffff)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group phy_mmd_group = {
+ .name = "c45_phy_ids",
+ .attrs = phy_mmd_attrs,
+ .is_visible = phy_mmd_is_visible,
+};
+
+static const struct attribute_group *phy_device_groups[] = {
+ &phy_dev_group,
+ &phy_mmd_group,
+ NULL,
+};
static const struct device_type mdio_bus_phy_type = {
.name = "PHY",
- .groups = phy_dev_groups,
+ .groups = phy_device_groups,
.release = phy_device_release,
.pm = pm_ptr(&mdio_bus_phy_pm_ops),
};
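Editorial note: the new "c45_phy_ids" attribute group exposes the device identifier of every present MMD of a Clause 45 PHY to userspace. A minimal userspace reader is sketched below; the device path is an assumption (it depends on the MDIO bus name and PHY address on the target system), while the group and attribute names come from the hunk above:

/* Illustrative userspace reader for one of the new sysfs attributes.
 * The device path is an assumption; adjust it for the target system.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/mdio_bus/devices/stmmac-0:00/c45_phy_ids/mmd1_device_id";
	char id[32];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(id, sizeof(id), f))
		printf("MMD 1 device ID: %s", id);
	fclose(f);
	return 0;
}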
@@ -1515,7 +1630,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
struct mii_bus *bus = phydev->mdio.bus;
struct device *d = &phydev->mdio.dev;
struct module *ndev_owner = NULL;
- bool using_genphy = false;
int err;
/* For Ethernet device drivers that register their own MDIO bus, we
@@ -1541,7 +1655,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
else
d->driver = &genphy_driver.mdiodrv.driver;
- using_genphy = true;
+ phydev->is_genphy_driven = 1;
}
if (!try_module_get(d->driver->owner)) {
@@ -1550,7 +1664,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
goto error_put_device;
}
- if (using_genphy) {
+ if (phydev->is_genphy_driven) {
err = d->driver->probe(d);
if (err >= 0)
err = device_bind_driver(d);
@@ -1620,7 +1734,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* the generic PHY driver we can't figure it out, thus set the old
* legacy PORT_MII value.
*/
- if (using_genphy)
+ if (phydev->is_genphy_driven)
phydev->port = PORT_MII;
/* Initial carrier state is off as the phy is about to be
@@ -1659,6 +1773,7 @@ error:
error_module_put:
module_put(d->driver->owner);
+ phydev->is_genphy_driven = 0;
d->driver = NULL;
error_put_device:
put_device(d);
@@ -1706,36 +1821,6 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
}
EXPORT_SYMBOL(phy_attach);
-static bool phy_driver_is_genphy_kind(struct phy_device *phydev,
- struct device_driver *driver)
-{
- struct device *d = &phydev->mdio.dev;
- bool ret = false;
-
- if (!phydev->drv)
- return ret;
-
- get_device(d);
- ret = d->driver == driver;
- put_device(d);
-
- return ret;
-}
-
-bool phy_driver_is_genphy(struct phy_device *phydev)
-{
- return phy_driver_is_genphy_kind(phydev,
- &genphy_driver.mdiodrv.driver);
-}
-EXPORT_SYMBOL_GPL(phy_driver_is_genphy);
-
-bool phy_driver_is_genphy_10g(struct phy_device *phydev)
-{
- return phy_driver_is_genphy_kind(phydev,
- &genphy_c45_driver.mdiodrv.driver);
-}
-EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
-
/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
@@ -1792,9 +1877,10 @@ void phy_detach(struct phy_device *phydev)
* from the generic driver so that there's a chance a
* real driver could be loaded
*/
- if (phy_driver_is_genphy(phydev) ||
- phy_driver_is_genphy_10g(phydev))
+ if (phydev->is_genphy_driven) {
device_release_driver(&phydev->mdio.dev);
+ phydev->is_genphy_driven = 0;
+ }
/* Assert the reset signal */
phy_device_reset(phydev, 1);
@@ -2899,7 +2985,6 @@ static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
/**
* phy_get_internal_delay - returns the index of the internal delay
* @phydev: phy_device struct
- * @dev: pointer to the devices device struct
* @delay_values: array of delays the PHY supports
* @size: the size of the delay array
* @is_rx: boolean to indicate to get the rx internal delay
@@ -2912,9 +2997,10 @@ static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
* array then size = 0 and the value of the delay property is returned.
* Return -EINVAL if the delay is invalid or cannot be found.
*/
-s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
- const int *delay_values, int size, bool is_rx)
+s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values,
+ int size, bool is_rx)
{
+ struct device *dev = &phydev->mdio.dev;
int i, ret;
u32 delay;
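Editorial note: since phy_get_internal_delay() now derives the struct device from phydev->mdio.dev itself, callers simply drop the second argument. An illustrative caller (not taken from this patch; the delay table and register programming are placeholders) looks like:

/* Illustrative RGMII caller after the signature change; "example_delays"
 * and the register programming step are placeholders.
 */
#include <linux/phy.h>

static const int example_delays[] = { 0, 1000, 2000 };

static int example_config_rgmii(struct phy_device *phydev)
{
	s32 rx_delay, tx_delay;

	/* Previously: phy_get_internal_delay(phydev, &phydev->mdio.dev, ...) */
	rx_delay = phy_get_internal_delay(phydev, example_delays,
					  ARRAY_SIZE(example_delays), true);
	tx_delay = phy_get_internal_delay(phydev, example_delays,
					  ARRAY_SIZE(example_delays), false);
	if (rx_delay < 0 || tx_delay < 0)
		return -EINVAL;

	/* program rx_delay/tx_delay into the vendor-specific registers here */
	return 0;
}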
diff --git a/drivers/net/phy/phy_package.c b/drivers/net/phy/phy_package.c
index c738f76e8664..3024da0bbf7b 100644
--- a/drivers/net/phy/phy_package.c
+++ b/drivers/net/phy/phy_package.c
@@ -52,7 +52,8 @@ void *phy_package_get_priv(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(phy_package_get_priv);
-int phy_package_address(struct phy_device *phydev, unsigned int addr_offset)
+static int phy_package_address(struct phy_device *phydev,
+ unsigned int addr_offset)
{
struct phy_package_shared *shared = phydev->shared;
u8 base_addr = shared->base_addr;
@@ -90,6 +91,71 @@ int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset,
}
EXPORT_SYMBOL_GPL(__phy_package_write);
+/**
+ * __phy_package_read_mmd - read MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Convenience helper for reading a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_read();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum);
+}
+EXPORT_SYMBOL(__phy_package_read_mmd);
+
+/**
+ * __phy_package_write_mmd - write MMD reg relative to PHY package base addr
+ * @phydev: The phy_device struct
+ * @addr_offset: The offset to be added to PHY package base_addr
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Convenience helper for writing a register of an MMD on a given PHY
+ * using the PHY package base address. The base address is added to
+ * the addr_offset value.
+ *
+ * Same calling rules as for __phy_write();
+ *
+ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
+ */
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ if (regnum > (u16)~0 || devad > 32)
+ return -EINVAL;
+
+ return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
+ regnum, val);
+}
+EXPORT_SYMBOL(__phy_package_write_mmd);
+
static bool __phy_package_set_once(struct phy_device *phydev, unsigned int b)
{
struct phy_package_shared *shared = phydev->shared;
@@ -348,3 +414,6 @@ int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
return ret;
}
EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
+
+MODULE_DESCRIPTION("PHY package support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/phylib-internal.h b/drivers/net/phy/phylib-internal.h
index afac2bd15b50..ebda74eb60a5 100644
--- a/drivers/net/phy/phylib-internal.h
+++ b/drivers/net/phy/phylib-internal.h
@@ -7,6 +7,7 @@
#define __PHYLIB_INTERNAL_H
struct phy_device;
+struct mii_bus;
/*
* phy_supported_speeds - return all speeds currently supported by a PHY device
@@ -20,7 +21,10 @@ void of_set_phy_timing_role(struct phy_device *phydev);
int phy_speed_down_core(struct phy_device *phydev);
void phy_check_downshift(struct phy_device *phydev);
-int phy_package_address(struct phy_device *phydev, unsigned int addr_offset);
+int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum);
+int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
+ int devad, u32 regnum, u16 val);
int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 0faa3d97e06b..67218d278ce6 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -127,6 +127,9 @@ do { \
#endif
static const phy_interface_t phylink_sfp_interface_preference[] = {
+ PHY_INTERFACE_MODE_100GBASEP,
+ PHY_INTERFACE_MODE_50GBASER,
+ PHY_INTERFACE_MODE_LAUI,
PHY_INTERFACE_MODE_25GBASER,
PHY_INTERFACE_MODE_USXGMII,
PHY_INTERFACE_MODE_10GBASER,
@@ -274,6 +277,13 @@ static int phylink_interface_max_speed(phy_interface_t interface)
case PHY_INTERFACE_MODE_XLGMII:
return SPEED_40000;
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ return SPEED_50000;
+
+ case PHY_INTERFACE_MODE_100GBASEP:
+ return SPEED_100000;
+
case PHY_INTERFACE_MODE_INTERNAL:
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_MAX:
@@ -798,6 +808,9 @@ static int phylink_parse_mode(struct phylink *pl,
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XLGMII:
+ case PHY_INTERFACE_MODE_50GBASER:
+ case PHY_INTERFACE_MODE_LAUI:
+ case PHY_INTERFACE_MODE_100GBASEP:
caps = ~(MAC_SYM_PAUSE | MAC_ASYM_PAUSE);
caps = phylink_get_capabilities(pl->link_config.interface, caps,
RATE_MATCH_NONE);
diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig
index 570626cc8e14..06e8430c13b1 100644
--- a/drivers/net/phy/qcom/Kconfig
+++ b/drivers/net/phy/qcom/Kconfig
@@ -7,7 +7,7 @@ config AT803X_PHY
select QCOM_NET_PHYLIB
depends on REGULATOR
help
- Currently supports the AR8030, AR8031, AR8033, AR8035 model
+ Currently supports the AR8030, AR8031, AR8033, AR8035 and IPQ5018 models
config QCA83XX_PHY
tristate "Qualcomm Atheros QCA833x PHYs"
@@ -24,6 +24,7 @@ config QCA808X_PHY
config QCA807X_PHY
tristate "Qualcomm QCA807x PHYs"
select QCOM_NET_PHYLIB
+ select PHY_PACKAGE
depends on OF_MDIO
help
Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 26350b962890..43e604171828 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -19,6 +19,7 @@
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/phylink.h>
+#include <linux/reset.h>
#include <linux/sfp.h>
#include <dt-bindings/net/qca-ar803x.h>
@@ -96,6 +97,8 @@
#define ATH8035_PHY_ID 0x004dd072
#define AT8030_PHY_ID_MASK 0xffffffef
+#define IPQ5018_PHY_ID 0x004dd0c0
+
#define QCA9561_PHY_ID 0x004dd042
#define AT803X_PAGE_FIBER 0
@@ -108,6 +111,48 @@
/* disable hibernation mode */
#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+#define IPQ5018_PHY_FIFO_CONTROL 0x19
+#define IPQ5018_PHY_FIFO_RESET GENMASK(1, 0)
+
+#define IPQ5018_PHY_DEBUG_EDAC 0x4380
+#define IPQ5018_PHY_MMD1_MDAC 0x8100
+#define IPQ5018_PHY_DAC_MASK GENMASK(15, 8)
+
+/* MDAC and EDAC values for short cable length */
+#define IPQ5018_PHY_DEBUG_EDAC_VAL 0x10
+#define IPQ5018_PHY_MMD1_MDAC_VAL 0x10
+
+#define IPQ5018_PHY_MMD1_MSE_THRESH1 0x1000
+#define IPQ5018_PHY_MMD1_MSE_THRESH2 0x1001
+#define IPQ5018_PHY_PCS_EEE_TX_TIMER 0x8008
+#define IPQ5018_PHY_PCS_EEE_RX_TIMER 0x8009
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL3 0x8074
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL4 0x8075
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL5 0x8076
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL6 0x8077
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL7 0x8078
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL9 0x807a
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL13 0x807e
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL14 0x807f
+
+#define IPQ5018_PHY_MMD1_MSE_THRESH1_VAL 0xf1
+#define IPQ5018_PHY_MMD1_MSE_THRESH2_VAL 0x1f6
+#define IPQ5018_PHY_PCS_EEE_TX_TIMER_VAL 0x7880
+#define IPQ5018_PHY_PCS_EEE_RX_TIMER_VAL 0xc8
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL3_VAL 0xc040
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL4_VAL 0xa060
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL5_VAL 0xc040
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL6_VAL 0xa060
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL7_VAL 0xc24c
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL9_VAL 0xc060
+#define IPQ5018_PHY_PCS_CDT_THRESH_CTRL13_VAL 0xb060
+#define IPQ5018_PHY_PCS_NEAR_ECHO_THRESH_VAL 0x90b0
+
+#define IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE 0x1
+#define IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_MASK GENMASK(7, 4)
+#define IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_DEFAULT 0x50
+#define IPQ5018_PHY_DEBUG_ANA_DAC_FILTER 0xa080
+
MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
@@ -133,6 +178,11 @@ struct at803x_context {
u16 led_control;
};
+struct ipq5018_priv {
+ struct reset_control *rst;
+ bool set_short_cable_dac;
+};
+
static int at803x_write_page(struct phy_device *phydev, int page)
{
int mask;
@@ -987,6 +1037,109 @@ static int at8035_probe(struct phy_device *phydev)
return at8035_parse_dt(phydev);
}
+static int ipq5018_cable_test_start(struct phy_device *phydev)
+{
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL3,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL3_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL4,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL4_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL5,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL5_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL6,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL6_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL7,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL7_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL9,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL9_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL13,
+ IPQ5018_PHY_PCS_CDT_THRESH_CTRL13_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_CDT_THRESH_CTRL3,
+ IPQ5018_PHY_PCS_NEAR_ECHO_THRESH_VAL);
+
+ /* we do all the (time consuming) work later */
+ return 0;
+}
+
+static int ipq5018_config_init(struct phy_device *phydev)
+{
+ struct ipq5018_priv *priv = phydev->priv;
+ u16 val;
+
+ /*
+ * set LDO efuse: first temporarily store ANA_DAC_FILTER value from
+ * debug register as it will be reset once the ANA_LDO_EFUSE register
+ * is written to
+ */
+ val = at803x_debug_reg_read(phydev, IPQ5018_PHY_DEBUG_ANA_DAC_FILTER);
+ at803x_debug_reg_mask(phydev, IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE,
+ IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_MASK,
+ IPQ5018_PHY_DEBUG_ANA_LDO_EFUSE_DEFAULT);
+ at803x_debug_reg_write(phydev, IPQ5018_PHY_DEBUG_ANA_DAC_FILTER, val);
+
+ /* set 8023AZ EEE TX and RX timer values */
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_EEE_TX_TIMER,
+ IPQ5018_PHY_PCS_EEE_TX_TIMER_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, IPQ5018_PHY_PCS_EEE_RX_TIMER,
+ IPQ5018_PHY_PCS_EEE_RX_TIMER_VAL);
+
+ /* set MSE threshold values */
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, IPQ5018_PHY_MMD1_MSE_THRESH1,
+ IPQ5018_PHY_MMD1_MSE_THRESH1_VAL);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, IPQ5018_PHY_MMD1_MSE_THRESH2,
+ IPQ5018_PHY_MMD1_MSE_THRESH2_VAL);
+
+ /* PHY DAC values are optional and only set in a PHY to PHY link architecture */
+ if (priv->set_short_cable_dac) {
+ /* setting MDAC (Multi-level Digital-to-Analog Converter) in MMD1 */
+ phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, IPQ5018_PHY_MMD1_MDAC,
+ IPQ5018_PHY_DAC_MASK, IPQ5018_PHY_MMD1_MDAC_VAL);
+
+ /* setting EDAC (Error-detection and Correction) in debug register */
+ at803x_debug_reg_mask(phydev, IPQ5018_PHY_DEBUG_EDAC,
+ IPQ5018_PHY_DAC_MASK, IPQ5018_PHY_DEBUG_EDAC_VAL);
+ }
+
+ return 0;
+}
+
+static void ipq5018_link_change_notify(struct phy_device *phydev)
+{
+ /*
+ * Reset the FIFO buffer on link loss to clear any residual data,
+ * which could otherwise leave the FIFO in a state it cannot recover from.
+ */
+ mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr,
+ IPQ5018_PHY_FIFO_CONTROL, IPQ5018_PHY_FIFO_RESET,
+ phydev->link ? IPQ5018_PHY_FIFO_RESET : 0);
+}
+
+static int ipq5018_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct ipq5018_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->set_short_cable_dac = of_property_read_bool(dev->of_node,
+ "qcom,dac-preset-short-cable");
+
+ priv->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(priv->rst))
+ return dev_err_probe(dev, PTR_ERR(priv->rst),
+ "failed to acquire reset\n");
+
+ ret = reset_control_reset(priv->rst);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to reset\n");
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* Qualcomm Atheros AR8035 */
@@ -1079,6 +1232,19 @@ static struct phy_driver at803x_driver[] = {
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
}, {
+ PHY_ID_MATCH_EXACT(IPQ5018_PHY_ID),
+ .name = "Qualcomm Atheros IPQ5018 internal PHY",
+ .flags = PHY_IS_INTERNAL | PHY_POLL_CABLE_TEST,
+ .probe = ipq5018_probe,
+ .config_init = ipq5018_config_init,
+ .link_change_notify = ipq5018_link_change_notify,
+ .read_status = at803x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = ipq5018_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .soft_reset = genphy_soft_reset,
+}, {
/* Qualcomm Atheros QCA9561 */
PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
.name = "Qualcomm Atheros QCA9561 built-in PHY",
@@ -1104,6 +1270,7 @@ static const struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(IPQ5018_PHY_ID) },
{ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 1af6b5ead74b..6d10ef7e9a8a 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -377,7 +377,7 @@ static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
}
-static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
u16 reg;
@@ -386,18 +386,19 @@ static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int valu
reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+ if (val < 0)
+ return val;
+
val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
val |= QCA807X_GPIO_FORCE_EN;
val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
- phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
+ return phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
}
static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
{
- qca807x_gpio_set(gc, offset, value);
-
- return 0;
+ return qca807x_gpio_set(gc, offset, value);
}
static int qca807x_gpio(struct phy_device *phydev)
@@ -425,7 +426,7 @@ static int qca807x_gpio(struct phy_device *phydev)
gc->get_direction = qca807x_gpio_get_direction;
gc->direction_output = qca807x_gpio_dir_out;
gc->get = qca807x_gpio_get;
- gc->set = qca807x_gpio_set;
+ gc->set_rv = qca807x_gpio_set;
return devm_gpiochip_add_data(dev, gc, priv);
}
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index c3dcb6257430..dd0d675149ad 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -436,9 +436,15 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
static void rtl8211f_get_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
{
+ int wol_events;
+
wol->supported = WAKE_MAGIC;
- if (phy_read_paged(dev, RTL8211F_WOL_SETTINGS_PAGE, RTL8211F_WOL_SETTINGS_EVENTS)
- & RTL8211F_WOL_EVENT_MAGIC)
+
+ wol_events = phy_read_paged(dev, RTL8211F_WOL_SETTINGS_PAGE, RTL8211F_WOL_SETTINGS_EVENTS);
+ if (wol_events < 0)
+ return;
+
+ if (wol_events & RTL8211F_WOL_EVENT_MAGIC)
wol->wolopts = WAKE_MAGIC;
}
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 347c1e0e94d9..5347c95d1e77 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -361,6 +361,11 @@ static void sfp_fixup_ignore_tx_fault(struct sfp *sfp)
sfp->state_ignore_mask |= SFP_F_TX_FAULT;
}
+static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask)
+{
+ sfp->state_hw_mask &= ~mask;
+}
+
static void sfp_fixup_nokia(struct sfp *sfp)
{
sfp_fixup_long_startup(sfp);
@@ -409,7 +414,19 @@ static void sfp_fixup_halny_gsfp(struct sfp *sfp)
* these are possibly used for other purposes on this
* module, e.g. a serial port.
*/
- sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS);
+ sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS);
+}
+
+static void sfp_fixup_potron(struct sfp *sfp)
+{
+ /*
+ * The TX_FAULT and LOS pins on this device are used for serial
+ * communication, so ignore them. Additionally, provide extra
+ * time for this device to fully start up.
+ */
+
+ sfp_fixup_long_startup(sfp);
+ sfp_fixup_ignore_hw(sfp, SFP_F_TX_FAULT | SFP_F_LOS);
}
static void sfp_fixup_rollball_cc(struct sfp *sfp)
@@ -512,6 +529,8 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+ SFP_QUIRK_F("YV", "SFP+ONU-XGSPON", sfp_fixup_potron),
+
// OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index def84e87e05b..4cf9d1822a83 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -108,18 +108,6 @@ struct ppp_file {
#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
/*
- * Data structure to hold primary network stats for which
- * we want to use 64 bit storage. Other network stats
- * are stored in dev->stats of the ppp strucute.
- */
-struct ppp_link_stats {
- u64 rx_packets;
- u64 tx_packets;
- u64 rx_bytes;
- u64 tx_bytes;
-};
-
-/*
* Data structure describing one ppp unit.
* A ppp unit corresponds to a ppp network interface device
* and represents a multilink bundle.
@@ -162,7 +150,6 @@ struct ppp {
struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
struct net *ppp_net; /* the net we belong to */
- struct ppp_link_stats stats64; /* 64 bit network stats */
};
/*
@@ -1539,23 +1526,12 @@ ppp_net_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
static void
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
- struct ppp *ppp = netdev_priv(dev);
-
- ppp_recv_lock(ppp);
- stats64->rx_packets = ppp->stats64.rx_packets;
- stats64->rx_bytes = ppp->stats64.rx_bytes;
- ppp_recv_unlock(ppp);
-
- ppp_xmit_lock(ppp);
- stats64->tx_packets = ppp->stats64.tx_packets;
- stats64->tx_bytes = ppp->stats64.tx_bytes;
- ppp_xmit_unlock(ppp);
-
stats64->rx_errors = dev->stats.rx_errors;
stats64->tx_errors = dev->stats.tx_errors;
stats64->rx_dropped = dev->stats.rx_dropped;
stats64->tx_dropped = dev->stats.tx_dropped;
stats64->rx_length_errors = dev->stats.rx_length_errors;
+ dev_fetch_sw_netstats(stats64, dev->tstats);
}
static int ppp_dev_init(struct net_device *dev)
@@ -1650,6 +1626,7 @@ static void ppp_setup(struct net_device *dev)
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->priv_destructor = ppp_dev_priv_destructor;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
netif_keep_dst(dev);
}
@@ -1796,8 +1773,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
#endif /* CONFIG_PPP_FILTER */
}
- ++ppp->stats64.tx_packets;
- ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN;
+ dev_sw_netstats_tx_add(ppp->dev, 1, skb->len - PPP_PROTO_LEN);
switch (proto) {
case PPP_IP:
@@ -2474,8 +2450,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
break;
}
- ++ppp->stats64.rx_packets;
- ppp->stats64.rx_bytes += skb->len - 2;
+ dev_sw_netstats_rx_add(ppp->dev, skb->len - PPP_PROTO_LEN);
npi = proto_to_npindex(proto);
if (npi < 0) {
@@ -3303,14 +3278,25 @@ static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
struct slcompress *vj = ppp->vj;
+ int cpu;
memset(st, 0, sizeof(*st));
- st->p.ppp_ipackets = ppp->stats64.rx_packets;
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *p = per_cpu_ptr(ppp->dev->tstats, cpu);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+
+ rx_packets = u64_stats_read(&p->rx_packets);
+ rx_bytes = u64_stats_read(&p->rx_bytes);
+ tx_packets = u64_stats_read(&p->tx_packets);
+ tx_bytes = u64_stats_read(&p->tx_bytes);
+
+ st->p.ppp_ipackets += rx_packets;
+ st->p.ppp_ibytes += rx_bytes;
+ st->p.ppp_opackets += tx_packets;
+ st->p.ppp_obytes += tx_bytes;
+ }
st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
- st->p.ppp_ibytes = ppp->stats64.rx_bytes;
- st->p.ppp_opackets = ppp->stats64.tx_packets;
st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
- st->p.ppp_obytes = ppp->stats64.tx_bytes;
if (!vj)
return;
st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
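The ppp_generic.c hunks above replace the driver-private ppp_link_stats with the core per-CPU tstats infrastructure. As a reference, here is a minimal sketch of the same pattern in a hypothetical driver; the foo_* names are illustrative only, while the helpers themselves (NETDEV_PCPU_STAT_TSTATS, dev_sw_netstats_tx_add(), dev_fetch_sw_netstats()) are the ones used by the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: opt into core-managed per-CPU TX/RX counters. */
static void foo_setup(struct net_device *dev)
{
	/* The core allocates and frees dev->tstats on our behalf. */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Account one transmitted packet of skb->len bytes. */
	dev_sw_netstats_tx_add(dev, 1, skb->len);
	dev_kfree_skb(skb);	/* a real driver would hand the skb down */
	return NETDEV_TX_OK;
}

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	/* Fold the per-CPU counters into the rtnl snapshot. */
	dev_fetch_sw_netstats(stats, dev->tstats);
}

On the receive side the matching helper is dev_sw_netstats_rx_add(), as used in ppp_receive_nonmp_frame() above.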
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 68e631718ab0..410effa42ade 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -372,9 +372,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
* can't change.
*/
- if (skb->pkt_type == PACKET_OTHERHOST)
- goto abort_kfree;
-
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
@@ -418,6 +415,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
struct pppoe_net *pn;
int len;
+ if (skb->pkt_type == PACKET_OTHERHOST)
+ goto drop;
+
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto out;
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 7d60a714ca53..4de004813560 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -12,6 +12,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pse-pd/pse.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
#define PD692X0_PSE_NAME "pd692x0_pse"
@@ -76,6 +78,8 @@ enum {
PD692X0_MSG_GET_PORT_CLASS,
PD692X0_MSG_GET_PORT_MEAS,
PD692X0_MSG_GET_PORT_PARAM,
+ PD692X0_MSG_GET_POWER_BANK,
+ PD692X0_MSG_SET_POWER_BANK,
/* add new message above here */
PD692X0_MSG_CNT
@@ -95,6 +99,8 @@ struct pd692x0_priv {
unsigned long last_cmd_key_time;
enum ethtool_c33_pse_admin_state admin_state[PD692X0_MAX_PIS];
+ struct regulator_dev *manager_reg[PD692X0_MAX_MANAGERS];
+ int manager_pw_budget[PD692X0_MAX_MANAGERS];
};
/* Template list of communication messages. The non-null bytes defined here
@@ -170,6 +176,16 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = {
.data = {0x4e, 0x4e, 0x4e, 0x4e,
0x4e, 0x4e, 0x4e, 0x4e},
},
+ [PD692X0_MSG_GET_POWER_BANK] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x07, 0x0b, 0x57},
+ .data = { 0, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_SET_POWER_BANK] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x07, 0x0b, 0x57},
+ },
};
static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo)
@@ -739,6 +755,29 @@ pd692x0_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
return (buf.data[0] << 4 | buf.data[1]) * 100;
}
+static int
+pd692x0_pi_get_prio(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf.data[2] || buf.data[2] > pcdev->pis_prio_max + 1)
+ return -ERANGE;
+
+ /* PSE core priorities start at 0 */
+ return buf.data[2] - 1;
+}
+
static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
{
struct device *dev = &priv->client->dev;
@@ -766,6 +805,7 @@ static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
struct pd692x0_manager {
struct device_node *port_node[PD692X0_MAX_MANAGER_PORTS];
+ struct device_node *node;
int nports;
};
@@ -857,6 +897,8 @@ pd692x0_of_get_managers(struct pd692x0_priv *priv,
if (ret)
goto out;
+ of_node_get(node);
+ manager[manager_id].node = node;
nmanagers++;
}
@@ -869,6 +911,8 @@ out:
of_node_put(manager[i].port_node[j]);
manager[i].port_node[j] = NULL;
}
+ of_node_put(manager[i].node);
+ manager[i].node = NULL;
}
of_node_put(node);
@@ -876,6 +920,143 @@ out:
return ret;
}
+static const struct regulator_ops dummy_ops;
+
+static struct regulator_dev *
+pd692x0_register_manager_regulator(struct device *dev, char *reg_name,
+ struct device_node *node)
+{
+ struct regulator_init_data *rinit_data;
+ struct regulator_config rconfig = {0};
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+
+ rinit_data = devm_kzalloc(dev, sizeof(*rinit_data),
+ GFP_KERNEL);
+ if (!rinit_data)
+ return ERR_PTR(-ENOMEM);
+
+ rdesc = devm_kzalloc(dev, sizeof(*rdesc), GFP_KERNEL);
+ if (!rdesc)
+ return ERR_PTR(-ENOMEM);
+
+ rdesc->name = reg_name;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &dummy_ops;
+ rdesc->owner = THIS_MODULE;
+
+ rinit_data->supply_regulator = "vmain";
+
+ rconfig.dev = dev;
+ rconfig.init_data = rinit_data;
+ rconfig.of_node = node;
+
+ rdev = devm_regulator_register(dev, rdesc, &rconfig);
+ if (IS_ERR(rdev)) {
+ dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register regulator\n");
+ return rdev;
+ }
+
+ return rdev;
+}
+
+static int
+pd692x0_register_managers_regulator(struct pd692x0_priv *priv,
+ const struct pd692x0_manager *manager,
+ int nmanagers)
+{
+ struct device *dev = &priv->client->dev;
+ size_t reg_name_len;
+ int i;
+
+ /* Each regulator name len is dev name + 12 char +
+ * int max digit number (10) + 1
+ */
+ reg_name_len = strlen(dev_name(dev)) + 23;
+
+ for (i = 0; i < nmanagers; i++) {
+ static const char * const regulators[] = { "vaux5", "vaux3p3" };
+ struct regulator_dev *rdev;
+ char *reg_name;
+ int ret;
+
+ reg_name = devm_kzalloc(dev, reg_name_len, GFP_KERNEL);
+ if (!reg_name)
+ return -ENOMEM;
+ snprintf(reg_name, reg_name_len, "pse-%s-manager%d", dev_name(dev), i);
+ rdev = pd692x0_register_manager_regulator(dev, reg_name,
+ manager[i].node);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ /* VMAIN is described as main supply for the manager.
+ * Add other VAUX power supplies and link them to the
+ * virtual device rdev->dev.
+ */
+ ret = devm_regulator_bulk_get_enable(&rdev->dev,
+ ARRAY_SIZE(regulators),
+ regulators);
+ if (ret)
+ return dev_err_probe(&rdev->dev, ret,
+ "Failed to enable regulators\n");
+
+ priv->manager_reg[i] = rdev;
+ }
+
+ return 0;
+}
+
+static int
+pd692x0_conf_manager_power_budget(struct pd692x0_priv *priv, int id, int pw)
+{
+ struct pd692x0_msg msg, buf;
+ int ret, pw_mW = pw / 1000;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_POWER_BANK];
+ msg.data[0] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_POWER_BANK];
+ msg.data[0] = id;
+ msg.data[1] = pw_mW >> 8;
+ msg.data[2] = pw_mW & 0xff;
+ msg.data[3] = buf.sub[2];
+ msg.data[4] = buf.data[0];
+ msg.data[5] = buf.data[1];
+ msg.data[6] = buf.data[2];
+ msg.data[7] = buf.data[3];
+ return pd692x0_sendrecv_msg(priv, &msg, &buf);
+}
+
+static int
+pd692x0_configure_managers(struct pd692x0_priv *priv, int nmanagers)
+{
+ int i, ret;
+
+ for (i = 0; i < nmanagers; i++) {
+ struct regulator *supply = priv->manager_reg[i]->supply;
+ int pw_budget;
+
+ pw_budget = regulator_get_unclaimed_power_budget(supply);
+ /* Max power budget per manager */
+ if (pw_budget > 6000000)
+ pw_budget = 6000000;
+ ret = regulator_request_power_budget(supply, pw_budget);
+ if (ret < 0)
+ return ret;
+
+ priv->manager_pw_budget[i] = pw_budget;
+ ret = pd692x0_conf_manager_power_budget(priv, i, pw_budget);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int
pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset,
const struct pd692x0_manager *manager,
@@ -998,6 +1179,14 @@ static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
return ret;
nmanagers = ret;
+ ret = pd692x0_register_managers_regulator(priv, manager, nmanagers);
+ if (ret)
+ goto out;
+
+ ret = pd692x0_configure_managers(priv, nmanagers);
+ if (ret)
+ goto out;
+
ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix);
if (ret)
goto out;
@@ -1008,8 +1197,14 @@ static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
out:
for (i = 0; i < nmanagers; i++) {
+ struct regulator *supply = priv->manager_reg[i]->supply;
+
+ regulator_free_power_budget(supply,
+ priv->manager_pw_budget[i]);
+
for (j = 0; j < manager[i].nports; j++)
of_node_put(manager[i].port_node[j]);
+ of_node_put(manager[i].node);
}
return ret;
}
@@ -1071,6 +1266,25 @@ static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
return pd692x0_sendrecv_msg(priv, &msg, &buf);
}
+static int pd692x0_pi_set_prio(struct pse_controller_dev *pcdev, int id,
+ unsigned int prio)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.sub[2] = id;
+ /* Controller priority from 1 to 3 */
+ msg.data[4] = prio + 1;
+
+ return pd692x0_sendrecv_msg(priv, &msg, &buf);
+}
+
static const struct pse_controller_ops pd692x0_ops = {
.setup_pi_matrix = pd692x0_setup_pi_matrix,
.pi_get_admin_state = pd692x0_pi_get_admin_state,
@@ -1084,6 +1298,8 @@ static const struct pse_controller_ops pd692x0_ops = {
.pi_get_pw_limit = pd692x0_pi_get_pw_limit,
.pi_set_pw_limit = pd692x0_pi_set_pw_limit,
.pi_get_pw_limit_ranges = pd692x0_pi_get_pw_limit_ranges,
+ .pi_get_prio = pd692x0_pi_get_prio,
+ .pi_set_prio = pd692x0_pi_set_prio,
};
#define PD692X0_FW_LINE_MAX_SZ 0xff
@@ -1437,6 +1653,7 @@ static const struct fw_upload_ops pd692x0_fw_ops = {
static int pd692x0_i2c_probe(struct i2c_client *client)
{
+ static const char * const regulators[] = { "vdd", "vdda" };
struct pd692x0_msg msg, buf = {0}, zero = {0};
struct device *dev = &client->dev;
struct pd692x0_msg_ver ver;
@@ -1444,6 +1661,12 @@ static int pd692x0_i2c_probe(struct i2c_client *client)
struct fw_upload *fwl;
int ret;
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulators),
+ regulators);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable regulators\n");
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(dev, "i2c check functionality failed\n");
return -ENXIO;
@@ -1500,6 +1723,8 @@ static int pd692x0_i2c_probe(struct i2c_client *client)
priv->pcdev.ops = &pd692x0_ops;
priv->pcdev.dev = dev;
priv->pcdev.types = ETHTOOL_PSE_C33;
+ priv->pcdev.supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_DYNAMIC;
+ priv->pcdev.pis_prio_max = 2;
ret = devm_pse_controller_register(dev, &priv->pcdev);
if (ret)
return dev_err_probe(dev, ret,
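The pd692x0 manager setup above builds on the regulator framework's power-budget helpers. Below is a reduced sketch of that claim/cap/release flow; claim_manager_budget() and release_manager_budget() are hypothetical wrappers, the header is an assumption, and, judging by the pw / 1000 conversion in pd692x0_conf_manager_power_budget(), the budget values are handled in microwatts.

#include <linux/regulator/consumer.h>

/* Sketch: claim up to the 6000000 per-manager cap from the parent supply. */
static int claim_manager_budget(struct regulator *supply, int *allocated)
{
	int budget, ret;

	budget = regulator_get_unclaimed_power_budget(supply);
	if (budget > 6000000)
		budget = 6000000;

	ret = regulator_request_power_budget(supply, budget);
	if (ret < 0)
		return ret;

	*allocated = budget;
	return 0;
}

/* Counterpart used on teardown, mirroring the error path in the patch. */
static void release_manager_budget(struct regulator *supply, int allocated)
{
	regulator_free_power_budget(supply, allocated);
}

The driver keeps the allocated amount in manager_pw_budget[] so the same quantity can be returned with regulator_free_power_budget() on the error path of pd692x0_setup_pi_matrix().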
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 4602e26eb8c8..23eb3c9d0bcd 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -7,13 +7,21 @@
#include <linux/device.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/of.h>
+#include <linux/phy.h>
#include <linux/pse-pd/pse.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/rtnetlink.h>
+#include <net/net_trackers.h>
+
+#define PSE_PW_D_LIMIT INT_MAX
static DEFINE_MUTEX(pse_list_mutex);
static LIST_HEAD(pse_controller_list);
+static DEFINE_XARRAY_ALLOC(pse_pw_d_map);
+static DEFINE_MUTEX(pse_pw_d_mutex);
/**
* struct pse_control - a PSE control
@@ -23,6 +31,7 @@ static LIST_HEAD(pse_controller_list);
* @list: list entry for the pcdev's PSE controller list
* @id: ID of the PSE line in the PSE controller device
* @refcnt: Number of gets of this pse_control
+ * @attached_phydev: PHY device pointer attached by the PSE control
*/
struct pse_control {
struct pse_controller_dev *pcdev;
@@ -30,6 +39,22 @@ struct pse_control {
struct list_head list;
unsigned int id;
struct kref refcnt;
+ struct phy_device *attached_phydev;
+};
+
+/**
+ * struct pse_power_domain - a PSE power domain
+ * @id: ID of the power domain
+ * @supply: Power supply of the power domain
+ * @refcnt: Number of gets of this pse_power_domain
+ * @budget_eval_strategy: Current power budget evaluation strategy of the
+ * power domain
+ */
+struct pse_power_domain {
+ int id;
+ struct regulator *supply;
+ struct kref refcnt;
+ u32 budget_eval_strategy;
};
static int of_load_single_pse_pi_pairset(struct device_node *node,
@@ -208,10 +233,185 @@ out:
return ret;
}
+/**
+ * pse_control_find_by_id - Find the PSE control attached to an id
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ *
+ * Return: pse_control pointer or NULL. The pse_control returned has had a
+ * reference added and the pointer is safe until the user calls
+ * pse_control_put() to indicate they have finished with it.
+ */
+static struct pse_control *
+pse_control_find_by_id(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_control *psec;
+
+ mutex_lock(&pse_list_mutex);
+ list_for_each_entry(psec, &pcdev->pse_control_head, list) {
+ if (psec->id == id) {
+ kref_get(&psec->refcnt);
+ mutex_unlock(&pse_list_mutex);
+ return psec;
+ }
+ }
+ mutex_unlock(&pse_list_mutex);
+ return NULL;
+}
+
+/**
+ * pse_control_get_netdev - Return netdev associated to a PSE control
+ * @psec: PSE control pointer
+ *
+ * Return: netdev pointer or NULL
+ */
+static struct net_device *pse_control_get_netdev(struct pse_control *psec)
+{
+ ASSERT_RTNL();
+
+ if (!psec || !psec->attached_phydev)
+ return NULL;
+
+ return psec->attached_phydev->attached_dev;
+}
+
+/**
+ * pse_pi_is_hw_enabled - Is PI enabled at the hardware level
+ * @pcdev: a pointer to the PSE controller device
+ * @id: Index of the PI
+ *
+ * Return: 1 if the PI is enabled at the hardware level, 0 if not, and
+ * a failure value on error
+ */
+static int pse_pi_is_hw_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_admin_state admin_state = {0};
+ int ret;
+
+ ret = pcdev->ops->pi_get_admin_state(pcdev, id, &admin_state);
+ if (ret < 0)
+ return ret;
+
+ /* PI is enabled at the hardware level */
+ if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED ||
+ admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * pse_pi_is_admin_enable_pending - Check if PI is in admin enable pending state
+ * which means the power is not yet being
+ * delivered
+ * @pcdev: a pointer to the PSE controller device
+ * @id: Index of the PI
+ *
+ * Detects if a PI is enabled in software with a PD detected, but the hardware
+ * admin state hasn't been applied yet.
+ *
+ * This function is used in the power delivery and retry mechanisms to determine
+ * which PIs need to have power delivery attempted again.
+ *
+ * Return: true if the PI has admin enable flag set in software but not yet
+ * reflected in the hardware admin state, false otherwise.
+ */
+static bool
+pse_pi_is_admin_enable_pending(struct pse_controller_dev *pcdev, int id)
+{
+ int ret;
+
+ /* PI not enabled or nothing is plugged */
+ if (!pcdev->pi[id].admin_state_enabled ||
+ !pcdev->pi[id].isr_pd_detected)
+ return false;
+
+ ret = pse_pi_is_hw_enabled(pcdev, id);
+ /* PSE PI is already enabled at hardware level */
+ if (ret == 1)
+ return false;
+
+ return true;
+}
+
+static int _pse_pi_delivery_power_sw_pw_ctrl(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack);
+
+/**
+ * pse_pw_d_retry_power_delivery - Retry power delivery for pending ports in a
+ * PSE power domain
+ * @pcdev: a pointer to the PSE controller device
+ * @pw_d: a pointer to the PSE power domain
+ *
+ * Scans all ports in the specified power domain and attempts to enable power
+ * delivery to any ports that have admin enable state set but don't yet have
+ * hardware power enabled. Used when there are changes in connection status,
+ * admin state, or priority that might allow previously unpowered ports to
+ * receive power, especially in over-budget conditions.
+ */
+static void pse_pw_d_retry_power_delivery(struct pse_controller_dev *pcdev,
+ struct pse_power_domain *pw_d)
+{
+ int prio_max = pcdev->nr_lines;
+ int i, ret = 0;
+
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ struct netlink_ext_ack extack;
+
+ if (pcdev->pi[i].pw_d != pw_d)
+ continue;
+
+ if (!pse_pi_is_admin_enable_pending(pcdev, i))
+ continue;
+
+ /* Do not try to enable PI with a lower prio (higher value)
+ * than one which already can't be enabled.
+ */
+ if (pcdev->pi[i].prio > prio_max)
+ continue;
+
+ ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, i, &extack);
+ if (ret == -ERANGE)
+ prio_max = pcdev->pi[i].prio;
+ }
+}
+
+/**
+ * pse_pw_d_is_sw_pw_control - Determine if power control is software managed
+ * @pcdev: a pointer to the PSE controller device
+ * @pw_d: a pointer to the PSE power domain
+ *
+ * This function determines whether the power control for a specific power
+ * domain is managed by software in the interrupt handler rather than directly
+ * by hardware.
+ *
+ * Software power control is active in the following cases:
+ * - When the budget evaluation strategy is set to static
+ * - When the budget evaluation strategy is disabled but the PSE controller
+ * has an interrupt handler that can report if a Powered Device is connected
+ *
+ * Return: true if the power control of the power domain is managed by software,
+ * false otherwise
+ */
+static bool pse_pw_d_is_sw_pw_control(struct pse_controller_dev *pcdev,
+ struct pse_power_domain *pw_d)
+{
+ if (!pw_d)
+ return false;
+
+ if (pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_STATIC)
+ return true;
+ if (pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_DISABLED &&
+ pcdev->ops->pi_enable && pcdev->irq)
+ return true;
+
+ return false;
+}
+
static int pse_pi_is_enabled(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
- struct pse_admin_state admin_state = {0};
const struct pse_controller_ops *ops;
int id, ret;
@@ -221,13 +421,12 @@ static int pse_pi_is_enabled(struct regulator_dev *rdev)
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = ops->pi_get_admin_state(pcdev, id, &admin_state);
- if (ret)
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d)) {
+ ret = pcdev->pi[id].admin_state_enabled;
goto out;
+ }
- if (admin_state.podl_admin_state == ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED ||
- admin_state.c33_admin_state == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
- ret = 1;
+ ret = pse_pi_is_hw_enabled(pcdev, id);
out:
mutex_unlock(&pcdev->lock);
@@ -235,11 +434,239 @@ out:
return ret;
}
+/**
+ * pse_pi_deallocate_pw_budget - Deallocate power budget of the PI
+ * @pi: a pointer to the PSE PI
+ */
+static void pse_pi_deallocate_pw_budget(struct pse_pi *pi)
+{
+ if (!pi->pw_d || !pi->pw_allocated_mW)
+ return;
+
+ regulator_free_power_budget(pi->pw_d->supply, pi->pw_allocated_mW);
+ pi->pw_allocated_mW = 0;
+}
+
+/**
+ * _pse_pi_disable - Call disable operation. Assumes the PSE lock has been
+ * acquired.
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int _pse_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ const struct pse_controller_ops *ops = pcdev->ops;
+ int ret;
+
+ if (!ops->pi_disable)
+ return -EOPNOTSUPP;
+
+ ret = ops->pi_disable(pcdev, id);
+ if (ret)
+ return ret;
+
+ pse_pi_deallocate_pw_budget(&pcdev->pi[id]);
+
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d))
+ pse_pw_d_retry_power_delivery(pcdev, pcdev->pi[id].pw_d);
+
+ return 0;
+}
+
+/**
+ * pse_disable_pi_pol - Disable a PI on a power budget policy
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE PI
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_disable_pi_pol(struct pse_controller_dev *pcdev, int id)
+{
+ unsigned long notifs = ETHTOOL_PSE_EVENT_OVER_BUDGET;
+ struct pse_ntf ntf = {};
+ int ret;
+
+ dev_dbg(pcdev->dev, "Disabling PI %d to free power budget\n", id);
+
+ ret = _pse_pi_disable(pcdev, id);
+ if (ret)
+ notifs |= ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR;
+
+ ntf.notifs = notifs;
+ ntf.id = id;
+ kfifo_in_spinlocked(&pcdev->ntf_fifo, &ntf, 1, &pcdev->ntf_fifo_lock);
+ schedule_work(&pcdev->ntf_work);
+
+ return ret;
+}
+
+/**
+ * pse_disable_pi_prio - Disable all PIs of a given priority inside a PSE
+ * power domain
+ * @pcdev: a pointer to the PSE
+ * @pw_d: a pointer to the PSE power domain
+ * @prio: priority
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_disable_pi_prio(struct pse_controller_dev *pcdev,
+ struct pse_power_domain *pw_d,
+ int prio)
+{
+ int i;
+
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ int ret;
+
+ if (pcdev->pi[i].prio != prio ||
+ pcdev->pi[i].pw_d != pw_d ||
+ pse_pi_is_hw_enabled(pcdev, i) <= 0)
+ continue;
+
+ ret = pse_disable_pi_pol(pcdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pse_pi_allocate_pw_budget_static_prio - Allocate power budget for the PI
+ * when the budget eval strategy is
+ * static
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @pw_req: power requested in mW
+ * @extack: extack for error reporting
+ *
+ * Allocates power using static budget evaluation strategy, where allocation
+ * is based on PD classification. When insufficient budget is available,
+ * lower-priority ports (higher priority numbers) are turned off first.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int
+pse_pi_allocate_pw_budget_static_prio(struct pse_controller_dev *pcdev, int id,
+ int pw_req, struct netlink_ext_ack *extack)
+{
+ struct pse_pi *pi = &pcdev->pi[id];
+ int ret, _prio;
+
+ _prio = pcdev->nr_lines;
+ while (regulator_request_power_budget(pi->pw_d->supply, pw_req) == -ERANGE) {
+ if (_prio <= pi->prio) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: not enough power budget available",
+ id);
+ return -ERANGE;
+ }
+
+ ret = pse_disable_pi_prio(pcdev, pi->pw_d, _prio);
+ if (ret < 0)
+ return ret;
+
+ _prio--;
+ }
+
+ pi->pw_allocated_mW = pw_req;
+ return 0;
+}
+
+/**
+ * pse_pi_allocate_pw_budget - Allocate power budget for the PI
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @pw_req: power requested in mW
+ * @extack: extack for error reporting
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_pi_allocate_pw_budget(struct pse_controller_dev *pcdev, int id,
+ int pw_req, struct netlink_ext_ack *extack)
+{
+ struct pse_pi *pi = &pcdev->pi[id];
+
+ if (!pi->pw_d)
+ return 0;
+
+ /* PSE_BUDGET_EVAL_STRAT_STATIC */
+ if (pi->pw_d->budget_eval_strategy == PSE_BUDGET_EVAL_STRAT_STATIC)
+ return pse_pi_allocate_pw_budget_static_prio(pcdev, id, pw_req,
+ extack);
+
+ return 0;
+}
+
+/**
+ * _pse_pi_delivery_power_sw_pw_ctrl - Enable PSE PI in case of software power
+ * control. Assumes the PSE lock has been
+ * acquired.
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @extack: extack for error reporting
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int _pse_pi_delivery_power_sw_pw_ctrl(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack)
+{
+ const struct pse_controller_ops *ops = pcdev->ops;
+ struct pse_pi *pi = &pcdev->pi[id];
+ int ret, pw_req;
+
+ if (!ops->pi_get_pw_req) {
+ /* No power allocation management */
+ ret = ops->pi_enable(pcdev, id);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: enable error %d",
+ id, ret);
+ return ret;
+ }
+
+ ret = ops->pi_get_pw_req(pcdev, id);
+ if (ret < 0)
+ return ret;
+
+ pw_req = ret;
+
+ /* Compare requested power with port power limit and use the lowest
+ * one.
+ */
+ if (ops->pi_get_pw_limit) {
+ ret = ops->pi_get_pw_limit(pcdev, id);
+ if (ret < 0)
+ return ret;
+
+ if (ret < pw_req)
+ pw_req = ret;
+ }
+
+ ret = pse_pi_allocate_pw_budget(pcdev, id, pw_req, extack);
+ if (ret)
+ return ret;
+
+ ret = ops->pi_enable(pcdev, id);
+ if (ret) {
+ pse_pi_deallocate_pw_budget(pi);
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: enable error %d",
+ id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int pse_pi_enable(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
const struct pse_controller_ops *ops;
- int id, ret;
+ int id, ret = 0;
ops = pcdev->ops;
if (!ops->pi_enable)
@@ -247,6 +674,23 @@ static int pse_pi_enable(struct regulator_dev *rdev)
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[id].pw_d)) {
+ /* Manage enabled status by software.
+ * Real enable process will happen if a port is connected.
+ */
+ if (pcdev->pi[id].isr_pd_detected) {
+ struct netlink_ext_ack extack;
+
+ ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, id, &extack);
+ }
+ if (!ret || ret == -ERANGE) {
+ pcdev->pi[id].admin_state_enabled = 1;
+ ret = 0;
+ }
+ mutex_unlock(&pcdev->lock);
+ return ret;
+ }
+
ret = ops->pi_enable(pcdev, id);
if (!ret)
pcdev->pi[id].admin_state_enabled = 1;
@@ -258,21 +702,18 @@ static int pse_pi_enable(struct regulator_dev *rdev)
static int pse_pi_disable(struct regulator_dev *rdev)
{
struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
- const struct pse_controller_ops *ops;
+ struct pse_pi *pi;
int id, ret;
- ops = pcdev->ops;
- if (!ops->pi_disable)
- return -EOPNOTSUPP;
-
id = rdev_get_id(rdev);
+ pi = &pcdev->pi[id];
mutex_lock(&pcdev->lock);
- ret = ops->pi_disable(pcdev, id);
+ ret = _pse_pi_disable(pcdev, id);
if (!ret)
- pcdev->pi[id].admin_state_enabled = 0;
- mutex_unlock(&pcdev->lock);
+ pi->admin_state_enabled = 0;
- return ret;
+ mutex_unlock(&pcdev->lock);
+ return 0;
}
static int _pse_pi_get_voltage(struct regulator_dev *rdev)
@@ -437,6 +878,158 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
return 0;
}
+static void __pse_pw_d_release(struct kref *kref)
+{
+ struct pse_power_domain *pw_d = container_of(kref,
+ struct pse_power_domain,
+ refcnt);
+
+ regulator_put(pw_d->supply);
+ xa_erase(&pse_pw_d_map, pw_d->id);
+ mutex_unlock(&pse_pw_d_mutex);
+}
+
+/**
+ * pse_flush_pw_ds - flush all PSE power domains of a PSE
+ * @pcdev: a pointer to the initialized PSE controller device
+ */
+static void pse_flush_pw_ds(struct pse_controller_dev *pcdev)
+{
+ struct pse_power_domain *pw_d;
+ int i;
+
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ if (!pcdev->pi[i].pw_d)
+ continue;
+
+ pw_d = xa_load(&pse_pw_d_map, pcdev->pi[i].pw_d->id);
+ if (!pw_d)
+ continue;
+
+ kref_put_mutex(&pw_d->refcnt, __pse_pw_d_release,
+ &pse_pw_d_mutex);
+ }
+}
+
+/**
+ * devm_pse_alloc_pw_d - allocate a new PSE power domain for a device
+ * @dev: device that is registering this PSE power domain
+ *
+ * Return: Pointer to the newly allocated PSE power domain or error pointers
+ */
+static struct pse_power_domain *devm_pse_alloc_pw_d(struct device *dev)
+{
+ struct pse_power_domain *pw_d;
+ int index, ret;
+
+ pw_d = devm_kzalloc(dev, sizeof(*pw_d), GFP_KERNEL);
+ if (!pw_d)
+ return ERR_PTR(-ENOMEM);
+
+ ret = xa_alloc(&pse_pw_d_map, &index, pw_d, XA_LIMIT(1, PSE_PW_D_LIMIT),
+ GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ kref_init(&pw_d->refcnt);
+ pw_d->id = index;
+ return pw_d;
+}
+
+/**
+ * pse_register_pw_ds - register the PSE power domains for a PSE
+ * @pcdev: a pointer to the PSE controller device
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_register_pw_ds(struct pse_controller_dev *pcdev)
+{
+ int i, ret = 0;
+
+ mutex_lock(&pse_pw_d_mutex);
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ struct regulator_dev *rdev = pcdev->pi[i].rdev;
+ struct pse_power_domain *pw_d;
+ struct regulator *supply;
+ bool present = false;
+ unsigned long index;
+
+ /* No regulator or regulator parent supply registered.
+ * We need a regulator parent to register a PSE power domain
+ */
+ if (!rdev || !rdev->supply)
+ continue;
+
+ xa_for_each(&pse_pw_d_map, index, pw_d) {
+ /* Power supply already registered as a PSE power
+ * domain.
+ */
+ if (regulator_is_equal(pw_d->supply, rdev->supply)) {
+ present = true;
+ pcdev->pi[i].pw_d = pw_d;
+ break;
+ }
+ }
+ if (present) {
+ kref_get(&pw_d->refcnt);
+ continue;
+ }
+
+ pw_d = devm_pse_alloc_pw_d(pcdev->dev);
+ if (IS_ERR(pw_d)) {
+ ret = PTR_ERR(pw_d);
+ goto out;
+ }
+
+ supply = regulator_get(&rdev->dev, rdev->supply_name);
+ if (IS_ERR(supply)) {
+ xa_erase(&pse_pw_d_map, pw_d->id);
+ ret = PTR_ERR(supply);
+ goto out;
+ }
+
+ pw_d->supply = supply;
+ if (pcdev->supp_budget_eval_strategies)
+ pw_d->budget_eval_strategy = pcdev->supp_budget_eval_strategies;
+ else
+ pw_d->budget_eval_strategy = PSE_BUDGET_EVAL_STRAT_DISABLED;
+ kref_init(&pw_d->refcnt);
+ pcdev->pi[i].pw_d = pw_d;
+ }
+
+out:
+ mutex_unlock(&pse_pw_d_mutex);
+ return ret;
+}
+
+/**
+ * pse_send_ntf_worker - Worker to send PSE notifications
+ * @work: work object
+ *
+ * Manage and send PSE netlink notifications using a workqueue to avoid
+ * deadlock between pcdev_lock and pse_list_mutex.
+ */
+static void pse_send_ntf_worker(struct work_struct *work)
+{
+ struct pse_controller_dev *pcdev;
+ struct pse_ntf ntf;
+
+ pcdev = container_of(work, struct pse_controller_dev, ntf_work);
+
+ while (kfifo_out(&pcdev->ntf_fifo, &ntf, 1)) {
+ struct net_device *netdev;
+ struct pse_control *psec;
+
+ psec = pse_control_find_by_id(pcdev, ntf.id);
+ rtnl_lock();
+ netdev = pse_control_get_netdev(psec);
+ if (netdev)
+ ethnl_pse_send_ntf(netdev, ntf.notifs);
+ rtnl_unlock();
+ pse_control_put(psec);
+ }
+}
+
/**
* pse_controller_register - register a PSE controller device
* @pcdev: a pointer to the initialized PSE controller device
@@ -450,6 +1043,13 @@ int pse_controller_register(struct pse_controller_dev *pcdev)
mutex_init(&pcdev->lock);
INIT_LIST_HEAD(&pcdev->pse_control_head);
+ spin_lock_init(&pcdev->ntf_fifo_lock);
+ ret = kfifo_alloc(&pcdev->ntf_fifo, pcdev->nr_lines, GFP_KERNEL);
+ if (ret) {
+ dev_err(pcdev->dev, "failed to allocate kfifo notifications\n");
+ return ret;
+ }
+ INIT_WORK(&pcdev->ntf_work, pse_send_ntf_worker);
if (!pcdev->nr_lines)
pcdev->nr_lines = 1;
@@ -496,6 +1096,10 @@ int pse_controller_register(struct pse_controller_dev *pcdev)
return ret;
}
+ ret = pse_register_pw_ds(pcdev);
+ if (ret)
+ return ret;
+
mutex_lock(&pse_list_mutex);
list_add(&pcdev->list, &pse_controller_list);
mutex_unlock(&pse_list_mutex);
@@ -510,7 +1114,12 @@ EXPORT_SYMBOL_GPL(pse_controller_register);
*/
void pse_controller_unregister(struct pse_controller_dev *pcdev)
{
+ pse_flush_pw_ds(pcdev);
pse_release_pis(pcdev);
+ if (pcdev->irq)
+ disable_irq(pcdev->irq);
+ cancel_work_sync(&pcdev->ntf_work);
+ kfifo_free(&pcdev->ntf_fifo);
mutex_lock(&pse_list_mutex);
list_del(&pcdev->list);
mutex_unlock(&pse_list_mutex);
@@ -557,6 +1166,191 @@ int devm_pse_controller_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_pse_controller_register);
+struct pse_irq {
+ struct pse_controller_dev *pcdev;
+ struct pse_irq_desc desc;
+ unsigned long *notifs;
+};
+
+/**
+ * pse_to_regulator_notifs - Convert PSE notifications to Regulator
+ * notifications
+ * @notifs: PSE notifications
+ *
+ * Return: Regulator notifications
+ */
+static unsigned long pse_to_regulator_notifs(unsigned long notifs)
+{
+ unsigned long rnotifs = 0;
+
+ if (notifs & ETHTOOL_PSE_EVENT_OVER_CURRENT)
+ rnotifs |= REGULATOR_EVENT_OVER_CURRENT;
+ if (notifs & ETHTOOL_PSE_EVENT_OVER_TEMP)
+ rnotifs |= REGULATOR_EVENT_OVER_TEMP;
+
+ return rnotifs;
+}
+
+/**
+ * pse_set_config_isr - Set PSE control config according to the PSE
+ * notifications
+ * @pcdev: a pointer to the PSE
+ * @id: index of the PSE control
+ * @notifs: PSE event notifications
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int pse_set_config_isr(struct pse_controller_dev *pcdev, int id,
+ unsigned long notifs)
+{
+ int ret = 0;
+
+ if (notifs & PSE_BUDGET_EVAL_STRAT_DYNAMIC)
+ return 0;
+
+ if ((notifs & ETHTOOL_C33_PSE_EVENT_DISCONNECTION) &&
+ ((notifs & ETHTOOL_C33_PSE_EVENT_DETECTION) ||
+ (notifs & ETHTOOL_C33_PSE_EVENT_CLASSIFICATION))) {
+ dev_dbg(pcdev->dev,
+ "PI %d: error, connection and disconnection reported simultaneously",
+ id);
+ return -EINVAL;
+ }
+
+ if (notifs & ETHTOOL_C33_PSE_EVENT_CLASSIFICATION) {
+ struct netlink_ext_ack extack;
+
+ pcdev->pi[id].isr_pd_detected = true;
+ if (pcdev->pi[id].admin_state_enabled) {
+ ret = _pse_pi_delivery_power_sw_pw_ctrl(pcdev, id,
+ &extack);
+ if (ret == -ERANGE)
+ ret = 0;
+ }
+ } else if (notifs & ETHTOOL_C33_PSE_EVENT_DISCONNECTION) {
+ if (pcdev->pi[id].admin_state_enabled &&
+ pcdev->pi[id].isr_pd_detected)
+ ret = _pse_pi_disable(pcdev, id);
+ pcdev->pi[id].isr_pd_detected = false;
+ }
+
+ return ret;
+}
+
+/**
+ * pse_isr - IRQ handler for PSE
+ * @irq: irq number
+ * @data: pointer to user interrupt structure
+ *
+ * Return: irqreturn_t - status of IRQ
+ */
+static irqreturn_t pse_isr(int irq, void *data)
+{
+ struct pse_controller_dev *pcdev;
+ unsigned long notifs_mask = 0;
+ struct pse_irq_desc *desc;
+ struct pse_irq *h = data;
+ int ret, i;
+
+ desc = &h->desc;
+ pcdev = h->pcdev;
+
+ /* Clear notifs mask */
+ memset(h->notifs, 0, pcdev->nr_lines * sizeof(*h->notifs));
+ mutex_lock(&pcdev->lock);
+ ret = desc->map_event(irq, pcdev, h->notifs, &notifs_mask);
+ if (ret || !notifs_mask) {
+ mutex_unlock(&pcdev->lock);
+ return IRQ_NONE;
+ }
+
+ for_each_set_bit(i, &notifs_mask, pcdev->nr_lines) {
+ unsigned long notifs, rnotifs;
+ struct pse_ntf ntf = {};
+
+ /* Do nothing if the PI is not described */
+ if (!pcdev->pi[i].rdev)
+ continue;
+
+ notifs = h->notifs[i];
+ if (pse_pw_d_is_sw_pw_control(pcdev, pcdev->pi[i].pw_d)) {
+ ret = pse_set_config_isr(pcdev, i, notifs);
+ if (ret)
+ notifs |= ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR;
+ }
+
+ dev_dbg(h->pcdev->dev,
+ "Sending PSE notification EVT 0x%lx\n", notifs);
+
+ ntf.notifs = notifs;
+ ntf.id = i;
+ kfifo_in_spinlocked(&pcdev->ntf_fifo, &ntf, 1,
+ &pcdev->ntf_fifo_lock);
+ schedule_work(&pcdev->ntf_work);
+
+ rnotifs = pse_to_regulator_notifs(notifs);
+ regulator_notifier_call_chain(pcdev->pi[i].rdev, rnotifs,
+ NULL);
+ }
+
+ mutex_unlock(&pcdev->lock);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * devm_pse_irq_helper - Register IRQ based PSE event notifier
+ * @pcdev: a pointer to the PSE
+ * @irq: the irq value to be passed to request_irq
+ * @irq_flags: the flags to be passed to request_irq
+ * @d: PSE interrupt description
+ *
+ * Return: 0 on success and errno on failure
+ */
+int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq,
+ int irq_flags, const struct pse_irq_desc *d)
+{
+ struct device *dev = pcdev->dev;
+ size_t irq_name_len;
+ struct pse_irq *h;
+ char *irq_name;
+ int ret;
+
+ if (!d || !d->map_event || !d->name)
+ return -EINVAL;
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->pcdev = pcdev;
+ h->desc = *d;
+
+ /* IRQ name len is pcdev dev name + 5 char + irq desc name + 1 */
+ irq_name_len = strlen(dev_name(pcdev->dev)) + 5 + strlen(d->name) + 1;
+ irq_name = devm_kzalloc(dev, irq_name_len, GFP_KERNEL);
+ if (!irq_name)
+ return -ENOMEM;
+
+ snprintf(irq_name, irq_name_len, "pse-%s:%s", dev_name(pcdev->dev),
+ d->name);
+
+ h->notifs = devm_kcalloc(dev, pcdev->nr_lines,
+ sizeof(*h->notifs), GFP_KERNEL);
+ if (!h->notifs)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, pse_isr,
+ IRQF_ONESHOT | irq_flags,
+ irq_name, h);
+ if (ret)
+ dev_err(pcdev->dev, "Failed to request IRQ %d\n", irq);
+
+ pcdev->irq = irq;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_pse_irq_helper);
+
/* PSE control section */
static void __pse_control_release(struct kref *kref)
@@ -599,7 +1393,8 @@ void pse_control_put(struct pse_control *psec)
EXPORT_SYMBOL_GPL(pse_control_put);
static struct pse_control *
-pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
+pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index,
+ struct phy_device *phydev)
{
struct pse_control *psec;
int ret;
@@ -622,6 +1417,20 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
goto free_psec;
}
+ if (!pcdev->ops->pi_get_admin_state) {
+ ret = -EOPNOTSUPP;
+ goto free_psec;
+ }
+
+ /* Initialize admin_state_enabled before the regulator_get. This
+ * aims to have the right value reported in the first is_enabled
+ * call in case of control managed by software.
+ */
+ ret = pse_pi_is_hw_enabled(pcdev, index);
+ if (ret < 0)
+ goto free_psec;
+
+ pcdev->pi[index].admin_state_enabled = ret;
psec->ps = devm_regulator_get_exclusive(pcdev->dev,
rdev_get_name(pcdev->pi[index].rdev));
if (IS_ERR(psec->ps)) {
@@ -629,21 +1438,14 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
goto put_module;
}
- ret = regulator_is_enabled(psec->ps);
- if (ret < 0)
- goto regulator_put;
-
- pcdev->pi[index].admin_state_enabled = ret;
-
psec->pcdev = pcdev;
list_add(&psec->list, &pcdev->pse_control_head);
psec->id = index;
+ psec->attached_phydev = phydev;
kref_init(&psec->refcnt);
return psec;
-regulator_put:
- devm_regulator_put(psec->ps);
put_module:
module_put(pcdev->owner);
free_psec:
@@ -693,7 +1495,8 @@ static int psec_id_xlate(struct pse_controller_dev *pcdev,
return pse_spec->args[0];
}
-struct pse_control *of_pse_control_get(struct device_node *node)
+struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev)
{
struct pse_controller_dev *r, *pcdev;
struct of_phandle_args args;
@@ -743,7 +1546,7 @@ struct pse_control *of_pse_control_get(struct device_node *node)
}
/* pse_list_mutex also protects the pcdev's pse_control list */
- psec = pse_control_get_internal(pcdev, psec_id);
+ psec = pse_control_get_internal(pcdev, psec_id, phydev);
out:
mutex_unlock(&pse_list_mutex);
@@ -754,6 +1557,35 @@ out:
EXPORT_SYMBOL_GPL(of_pse_control_get);
/**
+ * pse_get_sw_admin_state - Convert the software admin state to c33 or podl
+ * admin state value used in the standard
+ * @psec: PSE control pointer
+ * @admin_state: a pointer to the admin_state structure
+ */
+static void pse_get_sw_admin_state(struct pse_control *psec,
+ struct pse_admin_state *admin_state)
+{
+ struct pse_pi *pi = &psec->pcdev->pi[psec->id];
+
+ if (pse_has_podl(psec)) {
+ if (pi->admin_state_enabled)
+ admin_state->podl_admin_state =
+ ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->podl_admin_state =
+ ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
+ }
+ if (pse_has_c33(psec)) {
+ if (pi->admin_state_enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ }
+}
+
+/**
* pse_ethtool_get_status - get status of PSE control
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
@@ -769,16 +1601,46 @@ int pse_ethtool_get_status(struct pse_control *psec,
struct pse_pw_status pw_status = {0};
const struct pse_controller_ops *ops;
struct pse_controller_dev *pcdev;
+ struct pse_pi *pi;
int ret;
pcdev = psec->pcdev;
ops = pcdev->ops;
+
+ pi = &pcdev->pi[psec->id];
mutex_lock(&pcdev->lock);
- ret = ops->pi_get_admin_state(pcdev, psec->id, &admin_state);
- if (ret)
- goto out;
- status->podl_admin_state = admin_state.podl_admin_state;
- status->c33_admin_state = admin_state.c33_admin_state;
+ if (pi->pw_d) {
+ status->pw_d_id = pi->pw_d->id;
+ if (pse_pw_d_is_sw_pw_control(pcdev, pi->pw_d)) {
+ pse_get_sw_admin_state(psec, &admin_state);
+ } else {
+ ret = ops->pi_get_admin_state(pcdev, psec->id,
+ &admin_state);
+ if (ret)
+ goto out;
+ }
+ status->podl_admin_state = admin_state.podl_admin_state;
+ status->c33_admin_state = admin_state.c33_admin_state;
+
+ switch (pi->pw_d->budget_eval_strategy) {
+ case PSE_BUDGET_EVAL_STRAT_STATIC:
+ status->prio_max = pcdev->nr_lines - 1;
+ status->prio = pi->prio;
+ break;
+ case PSE_BUDGET_EVAL_STRAT_DYNAMIC:
+ status->prio_max = pcdev->pis_prio_max;
+ if (ops->pi_get_prio) {
+ ret = ops->pi_get_prio(pcdev, psec->id);
+ if (ret < 0)
+ goto out;
+
+ status->prio = ret;
+ }
+ break;
+ default:
+ break;
+ }
+ }
ret = ops->pi_get_pw_status(pcdev, psec->id, &pw_status);
if (ret)
@@ -928,6 +1790,52 @@ int pse_ethtool_set_config(struct pse_control *psec,
EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
/**
+ * pse_pi_update_pw_budget - Update PSE power budget allocated with new
+ * power in mW
+ * @pcdev: a pointer to the PSE controller device
+ * @id: index of the PSE PI
+ * @pw_req: power requested
+ * @extack: extack for reporting useful error messages
+ *
+ * Return: Previous power allocated on success and failure value on error
+ */
+static int pse_pi_update_pw_budget(struct pse_controller_dev *pcdev, int id,
+ const unsigned int pw_req,
+ struct netlink_ext_ack *extack)
+{
+ struct pse_pi *pi = &pcdev->pi[id];
+ int previous_pw_allocated;
+ int pw_diff, ret = 0;
+
+ /* We don't want the pw_allocated_mW value to change in the middle of a
+ * power budget update
+ */
+ mutex_lock(&pcdev->lock);
+ previous_pw_allocated = pi->pw_allocated_mW;
+ pw_diff = pw_req - previous_pw_allocated;
+ if (!pw_diff) {
+ goto out;
+ } else if (pw_diff > 0) {
+ ret = regulator_request_power_budget(pi->pw_d->supply, pw_diff);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "PI %d: not enough power budget available",
+ id);
+ goto out;
+ }
+
+ } else {
+ regulator_free_power_budget(pi->pw_d->supply, -pw_diff);
+ }
+ pi->pw_allocated_mW = pw_req;
+ ret = previous_pw_allocated;
+
+out:
+ mutex_unlock(&pcdev->lock);
+ return ret;
+}
+
+/**
* pse_ethtool_set_pw_limit - set PSE control power limit
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
@@ -939,7 +1847,7 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec,
struct netlink_ext_ack *extack,
const unsigned int pw_limit)
{
- int uV, uA, ret;
+ int uV, uA, ret, previous_pw_allocated = 0;
s64 tmp_64;
if (pw_limit > MAX_PI_PW)
@@ -963,10 +1871,100 @@ int pse_ethtool_set_pw_limit(struct pse_control *psec,
/* uA = mW * 1000000000 / uV */
uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
- return regulator_set_current_limit(psec->ps, 0, uA);
+ /* Update power budget only in software power control case and
+ * if a Power Device is powered.
+ */
+ if (pse_pw_d_is_sw_pw_control(psec->pcdev,
+ psec->pcdev->pi[psec->id].pw_d) &&
+ psec->pcdev->pi[psec->id].admin_state_enabled &&
+ psec->pcdev->pi[psec->id].isr_pd_detected) {
+ ret = pse_pi_update_pw_budget(psec->pcdev, psec->id,
+ pw_limit, extack);
+ if (ret < 0)
+ return ret;
+ previous_pw_allocated = ret;
+ }
+
+ ret = regulator_set_current_limit(psec->ps, 0, uA);
+ if (ret < 0 && previous_pw_allocated) {
+ pse_pi_update_pw_budget(psec->pcdev, psec->id,
+ previous_pw_allocated, extack);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pse_ethtool_set_pw_limit);
+/**
+ * pse_ethtool_set_prio - Set PSE PI priority according to the budget
+ * evaluation strategy
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @prio: priority value
+ *
+ * Return: 0 on success and failure value on error
+ */
+int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio)
+{
+ struct pse_controller_dev *pcdev = psec->pcdev;
+ const struct pse_controller_ops *ops;
+ int ret = 0;
+
+ if (!pcdev->pi[psec->id].pw_d) {
+ NL_SET_ERR_MSG(extack, "no power domain attached");
+ return -EOPNOTSUPP;
+ }
+
+ /* We don't want the priority to change in the middle of an
+ * enable/disable call or a priority mode change
+ */
+ mutex_lock(&pcdev->lock);
+ switch (pcdev->pi[psec->id].pw_d->budget_eval_strategy) {
+ case PSE_BUDGET_EVAL_STRAT_STATIC:
+ if (prio >= pcdev->nr_lines) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "priority %d exceeds maximum priority %d",
+ prio, pcdev->nr_lines - 1);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ pcdev->pi[psec->id].prio = prio;
+ pse_pw_d_retry_power_delivery(pcdev, pcdev->pi[psec->id].pw_d);
+ break;
+
+ case PSE_BUDGET_EVAL_STRAT_DYNAMIC:
+ ops = psec->pcdev->ops;
+ if (!ops->pi_set_prio) {
+ NL_SET_ERR_MSG(extack,
+ "pse driver does not support setting port priority");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (prio > pcdev->pis_prio_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "priority %d exceeds maximum priority %d",
+ prio, pcdev->pis_prio_max);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ ret = ops->pi_set_prio(pcdev, psec->id, prio);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+out:
+ mutex_unlock(&pcdev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_set_prio);
+
bool pse_has_podl(struct pse_control *psec)
{
return psec->pcdev->types & ETHTOOL_PSE_PODL;
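From a driver author's perspective, the notification plumbing added to pse_core.c reduces to implementing a map_event() callback and registering it with devm_pse_irq_helper(); the tps23881 hunks below are the first user. The following condensed sketch uses hypothetical foo_* names; the structures and helpers are the ones introduced above.

#include <linux/bits.h>
#include <linux/ethtool.h>
#include <linux/pse-pd/pse.h>

/* Translate one controller interrupt into per-PI event bitmaps.
 * @notifs has pcdev->nr_lines entries; @notifs_mask flags the PIs touched.
 */
static int foo_map_event(int irq, struct pse_controller_dev *pcdev,
			 unsigned long *notifs, unsigned long *notifs_mask)
{
	/* ...read the hardware status registers, then e.g. for PI 0: */
	notifs[0] |= ETHTOOL_C33_PSE_EVENT_CLASSIFICATION;
	*notifs_mask |= BIT(0);
	return 0;
}

static int foo_setup_irq(struct pse_controller_dev *pcdev, int irq)
{
	struct pse_irq_desc desc = {
		.name = "foo-irq",
		.map_event = foo_map_event,
	};

	/* pse_core threads the IRQ and forwards the mapped events to
	 * ethtool and the regulator notifier chain.
	 */
	return devm_pse_irq_helper(pcdev, irq, 0, &desc);
}

On each interrupt, pse_isr() clears the per-line notifs array, takes pcdev->lock, calls map_event(), and then queues one pse_ntf entry per flagged PI for the ethtool notification worker.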
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 5e9dda2c0eac..63f8f43062bc 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -16,15 +16,34 @@
#include <linux/pse-pd/pse.h>
#define TPS23881_MAX_CHANS 8
-
+#define TPS23881_MAX_IRQ_RETRIES 10
+
+#define TPS23881_REG_IT 0x0
+#define TPS23881_REG_IT_MASK 0x1
+#define TPS23881_REG_IT_DISF BIT(2)
+#define TPS23881_REG_IT_DETC BIT(3)
+#define TPS23881_REG_IT_CLASC BIT(4)
+#define TPS23881_REG_IT_IFAULT BIT(5)
+#define TPS23881_REG_IT_SUPF BIT(7)
+#define TPS23881_REG_DET_EVENT 0x5
+#define TPS23881_REG_FAULT 0x7
+#define TPS23881_REG_SUPF_EVENT 0xb
+#define TPS23881_REG_TSD BIT(7)
+#define TPS23881_REG_DISC 0xc
#define TPS23881_REG_PW_STATUS 0x10
#define TPS23881_REG_OP_MODE 0x12
+#define TPS23881_REG_DISC_EN 0x13
#define TPS23881_OP_MODE_SEMIAUTO 0xaaaa
#define TPS23881_REG_DIS_EN 0x13
#define TPS23881_REG_DET_CLA_EN 0x14
#define TPS23881_REG_GEN_MASK 0x17
+#define TPS23881_REG_CLCHE BIT(2)
+#define TPS23881_REG_DECHE BIT(3)
#define TPS23881_REG_NBITACC BIT(5)
+#define TPS23881_REG_INTEN BIT(7)
#define TPS23881_REG_PW_EN 0x19
+#define TPS23881_REG_RESET 0x1a
+#define TPS23881_REG_CLRAIN BIT(7)
#define TPS23881_REG_2PAIR_POL1 0x1e
#define TPS23881_REG_PORT_MAP 0x26
#define TPS23881_REG_PORT_POWER 0x29
@@ -51,6 +70,7 @@ struct tps23881_port_desc {
u8 chan[2];
bool is_4p;
int pw_pol;
+ bool exist;
};
struct tps23881_priv {
@@ -168,6 +188,7 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
struct i2c_client *client = priv->client;
u8 chan;
u16 val;
+ int ret;
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
@@ -181,7 +202,22 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
BIT(chan % 4));
}
- return i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ if (ret)
+ return ret;
+
+ /* Enable DC disconnect */
+ chan = priv->port[id].chan[0];
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DISC_EN);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, BIT(chan % 4), BIT(chan % 4));
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DISC_EN, val);
+ if (ret)
+ return ret;
+
+ return 0;
}
static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
@@ -214,6 +250,17 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
*/
mdelay(5);
+ /* Disable DC disconnect */
+ chan = priv->port[id].chan[0];
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DISC_EN);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_set_val(ret, chan, 0, 0, BIT(chan % 4));
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DISC_EN, val);
+ if (ret)
+ return ret;
+
/* Enable detection and classification */
ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_CLA_EN);
if (ret < 0)
@@ -782,8 +829,10 @@ tps23881_write_port_matrix(struct tps23881_priv *priv,
hw_chan = port_matrix[i].hw_chan[0] % 4;
/* Set software port matrix for existing ports */
- if (port_matrix[i].exist)
+ if (port_matrix[i].exist) {
priv->port[pi_id].chan[0] = lgcl_chan;
+ priv->port[pi_id].exist = true;
+ }
/* Initialize power policy internal value */
priv->port[pi_id].pw_pol = -1;
@@ -907,6 +956,47 @@ static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
return ret;
}
+static int tps23881_power_class_table[] = {
+ -ERANGE,
+ 4000,
+ 7000,
+ 15500,
+ 30000,
+ 15500,
+ 15500,
+ -ERANGE,
+ 45000,
+ 60000,
+ 75000,
+ 90000,
+ 15500,
+ 45000,
+ -ERANGE,
+ -ERANGE,
+};
+
+static int tps23881_pi_get_pw_req(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ u8 reg, chan;
+ int ret;
+ u16 val;
+
+ /* For a 4-pair port, classification needs 5ms to complete */
+ if (priv->port[id].is_4p)
+ mdelay(5);
+
+ chan = priv->port[id].chan[0];
+ reg = TPS23881_REG_DISC + (chan % 4);
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, chan, 4, 0xf);
+ return tps23881_power_class_table[val];
+}
+
static const struct pse_controller_ops tps23881_ops = {
.setup_pi_matrix = tps23881_setup_pi_matrix,
.pi_enable = tps23881_pi_enable,
@@ -919,6 +1009,7 @@ static const struct pse_controller_ops tps23881_ops = {
.pi_get_pw_limit = tps23881_pi_get_pw_limit,
.pi_set_pw_limit = tps23881_pi_set_pw_limit,
.pi_get_pw_limit_ranges = tps23881_pi_get_pw_limit_ranges,
+ .pi_get_pw_req = tps23881_pi_get_pw_req,
};
static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin";
@@ -1017,6 +1108,307 @@ static int tps23881_flash_sram_fw(struct i2c_client *client)
return 0;
}
+/* Fold the 16-bit interrupt event register into an 8-bit bitmap whose
+ * bit positions align with the channel numbers.
+ */
+static u8 tps23881_irq_export_chans_helper(u16 reg_val, u8 field_offset)
+{
+ u8 val;
+
+ val = (reg_val >> (4 + field_offset) & 0xf0) |
+ (reg_val >> field_offset & 0x0f);
+
+ return val;
+}
+
+/* Convert chan number to port number */
+static void tps23881_set_notifs_helper(struct tps23881_priv *priv,
+ u8 chans,
+ unsigned long *notifs,
+ unsigned long *notifs_mask,
+ enum ethtool_pse_event event)
+{
+ u8 chan;
+ int i;
+
+ if (!chans)
+ return;
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!priv->port[i].exist)
+ continue;
+ /* No need to look at the 2nd channel in case of PoE4 as
+ * both registers are set.
+ */
+ chan = priv->port[i].chan[0];
+
+ if (BIT(chan) & chans) {
+ *notifs_mask |= BIT(i);
+ notifs[i] |= event;
+ }
+ }
+}
+
+static void tps23881_irq_event_over_temp(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ int i;
+
+ if (reg_val & TPS23881_REG_TSD) {
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!priv->port[i].exist)
+ continue;
+
+ *notifs_mask |= BIT(i);
+ notifs[i] |= ETHTOOL_PSE_EVENT_OVER_TEMP;
+ }
+ }
+}
+
+static int tps23881_irq_event_over_current(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ int i, ret;
+ u8 chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 0);
+ if (!chans)
+ return 0;
+
+ tps23881_set_notifs_helper(priv, chans, notifs, notifs_mask,
+ ETHTOOL_PSE_EVENT_OVER_CURRENT |
+ ETHTOOL_C33_PSE_EVENT_DISCONNECTION);
+
+ /* An over-current event resets the power limit registers, so we need
+ * to configure them again.
+ */
+ for_each_set_bit(i, notifs_mask, priv->pcdev.nr_lines) {
+ if (priv->port[i].pw_pol < 0)
+ continue;
+
+ ret = tps23881_pi_enable_manual_pol(priv, i);
+ if (ret < 0)
+ return ret;
+
+ /* Set power policy */
+ ret = tps23881_pi_set_pw_pol_limit(priv, i,
+ priv->port[i].pw_pol,
+ priv->port[i].is_4p);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tps23881_irq_event_disconnection(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ u8 chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 4);
+ if (chans)
+ tps23881_set_notifs_helper(priv, chans, notifs, notifs_mask,
+ ETHTOOL_C33_PSE_EVENT_DISCONNECTION);
+}
+
+static int tps23881_irq_event_detection(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ enum ethtool_pse_event event;
+ int reg, ret, i, val;
+ unsigned long chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 0);
+ for_each_set_bit(i, &chans, TPS23881_MAX_CHANS) {
+ reg = TPS23881_REG_DISC + (i % 4);
+ ret = i2c_smbus_read_word_data(priv->client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, i, 0, 0xf);
+ /* If detection valid */
+ if (val == 0x4)
+ event = ETHTOOL_C33_PSE_EVENT_DETECTION;
+ else
+ event = ETHTOOL_C33_PSE_EVENT_DISCONNECTION;
+
+ tps23881_set_notifs_helper(priv, BIT(i), notifs,
+ notifs_mask, event);
+ }
+
+ return 0;
+}
+
+static int tps23881_irq_event_classification(struct tps23881_priv *priv,
+ u16 reg_val,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ int reg, ret, val, i;
+ unsigned long chans;
+
+ chans = tps23881_irq_export_chans_helper(reg_val, 4);
+ for_each_set_bit(i, &chans, TPS23881_MAX_CHANS) {
+ reg = TPS23881_REG_DISC + (i % 4);
+ ret = i2c_smbus_read_word_data(priv->client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = tps23881_calc_val(ret, i, 4, 0xf);
+ /* Do not report classification event for unknown class */
+ if (!val || val == 0x8 || val == 0xf)
+ continue;
+
+ tps23881_set_notifs_helper(priv, BIT(i), notifs,
+ notifs_mask,
+ ETHTOOL_C33_PSE_EVENT_CLASSIFICATION);
+ }
+
+ return 0;
+}
+
+static int tps23881_irq_event_handler(struct tps23881_priv *priv, u16 reg,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ struct i2c_client *client = priv->client;
+ int ret, val;
+
+ /* The Supply event bit is repeated twice so we only need to read
+ * the one from the first byte.
+ */
+ if (reg & TPS23881_REG_IT_SUPF) {
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_SUPF_EVENT);
+ if (ret < 0)
+ return ret;
+ tps23881_irq_event_over_temp(priv, ret, notifs, notifs_mask);
+ }
+
+ if (reg & (TPS23881_REG_IT_IFAULT | TPS23881_REG_IT_IFAULT << 8 |
+ TPS23881_REG_IT_DISF | TPS23881_REG_IT_DISF << 8)) {
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_FAULT);
+ if (ret < 0)
+ return ret;
+ val = ret;
+ ret = tps23881_irq_event_over_current(priv, val, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+ tps23881_irq_event_disconnection(priv, val, notifs, notifs_mask);
+ }
+
+ if (reg & (TPS23881_REG_IT_DETC | TPS23881_REG_IT_DETC << 8 |
+ TPS23881_REG_IT_CLASC | TPS23881_REG_IT_CLASC << 8)) {
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_DET_EVENT);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ ret = tps23881_irq_event_detection(priv, val, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+
+ ret = tps23881_irq_event_classification(priv, val, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int tps23881_irq_handler(int irq, struct pse_controller_dev *pcdev,
+ unsigned long *notifs,
+ unsigned long *notifs_mask)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ int ret, it_mask, retry;
+
+ /* Get the interrupt mask */
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_IT_MASK);
+ if (ret < 0)
+ return ret;
+ it_mask = ret;
+
+ /* Read the interrupt register until the interrupt pin is released. */
+ retry = 0;
+ while (true) {
+ if (retry > TPS23881_MAX_IRQ_RETRIES) {
+ dev_err(&client->dev, "interrupt never freed");
+ return -ETIMEDOUT;
+ }
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_IT);
+ if (ret < 0)
+ return ret;
+
+ /* No more relevant interrupts pending */
+ if (!(ret & it_mask))
+ return 0;
+
+ ret = tps23881_irq_event_handler(priv, (u16)ret, notifs,
+ notifs_mask);
+ if (ret)
+ return ret;
+
+ retry++;
+ }
+ return 0;
+}
+
+static int tps23881_setup_irq(struct tps23881_priv *priv, int irq)
+{
+ struct i2c_client *client = priv->client;
+ struct pse_irq_desc irq_desc = {
+ .name = "tps23881-irq",
+ .map_event = tps23881_irq_handler,
+ };
+ int ret;
+ u16 val;
+
+ if (!irq) {
+ dev_err(&client->dev, "interrupt is missing");
+ return -EINVAL;
+ }
+
+ val = TPS23881_REG_IT_IFAULT | TPS23881_REG_IT_SUPF |
+ TPS23881_REG_IT_DETC | TPS23881_REG_IT_CLASC |
+ TPS23881_REG_IT_DISF;
+ val |= val << 8;
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_IT_MASK, val);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_GEN_MASK);
+ if (ret < 0)
+ return ret;
+
+ val = TPS23881_REG_INTEN | TPS23881_REG_CLCHE | TPS23881_REG_DECHE;
+ val |= val << 8;
+ val |= (u16)ret;
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_GEN_MASK, val);
+ if (ret < 0)
+ return ret;
+
+ /* Reset the interrupt registers */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_RESET,
+ TPS23881_REG_CLRAIN);
+ if (ret < 0)
+ return ret;
+
+ return devm_pse_irq_helper(&priv->pcdev, irq, 0, &irq_desc);
+}
+
static int tps23881_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -1091,12 +1483,17 @@ static int tps23881_i2c_probe(struct i2c_client *client)
priv->pcdev.dev = dev;
priv->pcdev.types = ETHTOOL_PSE_C33;
priv->pcdev.nr_lines = TPS23881_MAX_CHANS;
+ priv->pcdev.supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_STATIC;
ret = devm_pse_controller_register(dev, &priv->pcdev);
if (ret) {
return dev_err_probe(dev, ret,
"failed to register PSE controller\n");
}
+ ret = tps23881_setup_irq(priv, client->irq);
+ if (ret)
+ return ret;
+
return ret;
}
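To make the classification path concrete: tps23881_pi_get_pw_req() above converts the 4-bit class code latched in TPS23881_REG_DISC into a requested power, which pse_core then clamps against the port power limit and charges to the power-domain budget. A stripped-down sketch of just the decode step is shown below; it assumes, as the 8-channel register layout suggests, that tps23881_calc_val() picks the low or high byte of the word depending on the channel.

#include <linux/errno.h>
#include <linux/types.h>

/* Requested power per class code, in mW (-ERANGE marks invalid codes). */
static const int power_class_mw[16] = {
	-ERANGE, 4000, 7000, 15500, 30000, 15500, 15500, -ERANGE,
	45000, 60000, 75000, 90000, 15500, 45000, -ERANGE, -ERANGE,
};

/* @reg is the 16-bit TPS23881_REG_DISC word; channels 0-3 are assumed to
 * use the low byte and channels 4-7 the high byte, with the class code in
 * the upper nibble of that byte.
 */
static int class_to_pw_req(u16 reg, u8 chan)
{
	u8 code = ((chan < 4 ? reg : reg >> 8) >> 4) & 0xf;

	return power_class_mw[code];
}

For example, a class 4 PD (code 0x4) requests 30000 mW; if pi_get_pw_limit() reports a lower value, that lower value is what _pse_pi_delivery_power_sw_pw_ctrl() allocates from the domain budget before enabling the PI.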
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index 8bc56186b2a3..17f07eb0ee52 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -933,7 +933,7 @@ static bool team_port_find(const struct team *team,
* Enable/disable port by adding to enabled port hashlist and setting
* port->index (Might be racy so reader could see incorrect ifindex when
* processing a flying packet, but that is not a problem). Write guarded
- * by team->lock.
+ * by RTNL.
*/
static void team_port_enable(struct team *team,
struct team_port *port)
@@ -1660,8 +1660,6 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
- lockdep_register_key(&team->team_lock_key);
- __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
netdev_lockdep_set_classes(dev);
return 0;
@@ -1682,7 +1680,8 @@ static void team_uninit(struct net_device *dev)
struct team_port *port;
struct team_port *tmp;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry_safe(port, tmp, &team->port_list, list)
team_port_del(team, port->dev);
@@ -1691,9 +1690,7 @@ static void team_uninit(struct net_device *dev)
team_mcast_rejoin_fini(team);
team_notify_peers_fini(team);
team_queue_override_fini(team);
- mutex_unlock(&team->lock);
netdev_change_features(dev);
- lockdep_unregister_key(&team->team_lock_key);
}
static void team_destructor(struct net_device *dev)
@@ -1778,7 +1775,8 @@ static void team_change_rx_flags(struct net_device *dev, int change)
struct team_port *port;
int inc;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry(port, &team->port_list, list) {
if (change & IFF_PROMISC) {
inc = dev->flags & IFF_PROMISC ? 1 : -1;
@@ -1789,7 +1787,6 @@ static void team_change_rx_flags(struct net_device *dev, int change)
dev_set_allmulti(port->dev, inc);
}
}
- mutex_unlock(&team->lock);
}
static void team_set_rx_mode(struct net_device *dev)
@@ -1811,14 +1808,14 @@ static int team_set_mac_address(struct net_device *dev, void *p)
struct team *team = netdev_priv(dev);
struct team_port *port;
+ ASSERT_RTNL();
+
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
dev_addr_set(dev, addr->sa_data);
- mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
team->ops.port_change_dev_addr(team, port);
- mutex_unlock(&team->lock);
return 0;
}
@@ -1828,11 +1825,8 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
struct team_port *port;
int err;
- /*
- * Alhough this is reader, it's guarded by team lock. It's not possible
- * to traverse list in reverse under rcu_read_lock
- */
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
team->port_mtu_change_allowed = true;
list_for_each_entry(port, &team->port_list, list) {
err = dev_set_mtu(port->dev, new_mtu);
@@ -1843,7 +1837,6 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
}
}
team->port_mtu_change_allowed = false;
- mutex_unlock(&team->lock);
WRITE_ONCE(dev->mtu, new_mtu);
@@ -1853,7 +1846,6 @@ unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
dev_set_mtu(port->dev, dev->mtu);
team->port_mtu_change_allowed = false;
- mutex_unlock(&team->lock);
return err;
}
@@ -1903,24 +1895,19 @@ static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
struct team_port *port;
int err;
- /*
- * Alhough this is reader, it's guarded by team lock. It's not possible
- * to traverse list in reverse under rcu_read_lock
- */
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry(port, &team->port_list, list) {
err = vlan_vid_add(port->dev, proto, vid);
if (err)
goto unwind;
}
- mutex_unlock(&team->lock);
return 0;
unwind:
list_for_each_entry_continue_reverse(port, &team->port_list, list)
vlan_vid_del(port->dev, proto, vid);
- mutex_unlock(&team->lock);
return err;
}
@@ -1930,10 +1917,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
struct team *team = netdev_priv(dev);
struct team_port *port;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry(port, &team->port_list, list)
vlan_vid_del(port->dev, proto, vid);
- mutex_unlock(&team->lock);
return 0;
}
@@ -1955,9 +1942,9 @@ static void team_netpoll_cleanup(struct net_device *dev)
{
struct team *team = netdev_priv(dev);
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
__team_netpoll_cleanup(team);
- mutex_unlock(&team->lock);
}
static int team_netpoll_setup(struct net_device *dev)
@@ -1966,7 +1953,8 @@ static int team_netpoll_setup(struct net_device *dev)
struct team_port *port;
int err = 0;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
list_for_each_entry(port, &team->port_list, list) {
err = __team_port_enable_netpoll(port);
if (err) {
@@ -1974,7 +1962,6 @@ static int team_netpoll_setup(struct net_device *dev)
break;
}
}
- mutex_unlock(&team->lock);
return err;
}
#endif
@@ -1985,9 +1972,9 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
struct team *team = netdev_priv(dev);
int err;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
err = team_port_add(team, port_dev, extack);
- mutex_unlock(&team->lock);
if (!err)
netdev_change_features(dev);
@@ -2000,18 +1987,13 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
struct team *team = netdev_priv(dev);
int err;
- mutex_lock(&team->lock);
+ ASSERT_RTNL();
+
err = team_port_del(team, port_dev);
- mutex_unlock(&team->lock);
if (err)
return err;
- if (netif_is_team_master(port_dev)) {
- lockdep_unregister_key(&team->team_lock_key);
- lockdep_register_key(&team->team_lock_key);
- lockdep_set_class(&team->lock, &team->team_lock_key);
- }
netdev_change_features(dev);
return err;
@@ -2304,9 +2286,10 @@ err_msg_put:
static struct team *team_nl_team_get(struct genl_info *info)
{
struct net *net = genl_info_net(info);
- int ifindex;
struct net_device *dev;
- struct team *team;
+ int ifindex;
+
+ ASSERT_RTNL();
if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
return NULL;
@@ -2318,14 +2301,11 @@ static struct team *team_nl_team_get(struct genl_info *info)
return NULL;
}
- team = netdev_priv(dev);
- mutex_lock(&team->lock);
- return team;
+ return netdev_priv(dev);
}
static void team_nl_team_put(struct team *team)
{
- mutex_unlock(&team->lock);
dev_put(team->dev);
}
@@ -2515,9 +2495,13 @@ int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info)
int err;
LIST_HEAD(sel_opt_inst_list);
+ rtnl_lock();
+
team = team_nl_team_get(info);
- if (!team)
- return -EINVAL;
+ if (!team) {
+ err = -EINVAL;
+ goto rtnl_unlock;
+ }
list_for_each_entry(opt_inst, &team->option_inst_list, list)
list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
@@ -2527,6 +2511,9 @@ int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info)
team_nl_team_put(team);
+rtnl_unlock:
+ rtnl_unlock();
+
return err;
}
@@ -2805,15 +2792,22 @@ int team_nl_port_list_get_doit(struct sk_buff *skb,
struct team *team;
int err;
+ rtnl_lock();
+
team = team_nl_team_get(info);
- if (!team)
- return -EINVAL;
+ if (!team) {
+ err = -EINVAL;
+ goto rtnl_unlock;
+ }
err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
NLM_F_ACK, team_nl_send_unicast, NULL);
team_nl_team_put(team);
+rtnl_unlock:
+ rtnl_unlock();
+
return err;
}
@@ -2961,11 +2955,9 @@ static void __team_port_change_port_removed(struct team_port *port)
static void team_port_change_check(struct team_port *port, bool linkup)
{
- struct team *team = port->team;
+ ASSERT_RTNL();
- mutex_lock(&team->lock);
__team_port_change_check(port, linkup);
- mutex_unlock(&team->lock);
}
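
The team conversion follows one pattern throughout: paths that the networking core already calls under RTNL only assert it, while the genetlink doit handlers, which used to take team->lock, now take the RTNL themselves around the device lookup. A condensed sketch of that doit shape, pieced together from the hunks above (team_nl_team_get(), team_nl_team_put() and the rtnl_unlock label are theirs; the handler name and body are placeholders):

/* Condensed from the hunks above; not a complete handler. */
static int team_nl_example_doit(struct sk_buff *skb, struct genl_info *info)
{
    struct team *team;
    int err = 0;

    rtnl_lock();

    team = team_nl_team_get(info);      /* device lookup, no per-team lock */
    if (!team) {
        err = -EINVAL;
        goto rtnl_unlock;
    }

    /* ... walk team->port_list etc., now serialized by the RTNL ... */

    team_nl_team_put(team);             /* only drops the device reference */

rtnl_unlock:
    rtnl_unlock();
    return err;
}
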
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index e0f599e2a51d..1c3336c7a1b2 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -67,8 +67,7 @@ static void ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct team_port *active_port;
- active_port = rcu_dereference_protected(ab_priv(team)->active_port,
- lockdep_is_held(&team->lock));
+ active_port = rtnl_dereference(ab_priv(team)->active_port);
if (active_port)
ctx->data.u32_val = active_port->dev->ifindex;
else
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 00f8989c29c0..b14538bde2f8 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -301,8 +301,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
if (lb_priv->ex->orig_fprog) {
/* Clear old filter data */
__fprog_destroy(lb_priv->ex->orig_fprog);
- orig_fp = rcu_dereference_protected(lb_priv->fp,
- lockdep_is_held(&team->lock));
+ orig_fp = rtnl_dereference(lb_priv->fp);
}
rcu_assign_pointer(lb_priv->fp, fp);
@@ -324,8 +323,7 @@ static void lb_bpf_func_free(struct team *team)
return;
__fprog_destroy(lb_priv->ex->orig_fprog);
- fp = rcu_dereference_protected(lb_priv->fp,
- lockdep_is_held(&team->lock));
+ fp = rtnl_dereference(lb_priv->fp);
bpf_prog_destroy(fp);
}
@@ -335,8 +333,7 @@ static void lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
lb_select_tx_port_func_t *func;
char *name;
- func = rcu_dereference_protected(lb_priv->select_tx_port_func,
- lockdep_is_held(&team->lock));
+ func = rtnl_dereference(lb_priv->select_tx_port_func);
name = lb_select_tx_port_get_name(func);
BUG_ON(!name);
ctx->data.str_val = name;
@@ -478,7 +475,7 @@ static void lb_stats_refresh(struct work_struct *work)
team = lb_priv_ex->team;
lb_priv = get_lb_priv(team);
- if (!mutex_trylock(&team->lock)) {
+ if (!rtnl_trylock()) {
schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
return;
}
@@ -515,7 +512,7 @@ static void lb_stats_refresh(struct work_struct *work)
schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
(lb_priv_ex->stats.refresh_interval * HZ) / 10);
- mutex_unlock(&team->lock);
+ rtnl_unlock();
}
static void lb_stats_refresh_interval_get(struct team *team,
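
lb_stats_refresh() runs from a delayed work item, so it cannot sleep waiting for the RTNL that now protects the team; instead it uses rtnl_trylock() and requeues itself when the lock is busy. A runnable userspace analogue of that back-off pattern, with a pthread mutex and a counter standing in for rtnl_trylock() and schedule_delayed_work():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static int reschedules;

/* Stand-in for schedule_delayed_work(): just count the requeue. */
static void requeue_stats_work(void)
{
    reschedules++;
}

static void stats_refresh(void)
{
    if (pthread_mutex_trylock(&cfg_lock)) {
        /* Lock is busy: back off and try again later. */
        requeue_stats_work();
        return;
    }

    /* ... refresh statistics under the lock ... */

    pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
    stats_refresh();
    printf("reschedules: %d\n", reschedules);
    return 0;
}
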
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 370b32fc2588..0a678e31cfaa 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -113,9 +113,8 @@ config USB_RTL8152
config USB_LAN78XX
tristate "Microchip LAN78XX Based USB Ethernet Adapters"
select MII
- select PHYLIB
+ select PHYLINK
select MICROCHIP_PHY
- select FIXED_PHY
select CRC32
help
This option adds support for Microchip LAN78XX based USB 2
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f53e255116ea..f00284c9ad34 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/phylink.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
@@ -384,7 +385,7 @@ struct skb_data { /* skb->cb is one of these */
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
#define EVENT_STS_SPLIT 3
-#define EVENT_LINK_RESET 4
+#define EVENT_PHY_INT_ACK 4
#define EVENT_RX_PAUSED 5
#define EVENT_DEV_WAKING 6
#define EVENT_DEV_ASLEEP 7
@@ -413,7 +414,6 @@ struct lan78xx_net {
struct net_device *net;
struct usb_device *udev;
struct usb_interface *intf;
- void *driver_priv;
unsigned int tx_pend_data_len;
size_t n_tx_urbs;
@@ -448,28 +448,24 @@ struct lan78xx_net {
unsigned long flags;
wait_queue_head_t *wait;
- unsigned char suspend_count;
unsigned int maxpacket;
struct timer_list stat_monitor;
unsigned long data[5];
- int link_on;
- u8 mdix_ctrl;
-
u32 chipid;
u32 chiprev;
struct mii_bus *mdiobus;
phy_interface_t interface;
- int fc_autoneg;
- u8 fc_request_control;
-
int delta;
struct statstage stats;
struct irq_domain_data domain_data;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
};
/* use ethtool to change the level for any given device */
@@ -1554,28 +1550,6 @@ static void lan78xx_set_multicast(struct net_device *netdev)
schedule_work(&pdata->set_multicast);
}
-static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
- bool tx_pause, bool rx_pause);
-
-static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
- u16 lcladv, u16 rmtadv)
-{
- u8 cap;
-
- if (dev->fc_autoneg)
- cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
- else
- cap = dev->fc_request_control;
-
- netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
-
- return lan78xx_configure_flowcontrol(dev,
- cap & FLOW_CTRL_TX,
- cap & FLOW_CTRL_RX);
-}
-
static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
static int lan78xx_mac_reset(struct lan78xx_net *dev)
@@ -1638,75 +1612,6 @@ static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}
-static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed);
-
-static int lan78xx_link_reset(struct lan78xx_net *dev)
-{
- struct phy_device *phydev = dev->net->phydev;
- struct ethtool_link_ksettings ecmd;
- int ladv, radv, ret, link;
-
- /* clear LAN78xx interrupt status */
- ret = lan78xx_phy_int_ack(dev);
- if (unlikely(ret < 0))
- return ret;
-
- mutex_lock(&phydev->lock);
- phy_read_status(phydev);
- link = phydev->link;
- mutex_unlock(&phydev->lock);
-
- if (!link && dev->link_on) {
- dev->link_on = false;
-
- /* reset MAC */
- ret = lan78xx_mac_reset(dev);
- if (ret < 0)
- return ret;
-
- timer_delete(&dev->stat_monitor);
- } else if (link && !dev->link_on) {
- dev->link_on = true;
-
- phy_ethtool_ksettings_get(phydev, &ecmd);
-
- ret = lan78xx_configure_usb(dev, ecmd.base.speed);
- if (ret < 0)
- return ret;
-
- ladv = phy_read(phydev, MII_ADVERTISE);
- if (ladv < 0)
- return ladv;
-
- radv = phy_read(phydev, MII_LPA);
- if (radv < 0)
- return radv;
-
- netif_dbg(dev, link, dev->net,
- "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
- ecmd.base.speed, ecmd.base.duplex, ladv, radv);
-
- ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
- radv);
- if (ret < 0)
- return ret;
-
- if (!timer_pending(&dev->stat_monitor)) {
- dev->delta = 1;
- mod_timer(&dev->stat_monitor,
- jiffies + STAT_UPDATE_TIMER);
- }
-
- lan78xx_rx_urb_submit_all(dev);
-
- local_bh_disable();
- napi_schedule(&dev->napi);
- local_bh_enable();
- }
-
- return 0;
-}
-
/* some work can't be done in tasklets, so we use keventd
*
* NOTE: annoying asymmetry: if it's active, schedule_work() fails,
@@ -1733,7 +1638,7 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
if (intdata & INT_ENP_PHY_INT) {
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
- lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+ lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);
if (dev->domain_data.phyirq > 0)
generic_handle_irq_safe(dev->domain_data.phyirq);
@@ -1880,66 +1785,15 @@ exit_pm_put:
static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- int ret;
- u32 buf;
-
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
- ret = phy_ethtool_get_eee(phydev, edata);
- if (ret < 0)
- goto exit;
-
- ret = lan78xx_read_reg(dev, MAC_CR, &buf);
- if (buf & MAC_CR_EEE_EN_) {
- /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
- ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
- edata->tx_lpi_timer = buf;
- } else {
- edata->tx_lpi_timer = 0;
- }
-
- ret = 0;
-exit:
- usb_autopm_put_interface(dev->intf);
-
- return ret;
+ return phylink_ethtool_get_eee(dev->phylink, edata);
}
static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
- int ret;
- u32 buf;
-
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
-
- ret = phy_ethtool_set_eee(net->phydev, edata);
- if (ret < 0)
- goto out;
-
- buf = (u32)edata->tx_lpi_timer;
- ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
-out:
- usb_autopm_put_interface(dev->intf);
-
- return ret;
-}
-static u32 lan78xx_get_link(struct net_device *net)
-{
- u32 link;
-
- mutex_lock(&net->phydev->lock);
- phy_read_status(net->phydev);
- link = net->phydev->link;
- mutex_unlock(&net->phydev->lock);
-
- return link;
+ return phylink_ethtool_set_eee(dev->phylink, edata);
}
static void lan78xx_get_drvinfo(struct net_device *net,
@@ -1969,109 +1823,32 @@ static int lan78xx_get_link_ksettings(struct net_device *net,
struct ethtool_link_ksettings *cmd)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- int ret;
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
-
- phy_ethtool_ksettings_get(phydev, cmd);
-
- usb_autopm_put_interface(dev->intf);
-
- return ret;
+ return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}
static int lan78xx_set_link_ksettings(struct net_device *net,
const struct ethtool_link_ksettings *cmd)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- int ret = 0;
- int temp;
- ret = usb_autopm_get_interface(dev->intf);
- if (ret < 0)
- return ret;
-
- /* change speed & duplex */
- ret = phy_ethtool_ksettings_set(phydev, cmd);
-
- if (!cmd->base.autoneg) {
- /* force link down */
- temp = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
- mdelay(1);
- phy_write(phydev, MII_BMCR, temp);
- }
-
- usb_autopm_put_interface(dev->intf);
-
- return ret;
+ return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}
static void lan78xx_get_pause(struct net_device *net,
struct ethtool_pauseparam *pause)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- struct ethtool_link_ksettings ecmd;
-
- phy_ethtool_ksettings_get(phydev, &ecmd);
- pause->autoneg = dev->fc_autoneg;
-
- if (dev->fc_request_control & FLOW_CTRL_TX)
- pause->tx_pause = 1;
-
- if (dev->fc_request_control & FLOW_CTRL_RX)
- pause->rx_pause = 1;
+ phylink_ethtool_get_pauseparam(dev->phylink, pause);
}
static int lan78xx_set_pause(struct net_device *net,
struct ethtool_pauseparam *pause)
{
struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- struct ethtool_link_ksettings ecmd;
- int ret;
-
- phy_ethtool_ksettings_get(phydev, &ecmd);
-
- if (pause->autoneg && !ecmd.base.autoneg) {
- ret = -EINVAL;
- goto exit;
- }
-
- dev->fc_request_control = 0;
- if (pause->rx_pause)
- dev->fc_request_control |= FLOW_CTRL_RX;
-
- if (pause->tx_pause)
- dev->fc_request_control |= FLOW_CTRL_TX;
-
- if (ecmd.base.autoneg) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
- u32 mii_adv;
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- ecmd.link_modes.advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- ecmd.link_modes.advertising);
- mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- mii_adv_to_linkmode_adv_t(fc, mii_adv);
- linkmode_or(ecmd.link_modes.advertising, fc,
- ecmd.link_modes.advertising);
-
- phy_ethtool_ksettings_set(phydev, &ecmd);
- }
-
- dev->fc_autoneg = pause->autoneg;
-
- ret = 0;
-exit:
- return ret;
+ return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}
static int lan78xx_get_regs_len(struct net_device *netdev)
@@ -2108,7 +1885,7 @@ clean_data:
}
static const struct ethtool_ops lan78xx_ethtool_ops = {
- .get_link = lan78xx_get_link,
+ .get_link = ethtool_op_get_link,
.nway_reset = phy_ethtool_nway_reset,
.get_drvinfo = lan78xx_get_drvinfo,
.get_msglevel = lan78xx_get_msglevel,
@@ -2332,26 +2109,6 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
mdiobus_free(dev->mdiobus);
}
-static void lan78xx_link_status_change(struct net_device *net)
-{
- struct lan78xx_net *dev = netdev_priv(net);
- struct phy_device *phydev = net->phydev;
- u32 data;
- int ret;
-
- ret = lan78xx_read_reg(dev, MAC_CR, &data);
- if (ret < 0)
- return;
-
- if (phydev->enable_tx_lpi)
- data |= MAC_CR_EEE_EN_;
- else
- data &= ~MAC_CR_EEE_EN_;
- lan78xx_write_reg(dev, MAC_CR, data);
-
- phy_print_status(phydev);
-}
-
static int irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -2448,10 +2205,8 @@ static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqchip = &lan78xx_irqchip;
dev->domain_data.irq_handler = handle_simple_irq;
- irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node),
- MAX_INT_EP, 0,
- &chip_domain_ops,
- &dev->domain_data);
+ irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
+ &chip_domain_ops, &dev->domain_data);
if (irqdomain) {
/* create mapping for PHY interrupt */
irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
@@ -2483,6 +2238,77 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
dev->domain_data.irqdomain = NULL;
}
+static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ u32 mac_cr = 0;
+ int ret;
+
+ /* Check if the mode is supported */
+ if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
+ netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
+ return;
+ }
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ mac_cr |= MAC_CR_GMII_EN_;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ break;
+ default:
+ netdev_warn(net, "Unsupported interface mode: %d\n",
+ state->interface);
+ return;
+ }
+
+ ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
+ if (ret < 0)
+ netdev_err(net, "Failed to config MAC with error %pe\n",
+ ERR_PTR(ret));
+}
+
+static void lan78xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+
+ netif_stop_queue(net);
+
+ /* A MAC reset will not de-assert TXEN/RXEN; we need to stop them
+ * manually before the reset. TX and RX should be disabled before
+ * running the link_up sequence.
+ */
+ ret = lan78xx_stop_tx_path(dev);
+ if (ret < 0)
+ goto link_down_fail;
+
+ ret = lan78xx_stop_rx_path(dev);
+ if (ret < 0)
+ goto link_down_fail;
+
+ /* A MAC reset does not seem to affect the MAC configuration. It is
+ * unclear whether it is really needed, but the previous driver version
+ * did it, so keep it here.
+ */
+ ret = lan78xx_mac_reset(dev);
+ if (ret < 0)
+ goto link_down_fail;
+
+ return;
+
+link_down_fail:
+ netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
+ ERR_PTR(ret));
+}
+
/**
* lan78xx_configure_usb - Configure USB link power settings
* @dev: pointer to the LAN78xx device structure
@@ -2618,28 +2444,155 @@ static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
return lan78xx_write_reg(dev, FLOW, flow);
}
+static void lan78xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ u32 mac_cr = 0;
+ int ret;
+
+ switch (speed) {
+ case SPEED_1000:
+ mac_cr |= MAC_CR_SPEED_1000_;
+ break;
+ case SPEED_100:
+ mac_cr |= MAC_CR_SPEED_100_;
+ break;
+ case SPEED_10:
+ mac_cr |= MAC_CR_SPEED_10_;
+ break;
+ default:
+ netdev_err(dev->net, "Unsupported speed %d\n", speed);
+ return;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ mac_cr |= MAC_CR_FULL_DUPLEX_;
+
+ /* make sure TXEN and RXEN are disabled before reconfiguring MAC */
+ ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
+ MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_configure_usb(dev, speed);
+ if (ret < 0)
+ goto link_up_fail;
+
+ lan78xx_rx_urb_submit_all(dev);
+
+ ret = lan78xx_flush_rx_fifo(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_flush_tx_fifo(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_start_tx_path(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ ret = lan78xx_start_rx_path(dev);
+ if (ret < 0)
+ goto link_up_fail;
+
+ netif_start_queue(net);
+
+ return;
+
+link_up_fail:
+ netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
+ ERR_PTR(ret));
+}
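
mac_link_up() receives the already-resolved speed, duplex and pause state from phylink, so all that is left is translating them into register fields before re-enabling the data paths. A runnable sketch of just the translation step; the CR_* values are made up for illustration and are not the real MAC_CR_* bits.

#include <stdio.h>

/* Illustrative field values; the real MAC_CR_* bits differ. */
#define CR_SPEED_10     0x0u
#define CR_SPEED_100    0x1u
#define CR_SPEED_1000   0x2u
#define CR_FULL_DUPLEX  0x4u

static int link_settings_to_cr(int speed, int full_duplex, unsigned int *cr)
{
    switch (speed) {
    case 1000:
        *cr = CR_SPEED_1000;
        break;
    case 100:
        *cr = CR_SPEED_100;
        break;
    case 10:
        *cr = CR_SPEED_10;
        break;
    default:
        return -1;      /* unsupported speed */
    }

    if (full_duplex)
        *cr |= CR_FULL_DUPLEX;
    return 0;
}

int main(void)
{
    unsigned int cr;

    if (!link_settings_to_cr(1000, 1, &cr))
        printf("resolved MAC control field: 0x%x\n", cr);
    return 0;
}
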
+
/**
- * lan78xx_register_fixed_phy() - Register a fallback fixed PHY
+ * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
* @dev: LAN78xx device
+ * @enable: true to enable EEE, false to disable
*
- * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases
- * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a
- * switch without a visible PHY.
+ * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
+ * Efficient Ethernet (EEE) operation. According to current understanding
+ * of the LAN7800 documentation, this bit can be modified while TX and RX
+ * are enabled. No explicit requirement was found to disable data paths
+ * before changing this bit.
+ *
+ * Return: 0 on success or a negative error code
+ */
+static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
+{
+ u32 mac_cr = 0;
+
+ if (enable)
+ mac_cr |= MAC_CR_EEE_EN_;
+
+ return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
+}
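
lan78xx_mac_eee_enable() relies on the mask/value form of a register update: only the bits selected by the mask are touched, and passing zero within that mask clears them. A runnable sketch of that read-modify-write idiom; update_bits() and the sample values are illustrative, not the driver's helper.

#include <stdio.h>

/* Return the register contents with only the masked bits replaced. */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
                                unsigned int val)
{
    return (reg & ~mask) | (val & mask);
}

int main(void)
{
    unsigned int mac_cr = 0x0000001d;   /* illustrative current value */
    unsigned int eee_en = 0x00000100;   /* illustrative EEE enable bit */

    mac_cr = update_bits(mac_cr, eee_en, eee_en);   /* enable EEE */
    printf("after enable:  0x%08x\n", mac_cr);

    mac_cr = update_bits(mac_cr, eee_en, 0);        /* disable EEE */
    printf("after disable: 0x%08x\n", mac_cr);
    return 0;
}
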
+
+static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+
+ lan78xx_mac_eee_enable(dev, false);
+}
+
+static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *net = to_net_dev(config->dev);
+ struct lan78xx_net *dev = netdev_priv(net);
+ int ret;
+
+ /* Software should only change this field when Energy Efficient
+ * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
+ * EEEEN during probe, and phylink itself guarantees that
+ * mac_disable_tx_lpi() will have been previously called.
+ */
+ ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
+ if (ret < 0)
+ return ret;
+
+ return lan78xx_mac_eee_enable(dev, true);
+}
+
+static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
+ .mac_config = lan78xx_mac_config,
+ .mac_link_down = lan78xx_mac_link_down,
+ .mac_link_up = lan78xx_mac_link_up,
+ .mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
+};
+
+/**
+ * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
+ * @dev: LAN78xx device
+ *
+ * Use fixed link configuration with 1 Gbps full duplex. This is used in special
+ * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
+ * to a switch without a visible PHY.
*
- * Return: pointer to the registered fixed PHY, or ERR_PTR() on error.
+ * Return: 0 on success or a negative error code.
*/
-static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev)
+static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
{
- struct fixed_phy_status fphy_status = {
- .link = 1,
+ static const struct phylink_link_state state = {
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
};
netdev_info(dev->net,
- "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n");
+ "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");
- return fixed_phy_register(&fphy_status, NULL);
+ return phylink_set_fixed_link(dev->phylink, &state);
}
/**
@@ -2675,7 +2628,7 @@ static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
dev->interface = PHY_INTERFACE_MODE_RGMII;
/* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
- return lan78xx_register_fixed_phy(dev);
+ return NULL;
case ID_REV_CHIP_ID_7800_:
case ID_REV_CHIP_ID_7850_:
@@ -2802,20 +2755,96 @@ static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
return lan78xx_write_reg(dev, HW_CFG, reg);
}
+static int lan78xx_phylink_setup(struct lan78xx_net *dev)
+{
+ struct phylink_config *pc = &dev->phylink_config;
+ struct phylink *phylink;
+
+ pc->dev = &dev->net->dev;
+ pc->type = PHYLINK_NETDEV;
+ pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+ pc->mac_managed_pm = true;
+ pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
+ /*
+ * Default TX LPI (Low Power Idle) request delay count is set to 50us.
+ *
+ * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
+ *
+ * Reasoning:
+ * According to the application note in the LAN7800 documentation, a
+ * zero delay may negatively impact the TX data path’s ability to
+ * support Gigabit operation. A value of 50us is recommended as a
+ * reasonable default when the part operates at Gigabit speeds,
+ * balancing stability and power efficiency in EEE mode. This delay can
+ * be increased based on performance testing, as EEE is designed for
+ * scenarios with mostly idle links and occasional bursts of full
+ * bandwidth transmission. The goal is to ensure reliable Gigabit
+ * performance without overly aggressive power optimization during
+ * inactive periods.
+ */
+ pc->lpi_timer_default = 50;
+ pc->eee_enabled_default = true;
+
+ if (dev->chipid == ID_REV_CHIP_ID_7801_)
+ phy_interface_set_rgmii(pc->supported_interfaces);
+ else
+ __set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
+
+ memcpy(dev->phylink_config.lpi_interfaces,
+ dev->phylink_config.supported_interfaces,
+ sizeof(dev->phylink_config.lpi_interfaces));
+
+ phylink = phylink_create(pc, dev->net->dev.fwnode,
+ dev->interface, &lan78xx_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ dev->phylink = phylink;
+
+ return 0;
+}
+
+static void lan78xx_phy_uninit(struct lan78xx_net *dev)
+{
+ if (dev->phylink) {
+ phylink_disconnect_phy(dev->phylink);
+ phylink_destroy(dev->phylink);
+ dev->phylink = NULL;
+ }
+}
+
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
- int ret;
- u32 mii_adv;
struct phy_device *phydev;
+ int ret;
phydev = lan78xx_get_phy(dev);
+ /* phydev can be NULL if no PHY is found and the chip is LAN7801,
+ * which will use a fixed link later.
+ * If an error occurs, return the error code immediately.
+ */
if (IS_ERR(phydev))
return PTR_ERR(phydev);
+ ret = lan78xx_phylink_setup(dev);
+ if (ret < 0)
+ return ret;
+
+ /* If no PHY is found, set up a fixed link. It is very specific to
+ * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
+ * LAN7801 acts as a USB-to-Ethernet interface to a switch without
+ * a visible PHY.
+ */
+ if (!phydev) {
+ ret = lan78xx_set_fixed_link(dev);
+ if (ret < 0)
+ goto phylink_uninit;
+ }
+
ret = lan78xx_mac_prepare_for_phy(dev);
if (ret < 0)
- goto free_phy;
+ goto phylink_uninit;
/* if phyirq is not set, use polling mode in phylib */
if (dev->domain_data.phyirq > 0)
@@ -2824,54 +2853,21 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
phydev->irq = PHY_POLL;
netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
- /* set to AUTOMDIX */
- phydev->mdix = ETH_TP_MDI_AUTO;
-
- ret = phy_connect_direct(dev->net, phydev,
- lan78xx_link_status_change,
- dev->interface);
+ ret = phylink_connect_phy(dev->phylink, phydev);
if (ret) {
- netdev_err(dev->net, "can't attach PHY to %s\n",
- dev->mdiobus->id);
- if (dev->chipid == ID_REV_CHIP_ID_7801_) {
- if (phy_is_pseudo_fixed_link(phydev)) {
- fixed_phy_unregister(phydev);
- phy_device_free(phydev);
- }
- }
- return -EIO;
+ netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
+ dev->mdiobus->id, ERR_PTR(ret));
+ goto phylink_uninit;
}
- /* MAC doesn't support 1000T Half */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* support both flow controls */
- dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->advertising);
- mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- mii_adv_to_linkmode_adv_t(fc, mii_adv);
- linkmode_or(phydev->advertising, fc, phydev->advertising);
-
- phy_support_eee(phydev);
-
ret = lan78xx_configure_leds_from_dt(dev, phydev);
- if (ret)
- goto free_phy;
-
- genphy_config_aneg(phydev);
-
- dev->fc_autoneg = phydev->autoneg;
+ if (ret < 0)
+ goto phylink_uninit;
return 0;
-free_phy:
- if (phy_is_pseudo_fixed_link(phydev)) {
- fixed_phy_unregister(phydev);
- phy_device_free(phydev);
- }
+phylink_uninit:
+ lan78xx_phy_uninit(dev);
return ret;
}
@@ -3212,7 +3208,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
unsigned long timeout;
int ret;
u32 buf;
- u8 sig;
ret = lan78xx_read_reg(dev, HW_CFG, &buf);
if (ret < 0)
@@ -3369,22 +3364,12 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (ret < 0)
return ret;
+ buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);
+
/* LAN7801 only has RGMII mode */
- if (dev->chipid == ID_REV_CHIP_ID_7801_) {
+ if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- /* Enable Auto Duplex and Auto speed */
- buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
- }
- if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
- dev->chipid == ID_REV_CHIP_ID_7850_) {
- ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
- if (!ret && sig != EEPROM_INDICATOR) {
- /* Implies there is no external eeprom. Set mac speed */
- netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
- buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
- }
- }
ret = lan78xx_write_reg(dev, MAC_CR, buf);
if (ret < 0)
return ret;
@@ -3434,9 +3419,11 @@ static int lan78xx_open(struct net_device *net)
mutex_lock(&dev->dev_mutex);
- phy_start(net->phydev);
+ lan78xx_init_stats(dev);
- netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+ napi_enable(&dev->napi);
+
+ set_bit(EVENT_DEV_OPEN, &dev->flags);
/* for Link Check */
if (dev->urb_intr) {
@@ -3448,31 +3435,8 @@ static int lan78xx_open(struct net_device *net)
}
}
- ret = lan78xx_flush_rx_fifo(dev);
- if (ret < 0)
- goto done;
- ret = lan78xx_flush_tx_fifo(dev);
- if (ret < 0)
- goto done;
+ phylink_start(dev->phylink);
- ret = lan78xx_start_tx_path(dev);
- if (ret < 0)
- goto done;
- ret = lan78xx_start_rx_path(dev);
- if (ret < 0)
- goto done;
-
- lan78xx_init_stats(dev);
-
- set_bit(EVENT_DEV_OPEN, &dev->flags);
-
- netif_start_queue(net);
-
- dev->link_on = false;
-
- napi_enable(&dev->napi);
-
- lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
mutex_unlock(&dev->dev_mutex);
@@ -3530,7 +3494,6 @@ static int lan78xx_stop(struct net_device *net)
timer_delete_sync(&dev->stat_monitor);
clear_bit(EVENT_DEV_OPEN, &dev->flags);
- netif_stop_queue(net);
napi_disable(&dev->napi);
lan78xx_terminate_urbs(dev);
@@ -3540,12 +3503,7 @@ static int lan78xx_stop(struct net_device *net)
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
- /* ignore errors that occur stopping the Tx and Rx data paths */
- lan78xx_stop_tx_path(dev);
- lan78xx_stop_rx_path(dev);
-
- if (net->phydev)
- phy_stop(net->phydev);
+ phylink_stop(dev->phylink);
usb_kill_urb(dev->urb_intr);
@@ -3555,7 +3513,7 @@ static int lan78xx_stop(struct net_device *net)
*/
clear_bit(EVENT_TX_HALT, &dev->flags);
clear_bit(EVENT_RX_HALT, &dev->flags);
- clear_bit(EVENT_LINK_RESET, &dev->flags);
+ clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
clear_bit(EVENT_STAT_UPDATE, &dev->flags);
cancel_delayed_work_sync(&dev->wq);
@@ -4479,14 +4437,14 @@ static void lan78xx_delayedwork(struct work_struct *work)
}
}
- if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+ if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
int ret = 0;
- clear_bit(EVENT_LINK_RESET, &dev->flags);
- if (lan78xx_link_reset(dev) < 0) {
- netdev_info(dev->net, "link reset failed (%d)\n",
- ret);
- }
+ clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
+ ret = lan78xx_phy_int_ack(dev);
+ if (ret)
+ netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
+ ERR_PTR(ret));
}
if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
@@ -4560,32 +4518,29 @@ static void lan78xx_disconnect(struct usb_interface *intf)
struct lan78xx_net *dev;
struct usb_device *udev;
struct net_device *net;
- struct phy_device *phydev;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
if (!dev)
return;
- netif_napi_del(&dev->napi);
-
udev = interface_to_usbdev(intf);
net = dev->net;
+ rtnl_lock();
+ phylink_stop(dev->phylink);
+ phylink_disconnect_phy(dev->phylink);
+ rtnl_unlock();
+
+ netif_napi_del(&dev->napi);
+
unregister_netdev(net);
timer_shutdown_sync(&dev->stat_monitor);
set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
cancel_delayed_work_sync(&dev->wq);
- phydev = net->phydev;
-
- phy_disconnect(net->phydev);
-
- if (phy_is_pseudo_fixed_link(phydev)) {
- fixed_phy_unregister(phydev);
- phy_device_free(phydev);
- }
+ phylink_destroy(dev->phylink);
usb_scuttle_anchored_urbs(&dev->deferred);
@@ -4669,7 +4624,6 @@ static int lan78xx_probe(struct usb_interface *intf,
goto out1;
}
- /* netdev_printk() needs this */
SET_NETDEV_DEV(netdev, &intf->dev);
dev = netdev_priv(netdev);
@@ -4788,7 +4742,7 @@ static int lan78xx_probe(struct usb_interface *intf,
ret = register_netdev(netdev);
if (ret != 0) {
netif_err(dev, probe, netdev, "couldn't register the device\n");
- goto out8;
+ goto phy_uninit;
}
usb_set_intfdata(intf, dev);
@@ -4803,8 +4757,8 @@ static int lan78xx_probe(struct usb_interface *intf,
return 0;
-out8:
- phy_disconnect(netdev->phydev);
+phy_uninit:
+ lan78xx_phy_uninit(dev);
free_urbs:
usb_free_urb(dev->urb_intr);
out5:
@@ -5139,6 +5093,10 @@ static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
spin_unlock_irq(&dev->txq.lock);
}
+ rtnl_lock();
+ phylink_suspend(dev->phylink, false);
+ rtnl_unlock();
+
/* stop RX */
ret = lan78xx_stop_rx_path(dev);
if (ret < 0)
@@ -5366,11 +5324,15 @@ static int lan78xx_reset_resume(struct usb_interface *intf)
if (ret < 0)
return ret;
- phy_start(dev->net->phydev);
-
ret = lan78xx_resume(intf);
+ if (ret < 0)
+ return ret;
- return ret;
+ rtnl_lock();
+ phylink_resume(dev->phylink);
+ rtnl_unlock();
+
+ return 0;
}
static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04e715a4c2a..9564478a79cc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
__skb_queue_tail(&dev->done, skb);
if (dev->done.qlen == 1)
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
spin_unlock(&dev->done.lock);
spin_unlock_irqrestore(&list->lock, flags);
return old_state;
@@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
break;
case 0:
__usbnet_queue_skb(&dev->rxq, skb, rx_start);
@@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
num++;
}
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
netif_dbg(dev, rx_status, dev->net,
"paused rx queue disabled, %d skbs requeued\n", num);
@@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
if (netif_running(dev->net)) {
(void) unlink_urbs (dev, &dev->rxq);
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
timer_delete_sync(&dev->delay);
- tasklet_kill(&dev->bh);
+ disable_work_sync(&dev->bh_work);
cancel_work_sync(&dev->kevent);
/* We have cyclic dependencies. Those calls are needed
* to break a cycle. We cannot fall into the gaps because
* we have a flag
*/
- tasklet_kill(&dev->bh);
+ disable_work_sync(&dev->bh_work);
timer_delete_sync(&dev->delay);
cancel_work_sync(&dev->kevent);
@@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
clear_bit(EVENT_RX_KILL, &dev->flags);
// delay posting reads until we're fully open
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0) {
@@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
*/
} else {
/* submitting URBs for reading packets */
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
/* hard_mtu or rx_urb_size may change during link change */
@@ -1198,11 +1198,11 @@ fail_halt:
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
if (!usbnet_going_away(dev))
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
- /* tasklet could resubmit itself forever if memory is tight */
+ /* work could resubmit itself forever if memory is tight */
if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = NULL;
int resched = 1;
@@ -1224,7 +1224,7 @@ fail_halt:
fail_lowmem:
if (resched)
if (!usbnet_going_away(dev))
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
@@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
struct usbnet *dev = netdev_priv(net);
unlink_urbs (dev, &dev->txq);
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
/* this needs to be handled individually because the generic layer
* doesn't know what is sufficient and could not restore private
* information if a remedy of an unconditional reset were used.
@@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
/*-------------------------------------------------------------------------*/
-// tasklet (work deferred from completions, in_irq) or timer
+// BH work (deferred from completions, in_irq) or timer
static void usbnet_bh (struct timer_list *t)
{
@@ -1601,16 +1601,16 @@ static void usbnet_bh (struct timer_list *t)
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
if (dev->rxq.qlen < RX_QLEN(dev))
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
if (dev->txq.qlen < TX_QLEN (dev))
netif_wake_queue (dev->net);
}
}
-static void usbnet_bh_tasklet(struct tasklet_struct *t)
+static void usbnet_bh_work(struct work_struct *work)
{
- struct usbnet *dev = from_tasklet(dev, t, bh);
+ struct usbnet *dev = from_work(dev, work, bh_work);
usbnet_bh(&dev->delay);
}
@@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- tasklet_setup(&dev->bh, usbnet_bh_tasklet);
+ INIT_WORK(&dev->bh_work, usbnet_bh_work);
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
timer_setup(&dev->delay, usbnet_bh, 0);
@@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
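
The usbnet change is a mechanical tasklet-to-BH-workqueue conversion: the tasklet becomes a work_struct, tasklet_schedule() becomes queue_work(system_bh_wq, ...), and tasklet_kill() becomes disable_work_sync(). A condensed sketch of the resulting shape, reduced from the hunks above; the structure and function names are shortened placeholders.

/* Condensed from the hunks above; not a complete driver. */
struct usbnet_like {
    struct work_struct bh_work;         /* was: struct tasklet_struct bh */
    /* ... rx/tx/done queues, timer, net_device ... */
};

static void usbnet_like_bh_work(struct work_struct *work)
{
    struct usbnet_like *dev = from_work(dev, work, bh_work);

    /* ... drain dev->done, refill RX, wake the TX queue ... */
}

static void usbnet_like_init(struct usbnet_like *dev)
{
    INIT_WORK(&dev->bh_work, usbnet_like_bh_work);  /* was tasklet_setup() */
}

static void usbnet_like_kick(struct usbnet_like *dev)
{
    queue_work(system_bh_wq, &dev->bh_work);        /* was tasklet_schedule() */
}

static void usbnet_like_stop(struct usbnet_like *dev)
{
    disable_work_sync(&dev->bh_work);               /* was tasklet_kill() */
}
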
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e53ba600605a..07e41dce4203 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -4193,8 +4193,11 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
}
-static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
+static int virtnet_get_hashflow(struct net_device *dev,
+ struct ethtool_rxfh_fields *info)
{
+ struct virtnet_info *vi = netdev_priv(dev);
+
info->data = 0;
switch (info->flow_type) {
case TCP_V4_FLOW:
@@ -4243,17 +4246,22 @@ static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_r
info->data = 0;
break;
}
+
+ return 0;
}
-static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
+static int virtnet_set_hashflow(struct net_device *dev,
+ const struct ethtool_rxfh_fields *info,
+ struct netlink_ext_ack *extack)
{
+ struct virtnet_info *vi = netdev_priv(dev);
u32 new_hashtypes = vi->rss_hash_types_saved;
bool is_disable = info->data & RXH_DISCARD;
bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
/* supports only 'sd', 'sdfn' and 'r' */
if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
- return false;
+ return -EINVAL;
switch (info->flow_type) {
case TCP_V4_FLOW:
@@ -4292,21 +4300,22 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
break;
default:
/* unsupported flow */
- return false;
+ return -EINVAL;
}
/* if unsupported hashtype was set */
if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
- return false;
+ return -EINVAL;
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
if (vi->dev->features & NETIF_F_RXHASH)
- return virtnet_commit_rss_command(vi);
+ if (!virtnet_commit_rss_command(vi))
+ return -EINVAL;
}
- return true;
+ return 0;
}
static void virtnet_get_drvinfo(struct net_device *dev,
@@ -5540,27 +5549,6 @@ static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = vi->curr_queue_pairs;
break;
- case ETHTOOL_GRXFH:
- virtnet_get_hashflow(vi, info);
- break;
- default:
- rc = -EOPNOTSUPP;
- }
-
- return rc;
-}
-
-static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- int rc = 0;
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- if (!virtnet_set_hashflow(vi, info))
- rc = -EINVAL;
-
- break;
default:
rc = -EOPNOTSUPP;
}
@@ -5591,8 +5579,9 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
.get_rxfh = virtnet_get_rxfh,
.set_rxfh = virtnet_set_rxfh,
+ .get_rxfh_fields = virtnet_get_hashflow,
+ .set_rxfh_fields = virtnet_set_hashflow,
.get_rxnfc = virtnet_get_rxnfc,
- .set_rxnfc = virtnet_set_rxnfc,
};
static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
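
This virtio_net change, like the vmxnet3 one that follows, moves the per-flow hash-field logic out of the get_rxnfc()/set_rxnfc() multiplexers and into the dedicated .get_rxfh_fields/.set_rxfh_fields callbacks, which deal purely in RXH_* bitmasks per flow type. A runnable userspace sketch of the bitmask side using the uapi constants; the supported-flow policy shown is an assumption, not either driver's.

#include <linux/ethtool.h>
#include <stdio.h>

/*
 * Build the RXH_* hash-field mask reported for a flow type. The policy
 * here (4-tuple for TCP, 2-tuple for plain IPv4/UDP) is an illustrative
 * assumption, not what virtio_net or vmxnet3 actually report.
 */
static unsigned int hash_fields_for_flow(unsigned int flow_type)
{
    switch (flow_type) {
    case TCP_V4_FLOW:
    case TCP_V6_FLOW:
        return RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
    case UDP_V4_FLOW:
    case IPV4_FLOW:
        return RXH_IP_SRC | RXH_IP_DST;
    default:
        return 0;
    }
}

int main(void)
{
    printf("tcp4 hash fields: 0x%x\n", hash_fields_for_flow(TCP_V4_FLOW));
    return 0;
}
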
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 471f91c4204a..cc4d7573839d 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -833,11 +833,19 @@ out:
}
static int
-vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
- struct ethtool_rxnfc *info)
+vmxnet3_get_rss_hash_opts(struct net_device *netdev,
+ struct ethtool_rxfh_fields *info)
{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
enum Vmxnet3_RSSField rss_fields;
+ if (!VMXNET3_VERSION_GE_4(adapter))
+ return -EOPNOTSUPP;
+#ifdef VMXNET3_RSS
+ if (!adapter->rss)
+ return -EOPNOTSUPP;
+#endif
+
if (netif_running(adapter->netdev)) {
unsigned long flags;
@@ -900,10 +908,20 @@ vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
static int
vmxnet3_set_rss_hash_opt(struct net_device *netdev,
- struct vmxnet3_adapter *adapter,
- struct ethtool_rxnfc *nfc)
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
- enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ enum Vmxnet3_RSSField rss_fields;
+
+ if (!VMXNET3_VERSION_GE_4(adapter))
+ return -EOPNOTSUPP;
+#ifdef VMXNET3_RSS
+ if (!adapter->rss)
+ return -EOPNOTSUPP;
+#endif
+
+ rss_fields = adapter->rss_fields;
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -1074,54 +1092,11 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = adapter->num_rx_queues;
break;
- case ETHTOOL_GRXFH:
- if (!VMXNET3_VERSION_GE_4(adapter)) {
- err = -EOPNOTSUPP;
- break;
- }
-#ifdef VMXNET3_RSS
- if (!adapter->rss) {
- err = -EOPNOTSUPP;
- break;
- }
-#endif
- err = vmxnet3_get_rss_hash_opts(adapter, info);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int
-vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
-{
- struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- int err = 0;
-
- if (!VMXNET3_VERSION_GE_4(adapter)) {
- err = -EOPNOTSUPP;
- goto done;
- }
-#ifdef VMXNET3_RSS
- if (!adapter->rss) {
- err = -EOPNOTSUPP;
- goto done;
- }
-#endif
-
- switch (info->cmd) {
- case ETHTOOL_SRXFH:
- err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
- break;
default:
err = -EOPNOTSUPP;
break;
}
-done:
return err;
}
@@ -1361,12 +1336,13 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
.get_rxnfc = vmxnet3_get_rxnfc,
- .set_rxnfc = vmxnet3_set_rxnfc,
#ifdef VMXNET3_RSS
.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh = vmxnet3_get_rss,
.set_rxfh = vmxnet3_set_rss,
#endif
+ .get_rxfh_fields = vmxnet3_get_rss_hash_opts,
+ .set_rxfh_fields = vmxnet3_set_rss_hash_opt,
.get_link_ksettings = vmxnet3_get_link_ksettings,
.get_channels = vmxnet3_get_channels,
};
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 97792de896b7..bcde95cb2a2e 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1485,21 +1485,18 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
- struct vxlan_net *vn;
+ ASSERT_RTNL();
if (!vs)
return false;
if (!refcount_dec_and_test(&vs->refcnt))
return false;
- vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
- spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
udp_tunnel_notify_del_rx_port(vs->sock,
(vs->flags & VXLAN_F_GPE) ?
UDP_TUNNEL_TYPE_VXLAN_GPE :
UDP_TUNNEL_TYPE_VXLAN);
- spin_unlock(&vn->sock_lock);
return true;
}
@@ -2451,6 +2448,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rcu_read_lock();
if (addr_family == AF_INET) {
struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+ u16 ipcb_flags = 0;
struct rtable *rt;
__be16 df = 0;
__be32 saddr;
@@ -2467,6 +2465,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
+ if (flags & VXLAN_F_MC_ROUTE)
+ ipcb_flags |= IPSKB_MCROUTE;
+
if (!info) {
/* Bypass encapsulation if the destination is local */
err = encap_bypass_if_local(skb, dev, vxlan, AF_INET,
@@ -2522,11 +2523,13 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, saddr,
pkey->u.ipv4.dst, tos, ttl, df,
- src_port, dst_port, xnet, !udp_sum);
+ src_port, dst_port, xnet, !udp_sum,
+ ipcb_flags);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct in6_addr saddr;
+ u16 ip6cb_flags = 0;
if (!ifindex)
ifindex = sock6->sock->sk->sk_bound_dev_if;
@@ -2542,6 +2545,9 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
+ if (flags & VXLAN_F_MC_ROUTE)
+ ip6cb_flags |= IP6SKB_MCROUTE;
+
if (!info) {
u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags;
@@ -2586,7 +2592,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&saddr, &pkey->u.ipv6.dst, tos, ttl,
- pkey->label, src_port, dst_port, !udp_sum);
+ pkey->label, src_port, dst_port, !udp_sum,
+ ip6cb_flags);
#endif
}
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX, pkt_len);
@@ -2847,26 +2854,23 @@ static void vxlan_cleanup(struct timer_list *t)
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ ASSERT_RTNL();
- spin_lock(&vn->sock_lock);
hlist_del_init_rcu(&vxlan->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&vxlan->hlist6.hlist);
#endif
- spin_unlock(&vn->sock_lock);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
struct vxlan_dev_node *node)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
__be32 vni = vxlan->default_dst.remote_vni;
+ ASSERT_RTNL();
+
node->vxlan = vxlan;
- spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
- spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
@@ -3291,9 +3295,10 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unsigned int i;
- spin_lock(&vn->sock_lock);
+ ASSERT_RTNL();
+
for (i = 0; i < PORT_HASH_SIZE; ++i) {
- hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
+ hlist_for_each_entry(vs, &vn->sock_list[i], hlist) {
unsigned short type;
if (vs->flags & VXLAN_F_GPE)
@@ -3307,7 +3312,6 @@ static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
udp_tunnel_drop_rx_port(dev, vs->sock, type);
}
}
- spin_unlock(&vn->sock_lock);
}
/* Initialize the device structure. */
@@ -3401,6 +3405,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_LOCALBYPASS] = NLA_POLICY_MAX(NLA_U8, 1),
[IFLA_VXLAN_LABEL_POLICY] = NLA_POLICY_MAX(NLA_U32, VXLAN_LABEL_MAX),
[IFLA_VXLAN_RESERVED_BITS] = NLA_POLICY_EXACT_LEN(sizeof(struct vxlanhdr)),
+ [IFLA_VXLAN_MC_ROUTE] = NLA_POLICY_MAX(NLA_U8, 1),
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -3537,12 +3542,13 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
__be16 port, u32 flags,
int ifindex)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
struct socket *sock;
unsigned int h;
struct udp_tunnel_sock_cfg tunnel_cfg;
+ ASSERT_RTNL();
+
vs = kzalloc(sizeof(*vs), GFP_KERNEL);
if (!vs)
return ERR_PTR(-ENOMEM);
@@ -3560,13 +3566,11 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
refcount_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
- spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
udp_tunnel_notify_add_rx_port(sock,
(vs->flags & VXLAN_F_GPE) ?
UDP_TUNNEL_TYPE_VXLAN_GPE :
UDP_TUNNEL_TYPE_VXLAN);
- spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
@@ -3590,26 +3594,27 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
int l3mdev_index = 0;
+ ASSERT_RTNL();
+
if (vxlan->cfg.remote_ifindex)
l3mdev_index = l3mdev_master_upper_ifindex_by_index(
vxlan->net, vxlan->cfg.remote_ifindex);
if (!vxlan->cfg.no_share) {
- spin_lock(&vn->sock_lock);
+ rcu_read_lock();
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
vxlan->cfg.dst_port, vxlan->cfg.flags,
l3mdev_index);
if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
- spin_unlock(&vn->sock_lock);
+ rcu_read_unlock();
return -EBUSY;
}
- spin_unlock(&vn->sock_lock);
+ rcu_read_unlock();
}
if (!vs)
vs = vxlan_socket_create(vxlan->net, ipv6,
@@ -4314,6 +4319,14 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
return err;
}
+ if (data[IFLA_VXLAN_MC_ROUTE]) {
+ err = vxlan_nl2flag(conf, data, IFLA_VXLAN_MC_ROUTE,
+ VXLAN_F_MC_ROUTE, changelink,
+ true, extack);
+ if (err)
+ return err;
+ }
+
if (tb[IFLA_MTU]) {
if (changelink) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
@@ -4875,7 +4888,6 @@ static __net_init int vxlan_init_net(struct net *net)
unsigned int h;
INIT_LIST_HEAD(&vn->vxlan_list);
- spin_lock_init(&vn->sock_lock);
vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
for (h = 0; h < PORT_HASH_SIZE; ++h)
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index d328aed9feef..6c625fb29c6c 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -19,8 +19,8 @@ extern const struct rhashtable_params vxlan_vni_rht_params;
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
+ /* sock_list is protected by rtnl lock */
struct hlist_head sock_list[PORT_HASH_SIZE];
- spinlock_t sock_lock;
struct notifier_block nexthop_notifier_block;
};
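
With sock_lock gone, the vxlan socket and VNI hash lists follow the usual RTNL-writer/RCU-reader split: mutation paths assert the RTNL and use the _rcu list helpers, while datapath lookups stay under rcu_read_lock(). A condensed sketch of the two sides; the struct and function names are placeholders, not vxlan's.

/* Condensed from the hunks above; the names are placeholders, not vxlan's. */
struct demo_sock {
    struct hlist_node hlist;
    __be16 port;
};

static struct hlist_head demo_sock_list[16];    /* writes serialized by RTNL */

/* Control path: RTNL held, RCU-aware list ops so readers never block. */
static void demo_sock_add(struct demo_sock *vs, unsigned int h)
{
    ASSERT_RTNL();
    hlist_add_head_rcu(&vs->hlist, &demo_sock_list[h]);
}

static void demo_sock_del(struct demo_sock *vs)
{
    ASSERT_RTNL();
    hlist_del_init_rcu(&vs->hlist);
    /* the caller frees vs after a grace period, e.g. via kfree_rcu() */
}

/* Datapath lookup: no RTNL, just an RCU read-side critical section. */
static bool demo_sock_exists(__be16 port, unsigned int h)
{
    struct demo_sock *vs;
    bool found = false;

    rcu_read_lock();
    hlist_for_each_entry_rcu(vs, &demo_sock_list[h], hlist) {
        if (vs->port == port) {
            found = true;
            break;
        }
    }
    rcu_read_unlock();
    return found;
}
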
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index 186d0660669a..4ff56d9f8f28 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -40,11 +40,11 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
struct vxlan_vni_node *v,
bool del)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_dev_node *node;
struct vxlan_sock *vs;
- spin_lock(&vn->sock_lock);
+ ASSERT_RTNL();
+
if (del) {
if (!hlist_unhashed(&v->hlist4.hlist))
hlist_del_init_rcu(&v->hlist4.hlist);
@@ -52,7 +52,7 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
if (!hlist_unhashed(&v->hlist6.hlist))
hlist_del_init_rcu(&v->hlist6.hlist);
#endif
- goto out;
+ return;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -67,23 +67,21 @@ static void vxlan_vs_add_del_vninode(struct vxlan_dev *vxlan,
node = &v->hlist4;
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
}
-out:
- spin_unlock(&vn->sock_lock);
}
void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
bool ipv6)
{
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
struct vxlan_vni_node *v, *tmp;
struct vxlan_dev_node *node;
+ ASSERT_RTNL();
+
if (!vg)
return;
- spin_lock(&vn->sock_lock);
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
@@ -94,26 +92,24 @@ void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
node->vxlan = vxlan;
hlist_add_head_rcu(&node->hlist, vni_head(vs, v->vni));
}
- spin_unlock(&vn->sock_lock);
}
void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
{
struct vxlan_vni_group *vg = rtnl_dereference(vxlan->vnigrp);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_vni_node *v, *tmp;
+ ASSERT_RTNL();
+
if (!vg)
return;
- spin_lock(&vn->sock_lock);
list_for_each_entry_safe(v, tmp, &vg->vni_list, vlist) {
hlist_del_init_rcu(&v->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
hlist_del_init_rcu(&v->hlist6.hlist);
#endif
}
- spin_unlock(&vn->sock_lock);
}
static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 0414d7a6ce74..253488f8c00f 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -84,7 +84,7 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
skb->ignore_df = 1;
udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
- fl.fl4_dport, false, false);
+ fl.fl4_dport, false, false, 0);
goto out;
err:
@@ -151,7 +151,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
skb->ignore_df = 1;
udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
- fl.fl6_dport, false);
+ fl.fl6_dport, false, 0);
goto out;
err:
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index a2d87c3ad196..e94a6b180314 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1293,7 +1293,7 @@ static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid)
ADM8211_CSR_WRITE(ABDA1, reg);
}
-static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
+static int adm8211_config(struct ieee80211_hw *dev, int radio_idx, u32 changed)
{
struct adm8211_priv *priv = dev->priv;
struct ieee80211_conf *conf = &dev->conf;
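
Most of the wireless churn below is mechanical: the mac80211 driver ops grow an int radio_idx argument. For a single-radio driver the adaptation amounts to accepting and ignoring the new parameter; judging by the ath10k hunks further down, -1 is passed when no particular radio is meant (treat that convention as an assumption here). A hypothetical callback, for illustration only:

/* Hypothetical single-radio driver; radio_idx is accepted but unused. */
static int exdrv_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
	struct exdrv_priv *priv = hw->priv;	/* driver-private data (made up) */

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
		exdrv_set_channel(priv, hw->conf.chandef.chan);	/* made-up helper */

	return 0;
}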
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 343c9de2749c..1230e6278f23 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1083,7 +1083,8 @@ static void ar5523_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&ar->mutex);
}
-static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct ar5523 *ar = hw->priv;
int ret;
@@ -1137,7 +1138,7 @@ static void ar5523_remove_interface(struct ieee80211_hw *hw,
ar->vif = NULL;
}
-static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed)
+static int ar5523_hwconfig(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ar5523 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 48efdc71d54d..52118867ecde 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -3,8 +3,10 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "bmi.h"
#include "hif.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a89a7491a76c..7bbda46cfd93 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -4,8 +4,10 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "hif.h"
#include "ce.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index fe3a8f4a1cc1..d31708eca3c8 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -4,8 +4,10 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
@@ -2606,7 +2608,7 @@ static void ath10k_core_set_coverage_class_work(struct work_struct *work)
set_coverage_class_work);
if (ar->hw_params.hw_ops->set_coverage_class)
- ar->hw_params.hw_ops->set_coverage_class(ar, -1);
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1, -1);
}
static int ath10k_core_init_firmware_features(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index bb3a276b7ed5..50d0c4213ecf 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -3,11 +3,13 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "coredump.h"
#include <linux/devcoredump.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/utsname.h>
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index a0c1afeda4dd..6410d3961e76 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -4,10 +4,12 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/module.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 2da08dfebd3e..ce9b248c12dc 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -3,8 +3,11 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
+
#include "core.h"
#include "hif.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index fb0d5d4cae3a..f12243d6bee1 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -4,8 +4,11 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
+
#include "core.h"
#include "htc.h"
#include "htt.h"
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 9725feecefd6..c1ddd761af3e 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -3,8 +3,10 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 84b35a22fc23..59b6cebfdd8f 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -590,6 +590,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
* function monitors and modifies the corresponding MAC registers.
*/
static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
+ int radio_idx,
s16 value)
{
u32 slottime_reg;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 7ffa1fbe2874..fec56b916497 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -646,7 +646,7 @@ struct htt_rx_ring_rx_desc_offsets;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
- void (*set_coverage_class)(struct ath10k *ar, s16 value);
+ void (*set_coverage_class)(struct ath10k *ar, int radio_idx, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
int (*is_rssi_enable)(struct htt_resp *resp);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 07fe05384cdf..40843974d6f8 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -9,6 +9,7 @@
#include "mac.h"
+#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
@@ -4820,7 +4821,8 @@ void ath10k_halt(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+static int ath10k_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath10k *ar = hw->priv;
@@ -5067,7 +5069,8 @@ static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
return 0;
}
-static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+static int ath10k_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ath10k *ar = hw->priv;
int ret;
@@ -5437,7 +5440,7 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
-static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+static int ath10k_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -6336,7 +6339,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
-static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
+static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 value)
{
struct ath10k *ar = hw->priv;
@@ -6347,7 +6351,7 @@ static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
WARN_ON_ONCE(1);
return;
}
- ar->hw_params.hw_ops->set_coverage_class(ar, value);
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1, value);
}
struct ath10k_mac_tdls_iter_data {
@@ -8035,7 +8039,8 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw,
* in ath10k, but device-specific in mac80211.
*/
-static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif;
@@ -8058,7 +8063,8 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
/* Even though there's a WMI enum for fragmentation threshold no known
* firmware actually implements it. Moreover it is not possible to rely
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
index c7d4c97e6079..421ec47c59bd 100644
--- a/drivers/net/wireless/ath/ath10k/trace.c
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index 746038006eb4..be9395f2ed8b 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 22a101136135..48d81b82f895 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
diff --git a/drivers/net/wireless/ath/ath11k/coredump.c b/drivers/net/wireless/ath/ath11k/coredump.c
index b8bad358cebe..1949d57b007a 100644
--- a/drivers/net/wireless/ath/ath11k/coredump.c
+++ b/drivers/net/wireless/ath/ath11k/coredump.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/devcoredump.h>
+#include <linux/export.h>
#include "hif.h"
#include "coredump.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index 2b8544355fc1..37d23a559ba3 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include "core.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 5d46f8e4c231..906df3b13f4f 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/vmalloc.h>
#include "debugfs.h"
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index bf3928ada995..4661e0d64dd9 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -2,9 +2,11 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <crypto/hash.h>
+#include <linux/export.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c
index cbbd8e57119f..07d775a7b528 100644
--- a/drivers/net/wireless/ath/ath11k/fw.c
+++ b/drivers/net/wireless/ath/ath11k/fw.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "core.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 8cb1505a5a0c..f1d76839a87b 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include "hal_tx.h"
#include "debug.h"
#include "hal_desc.h"
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 13301ca317a5..758ef6f26432 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1283,7 +1283,7 @@ static int ath11k_mac_config_ps(struct ath11k *ar)
return ret;
}
-static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ath11k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -7044,7 +7044,8 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
-static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath11k *ar = hw->priv;
@@ -7058,7 +7059,8 @@ static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *
return 0;
}
-static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ath11k *ar = hw->priv;
int ret;
@@ -8182,7 +8184,8 @@ ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
/* mac80211 stores device specific RTS/Fragmentation threshold value,
* this is set interface specific to firmware from ath11k driver
*/
-static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
struct ath11k *ar = hw->priv;
int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
@@ -8190,7 +8193,8 @@ static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
}
-static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
/* Even though there's a WMI vdev param for fragmentation threshold no
* known firmware actually implements it. Moreover it is not possible to
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 3fe77310c71f..fc6e7da05c60 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include "core.h"
#include "pcic.h"
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 2782f4723e41..378ac96b861b 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2,9 +2,11 @@
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/elf.h>
+#include <linux/export.h>
#include "qmi.h"
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
index 6620650d7845..44ff8e9eff5d 100644
--- a/drivers/net/wireless/ath/ath11k/trace.c
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 89ae80934b30..83caba3104d6 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -2,8 +2,10 @@
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/remoteproc.h>
@@ -1409,6 +1411,7 @@ void ath12k_core_halt(struct ath12k *ar)
ath12k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ar->regd_channel_update_work);
cancel_work_sync(&ab->rfkill_work);
cancel_work_sync(&ab->update_11d_work);
@@ -1472,6 +1475,7 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
complete(&ar->vdev_setup_done);
complete(&ar->vdev_delete_done);
complete(&ar->bss_survey_done);
+ complete(&ar->regd_update_completed);
wake_up(&ar->dp.tx_empty_waitq);
idr_for_each(&ar->txmgmt_idr,
@@ -1511,6 +1515,9 @@ static void ath12k_update_11d(struct work_struct *work)
ar = pdev->ar;
memcpy(&ar->alpha2, &arg.alpha2, 2);
+
+ reinit_completion(&ar->regd_update_completed);
+
ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
if (ret)
ath12k_warn(ar->ab,
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 7bcd9c70309f..0c1a6df7a02e 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -345,6 +345,10 @@ struct ath12k_link_vif {
bool is_sta_assoc_link;
struct ath12k_reg_tpc_power_info reg_tpc_info;
+
+ bool group_key_valid;
+ struct wmi_vdev_install_key_arg group_key;
+ bool pairwise_key_done;
};
struct ath12k_vif {
@@ -380,9 +384,7 @@ struct ath12k_vif {
struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
/* indicates bitmap of link vif created in FW */
- u16 links_map;
- u8 last_scan_link;
-
+ u32 links_map;
/* Must be last - ends in a flexible-array member.
*
* FIXME: Driver should not copy struct ieee80211_chanctx_conf,
@@ -719,7 +721,7 @@ struct ath12k {
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
* vdev_stop_status info, scan data, ath12k_sta info, ath12k_link_vif info,
- * channel context data, survey info, test mode data.
+ * channel context data, survey info, test mode data, regd_channel_update_queue.
*/
spinlock_t data_lock;
@@ -778,6 +780,8 @@ struct ath12k {
struct completion bss_survey_done;
struct work_struct regd_update_work;
+ struct work_struct regd_channel_update_work;
+ struct list_head regd_channel_update_queue;
struct wiphy_work wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
@@ -811,6 +815,7 @@ struct ath12k {
enum ath12k_11d_state state_11d;
u8 alpha2[REG_ALPHA2_LEN];
bool regdom_set_by_user;
+ struct completion regd_update_completed;
struct completion fw_stats_complete;
struct completion fw_stats_done;
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 6317c6d4c043..c6b10acb643e 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -84,6 +84,7 @@ int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
if (ret) {
ath12k_warn(ab, "failed to setup rx defrag context\n");
+ tid--;
goto peer_clean;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 28cadc4167f7..91f4e3aff74c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -3761,7 +3761,6 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
ath12k_hal_srng_access_begin(ab, srng);
while (likely(*budget)) {
- *budget -= 1;
mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
if (unlikely(!mon_dst_desc))
break;
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 57648febc4a4..420a9b161f4a 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -2533,31 +2533,15 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
struct ath12k_dp_rx_info *rx_info)
{
struct ath12k_base *ab = ar->ab;
- static const struct ieee80211_radiotap_he known = {
- .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
- .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
- };
- struct ieee80211_radiotap_he *he;
struct ieee80211_rx_status *rx_status;
struct ieee80211_sta *pubsta;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
struct ieee80211_rx_status *status = rx_info->rx_status;
- u8 decap = DP_RX_DECAP_TYPE_RAW;
+ u8 decap = rx_info->decap_type;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol = rxcb->is_eapol;
- if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
- !(status->flag & RX_FLAG_SKIP_MONITOR)) {
- he = skb_push(msdu, sizeof(known));
- memcpy(he, &known, sizeof(known));
- status->flag |= RX_FLAG_RADIOTAP_HE;
- }
-
- if (!(status->flag & RX_FLAG_ONLY_MONITOR))
- decap = rx_info->decap_type;
-
spin_lock_bh(&ab->base_lock);
peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index b6816b6c2c04..075912eacfaa 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -13,10 +13,9 @@
#include "mac.h"
static enum hal_tcl_encap_type
-ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
+ath12k_dp_tx_get_encap_type(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ath12k_base *ab = arvif->ar->ab;
if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
return HAL_TCL_ENCAP_TYPE_RAW;
@@ -305,7 +304,7 @@ tcl_ring_sel:
u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
}
- ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
+ ti.encap_type = ath12k_dp_tx_get_encap_type(ab, skb);
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 8254dc10b53b..ec77ad498b33 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1478,7 +1478,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.download_calib = true,
.supports_suspend = false,
.tcl_ring_retry = true,
- .reoq_lut_support = false,
+ .reoq_lut_support = true,
.supports_shadow_regs = false,
.num_tcl_banks = 48,
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 59ec422992d3..32519666632d 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -693,6 +693,9 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac,
if (WARN_ON(!arvif))
continue;
+ if (!arvif->is_created)
+ continue;
+
if (arvif->vdev_id == arvif_iter->vdev_id &&
arvif->ar == arvif_iter->ar) {
arvif_iter->arvif = arvif;
@@ -1392,7 +1395,7 @@ err:
return ret;
}
-static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath12k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
return 0;
}
@@ -1755,7 +1758,7 @@ static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac,
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif = &ahvif->deflink;
- if (vif->type != NL80211_IFTYPE_STATION)
+ if (vif->type != NL80211_IFTYPE_STATION || !arvif->is_created)
return;
if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
@@ -1778,16 +1781,16 @@ static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
u32 *vdev_id = data;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif = &ahvif->deflink;
- struct ath12k *ar = arvif->ar;
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ieee80211_hw *hw;
- if (arvif->vdev_id != *vdev_id)
+ if (!arvif->is_created || arvif->vdev_id != *vdev_id)
return;
if (!arvif->is_up)
return;
ieee80211_beacon_loss(vif);
+ hw = ath12k_ar_to_hw(arvif->ar);
/* Firmware doesn't report beacon loss events repeatedly. If AP probe
* (done by mac80211) succeeds but beacons do not resume then it
@@ -3496,7 +3499,7 @@ static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
/* If this is the first link arvif being created for an ML VIF
* use the preallocated deflink memory except for scan arvifs
*/
- if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
+ if (!ahvif->links_map && link_id < ATH12K_FIRST_SCAN_LINK) {
arvif = &ahvif->deflink;
if (vif->type == NL80211_IFTYPE_STATION)
@@ -4149,8 +4152,9 @@ ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
band = NL80211_BAND_6GHZ;
for_each_ar(ah, ar, i) {
- /* TODO 5 GHz low high split changes */
- if (ar->mac.sbands[band].channels)
+ if (ar->mac.sbands[band].channels &&
+ center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
+ center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
return ar;
}
@@ -4274,6 +4278,23 @@ static void ath12k_scan_timeout_work(struct work_struct *work)
wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
}
+static void ath12k_mac_scan_send_complete(struct ath12k *ar,
+ struct cfg80211_scan_info *info)
+{
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k *partner_ar;
+ int i;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ for_each_ar(ah, partner_ar, i)
+ if (partner_ar != ar &&
+ partner_ar->scan.state == ATH12K_SCAN_RUNNING)
+ return;
+
+ ieee80211_scan_completed(ah->hw, info);
+}
+
static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct ath12k *ar = container_of(work, struct ath12k,
@@ -4312,7 +4333,7 @@ work_complete:
ATH12K_SCAN_STARTING)),
};
- ieee80211_scan_completed(ar->ah->hw, &info);
+ ath12k_mac_scan_send_complete(ar, &info);
}
ar->scan.state = ATH12K_SCAN_IDLE;
@@ -4488,11 +4509,12 @@ ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
struct ath12k_link_vif *arvif;
struct ath12k_hw *ah = ahvif->ah;
unsigned long links = ahvif->links_map;
+ unsigned long scan_links_map;
u8 link_id;
lockdep_assert_wiphy(ah->hw->wiphy);
- for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ for_each_set_bit(link_id, &links, ATH12K_NUM_MAX_LINKS) {
arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
if (!arvif || !arvif->is_created)
@@ -4502,18 +4524,30 @@ ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
return link_id;
}
- /* input ar is not assigned to any of the links of ML VIF, use scan
- * link (15) for scan vdev creation.
+ /* The input ar is not assigned to any of the links of the ML VIF, so use
+ * the next available scan link for scan vdev creation. There are cases
+ * where a single scan request needs to be split in the driver and issued
+ * to firmware as separate scan requests, one per device.
*/
- return ATH12K_DEFAULT_SCAN_LINK;
+
+ /* Unset all non-scan links (0-14) in scan_links_map so that __ffs() will
+ * choose an available link among the scan links (i.e. link id >= 15)
+ */
+ scan_links_map = ~ahvif->links_map & ATH12K_SCAN_LINKS_MASK;
+ if (scan_links_map)
+ return __ffs(scan_links_map);
+
+ return ATH12K_FIRST_SCAN_LINK;
}
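
The link selection above is a small bitmask exercise: links_map tracks every link id in use, the scan links occupy the bit range starting at IEEE80211_MLD_MAX_NUM_LINKS, and the lowest free scan link is the first set bit of the inverted map restricted to that range. For instance, with a data link on bit 0 and scan link 15 already busy, ~links_map & ATH12K_SCAN_LINKS_MASK leaves bit 16 as the lowest set bit. A condensed sketch of the same logic:

/* Condensed form of the selection performed above. */
static u8 pick_scan_link(unsigned long links_map)
{
	unsigned long free_scan_links = ~links_map & ATH12K_SCAN_LINKS_MASK;

	if (free_scan_links)
		return __ffs(free_scan_links);

	/* No free scan link: fall back to the first one, as the driver does. */
	return ATH12K_FIRST_SCAN_LINK;
}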
-static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *hw_req)
+static int ath12k_mac_initiate_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req,
+ int n_channels,
+ struct ieee80211_channel **chan_list,
+ struct ath12k *ar)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_link_vif *arvif;
struct cfg80211_scan_request *req = &hw_req->req;
@@ -4527,18 +4561,18 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
arvif = &ahvif->deflink;
- /* Since the targeted scan device could depend on the frequency
- * requested in the hw_req, select the corresponding radio
- */
- ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
- if (!ar)
- return -EINVAL;
-
/* check if any of the links of ML VIF is already started on
* radio(ar) corresponding to given scan frequency and use it,
- * if not use scan link (link 15) for scan purpose.
+ * if not use scan link (link id >= 15) for scan purpose.
*/
link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
+ /* All scan links are occupied. Ideally this shouldn't happen, as
+ * mac80211 won't schedule a scan for the same band until the ongoing
+ * scan completes; don't try to exceed the max links just in case it does.
+ */
+ if (link_id >= ATH12K_NUM_MAX_LINKS)
+ return -EBUSY;
+
arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac link ID %d selected for scan",
@@ -4629,8 +4663,8 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
arg->scan_f_passive = 1;
}
- if (req->n_channels) {
- arg->num_chan = req->n_channels;
+ if (n_channels) {
+ arg->num_chan = n_channels;
arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
GFP_KERNEL);
if (!arg->chan_list) {
@@ -4639,7 +4673,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
}
for (i = 0; i < arg->num_chan; i++)
- arg->chan_list[i] = req->channels[i]->center_freq;
+ arg->chan_list[i] = chan_list[i]->center_freq;
}
ret = ath12k_start_scan(ar, arg);
@@ -4658,13 +4692,6 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
- /* As per cfg80211/mac80211 scan design, it allows only one
- * scan at a time. Hence last_scan link id is used for
- * tracking the link id on which the scan is been done on
- * this vif.
- */
- ahvif->last_scan_link = arvif->link_id;
-
/* Add a margin to account for event/command processing */
ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg->max_scan_time +
@@ -4685,25 +4712,108 @@ exit:
return ret;
}
+static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ieee80211_channel **chan_list, *chan;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ unsigned long links_map, link_id;
+ struct ath12k_link_vif *arvif;
+ struct ath12k *ar, *scan_ar;
+ int i, j, ret = 0;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ chan_list = kcalloc(hw_req->req.n_channels, sizeof(*chan_list), GFP_KERNEL);
+ if (!chan_list)
+ return -ENOMEM;
+
+ /* The same scan request can carry channels that belong to different
+ * underlying radios, since mac80211 sees them as a single band. In that
+ * case split the hw_req by frequency range and schedule a scan on each
+ * corresponding radio.
+ */
+ for_each_ar(ah, ar, i) {
+ int n_chans = 0;
+
+ for (j = 0; j < hw_req->req.n_channels; j++) {
+ chan = hw_req->req.channels[j];
+ scan_ar = ath12k_mac_select_scan_device(hw, vif,
+ chan->center_freq);
+ if (!scan_ar) {
+ ath12k_hw_warn(ah, "unable to select scan device for freq %d\n",
+ chan->center_freq);
+ ret = -EINVAL;
+ goto abort;
+ }
+ if (ar != scan_ar)
+ continue;
+
+ chan_list[n_chans++] = chan;
+ }
+ if (n_chans) {
+ ret = ath12k_mac_initiate_hw_scan(hw, vif, hw_req, n_chans,
+ chan_list, ar);
+ if (ret)
+ goto abort;
+ }
+ }
+abort:
+ /* If any of the parallel scans fails to start, abort them all and
+ * remove the scan interfaces that were created. Report the whole scan
+ * as failed, since mac80211 treats this as a single scan request.
+ */
+ if (ret) {
+ ath12k_hw_warn(ah, "Scan failed %d, cleanup all scan vdevs\n", ret);
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map, ATH12K_NUM_MAX_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif)
+ continue;
+
+ ar = arvif->ar;
+ if (ar->scan.arvif == arvif) {
+ wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.arvif = NULL;
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ spin_unlock_bh(&ar->data_lock);
+ }
+ if (link_id >= ATH12K_FIRST_SCAN_LINK) {
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+ }
+ }
+ }
+ kfree(chan_list);
+ return ret;
+}
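+
+/* Splitting the request boils down to grouping the requested channels by the
+ * radio whose advertised frequency range covers them, then issuing one
+ * firmware scan per non-empty group. A schematic of the grouping step
+ * (helper name made up; the range check mirrors
+ * ath12k_mac_select_scan_device() above), illustrative only:
+ */
static int collect_channels_for_radio(struct ath12k *ar,
				      struct cfg80211_scan_request *req,
				      struct ieee80211_channel **out)
{
	int i, n = 0;

	for (i = 0; i < req->n_channels; i++) {
		u32 freq = req->channels[i]->center_freq;

		if (freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
		    freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
			out[n++] = req->channels[i];
	}

	return n;	/* number of channels this radio should scan */
}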
+
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- u16 link_id = ahvif->last_scan_link;
+ unsigned long link_id, links_map = ahvif->links_map;
struct ath12k_link_vif *arvif;
struct ath12k *ar;
lockdep_assert_wiphy(hw->wiphy);
- arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
- if (!arvif || arvif->is_started)
- return;
+ for_each_set_bit(link_id, &links_map, ATH12K_NUM_MAX_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || arvif->is_started)
+ continue;
- ar = arvif->ar;
+ ar = arvif->ar;
- ath12k_scan_abort(ar);
+ ath12k_scan_abort(ar);
- cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ }
}
static int ath12k_install_key(struct ath12k_link_vif *arvif,
@@ -4719,14 +4829,13 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
.key_len = key->keylen,
.key_data = key->key,
.key_flags = flags,
+ .ieee80211_key_cipher = key->cipher,
.macaddr = macaddr,
};
struct ath12k_vif *ahvif = arvif->ahvif;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
- reinit_completion(&ar->install_key_done);
-
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 0;
@@ -4735,7 +4844,7 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
/* arg.key_cipher = WMI_CIPHER_NONE; */
arg.key_len = 0;
arg.key_data = NULL;
- goto install;
+ goto check_order;
}
switch (key->cipher) {
@@ -4763,19 +4872,82 @@ static int ath12k_install_key(struct ath12k_link_vif *arvif,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+check_order:
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arg.key_flags == WMI_KEY_GROUP) {
+ if (cmd == SET_KEY) {
+ if (arvif->pairwise_key_done) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "vdev %u pairwise key done, go install group key\n",
+ arg.vdev_id);
+ goto install;
+ } else {
+ /* WCN7850 firmware requires pairwise key to be installed
+ * before group key. In case group key comes first, cache
+ * it and return. Will revisit it once pairwise key gets
+ * installed.
+ */
+ arvif->group_key = arg;
+ arvif->group_key_valid = true;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "vdev %u group key before pairwise key, cache and skip\n",
+ arg.vdev_id);
+
+ ret = 0;
+ goto out;
+ }
+ } else {
+ arvif->group_key_valid = false;
+ }
+ }
+
install:
- ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg);
+ reinit_completion(&ar->install_key_done);
+ ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg);
if (ret)
return ret;
if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
return -ETIMEDOUT;
- if (ether_addr_equal(macaddr, arvif->bssid))
- ahvif->key_cipher = key->cipher;
+ if (ether_addr_equal(arg.macaddr, arvif->bssid))
+ ahvif->key_cipher = arg.ieee80211_key_cipher;
- return ar->install_key_status ? -EINVAL : 0;
+ if (ar->install_key_status) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arg.key_flags == WMI_KEY_PAIRWISE) {
+ if (cmd == SET_KEY) {
+ arvif->pairwise_key_done = true;
+ if (arvif->group_key_valid) {
+ /* Install cached GTK */
+ arvif->group_key_valid = false;
+ arg = arvif->group_key;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "vdev %u pairwise key done, group key ready, go install\n",
+ arg.vdev_id);
+ goto install;
+ }
+ } else {
+ arvif->pairwise_key_done = false;
+ }
+ }
+
+out:
+ if (ret) {
+ /* In case of failure userspace may not issue DISABLE_KEY
+ * but trigger a re-connection directly, so manually reset
+ * the state here.
+ */
+ arvif->group_key_valid = false;
+ arvif->pairwise_key_done = false;
+ }
+
+ return ret;
}
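
Stripped of the WMI plumbing, the ordering enforced above is a two-flag state machine per link vif: a group key that arrives before the pairwise key is parked in arvif->group_key and replayed once the pairwise install succeeds. A reduced model, where do_install() is a made-up stand-in for the WMI install plus the completion wait:

static int install_key_ordered(struct ath12k_link_vif *arvif,
			       struct wmi_vdev_install_key_arg *arg)
{
	if (arg->key_flags == WMI_KEY_GROUP && !arvif->pairwise_key_done) {
		arvif->group_key = *arg;		/* cache the GTK for later */
		arvif->group_key_valid = true;
		return 0;
	}

	if (do_install(arvif, arg))			/* hypothetical helper */
		return -EINVAL;

	if (arg->key_flags == WMI_KEY_PAIRWISE) {
		arvif->pairwise_key_done = true;
		if (arvif->group_key_valid) {
			arvif->group_key_valid = false;
			return do_install(arvif, &arvif->group_key);	/* replay cached GTK */
		}
	}

	return 0;
}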
static int ath12k_clear_peer_keys(struct ath12k_link_vif *arvif,
@@ -4869,9 +5041,9 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- flags |= WMI_KEY_PAIRWISE;
+ flags = WMI_KEY_PAIRWISE;
else
- flags |= WMI_KEY_GROUP;
+ flags = WMI_KEY_GROUP;
ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
@@ -8114,7 +8286,17 @@ static int ath12k_mac_start(struct ath12k *ar)
/* TODO: Do we need to enable ANI? */
- ath12k_reg_update_chan_list(ar, false);
+ ret = ath12k_reg_update_chan_list(ar, false);
+
+ /* For an unsupported country only this ar is turned off, without
+ * returning an error, so that the channel list can still be updated
+ * for the next ar.
+ */
+ if (ret) {
+ if (ret == -EINVAL)
+ ret = 0;
+ goto err;
+ }
ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0;
@@ -8286,6 +8468,7 @@ static void ath12k_mac_stop(struct ath12k *ar)
{
struct ath12k_hw *ah = ar->ah;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ struct ath12k_wmi_scan_chan_list_arg *arg;
int ret;
lockdep_assert_held(&ah->hw_mutex);
@@ -8300,6 +8483,7 @@ static void ath12k_mac_stop(struct ath12k *ar)
cancel_delayed_work_sync(&ar->scan.timeout);
wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk);
+ cancel_work_sync(&ar->regd_channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->rfkill_work);
cancel_work_sync(&ar->ab->update_11d_work);
@@ -8307,10 +8491,18 @@ static void ath12k_mac_stop(struct ath12k *ar)
complete(&ar->completed_11d_scan);
spin_lock_bh(&ar->data_lock);
+
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
+
+ while ((arg = list_first_entry_or_null(&ar->regd_channel_update_queue,
+ struct ath12k_wmi_scan_chan_list_arg,
+ list))) {
+ list_del(&arg->list);
+ kfree(arg);
+ }
spin_unlock_bh(&ar->data_lock);
rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
@@ -9055,7 +9247,8 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
struct ath12k_hw *ah = hw->priv;
struct ath12k *ar;
struct ath12k_base *ab;
- u8 link_id = arvif->link_id;
+ u8 link_id = arvif->link_id, scan_link_id;
+ unsigned long scan_link_map;
int ret;
lockdep_assert_wiphy(hw->wiphy);
@@ -9074,12 +9267,16 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
* and now we want to create for actual usage.
*/
if (ieee80211_vif_is_mld(vif)) {
- scan_arvif = wiphy_dereference(hw->wiphy,
- ahvif->link[ATH12K_DEFAULT_SCAN_LINK]);
- if (scan_arvif && scan_arvif->ar == ar) {
- ar->scan.arvif = NULL;
- ath12k_mac_remove_link_interface(hw, scan_arvif);
- ath12k_mac_unassign_link_vif(scan_arvif);
+ scan_link_map = ahvif->links_map & ATH12K_SCAN_LINKS_MASK;
+ for_each_set_bit(scan_link_id, &scan_link_map, ATH12K_NUM_MAX_LINKS) {
+ scan_arvif = wiphy_dereference(hw->wiphy,
+ ahvif->link[scan_link_id]);
+ if (scan_arvif && scan_arvif->ar == ar) {
+ ar->scan.arvif = NULL;
+ ath12k_mac_remove_link_interface(hw, scan_arvif);
+ ath12k_mac_unassign_link_vif(scan_arvif);
+ break;
+ }
}
}
@@ -9314,7 +9511,7 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
.aborted = true,
};
- ieee80211_scan_completed(ar->ah->hw, &info);
+ ath12k_mac_scan_send_complete(ar, &info);
}
ar->scan.state = ATH12K_SCAN_IDLE;
@@ -9354,7 +9551,8 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
ar->filter_flags = *total_flags;
}
-static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
int antennas_rx = 0, antennas_tx = 0;
@@ -9374,7 +9572,8 @@ static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *
return 0;
}
-static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -9818,7 +10017,7 @@ ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
if (WARN_ON(!arvif))
continue;
- if (arvif->ar != arg->ar)
+ if (!arvif->is_created || arvif->ar != arg->ar)
continue;
link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
@@ -9853,7 +10052,7 @@ ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
if (WARN_ON(!arvif))
continue;
- if (arvif->ar != arg->ar)
+ if (!arvif->is_created || arvif->ar != arg->ar)
continue;
link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
@@ -10735,7 +10934,8 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
/* mac80211 stores device specific RTS/Fragmentation threshold value,
* this is set interface specific to firmware from ath12k driver
*/
-static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
@@ -10760,7 +10960,8 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return ret;
}
-static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
/* Even though there's a WMI vdev param for fragmentation threshold no
* known firmware actually implements it. Moreover it is not possible to
@@ -11246,6 +11447,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
struct wmi_set_current_country_arg arg = {};
memcpy(&arg.alpha2, ar->alpha2, 2);
+ reinit_completion(&ar->regd_update_completed);
ath12k_wmi_send_set_current_country_cmd(ar, &arg);
}
@@ -11757,6 +11959,32 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
return 0;
}
+static int ath12k_mac_update_band(struct ath12k *ar,
+ struct ieee80211_supported_band *orig_band,
+ struct ieee80211_supported_band *new_band)
+{
+ int i;
+
+ if (!orig_band || !new_band)
+ return -EINVAL;
+
+ if (orig_band->band != new_band->band)
+ return -EINVAL;
+
+ for (i = 0; i < new_band->n_channels; i++) {
+ if (new_band->channels[i].flags & IEEE80211_CHAN_DISABLED)
+ continue;
+ /* An enabled channel in new_band should not be already enabled
+ * in the orig_band
+ */
+ if (WARN_ON(!(orig_band->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)))
+ return -EINVAL;
+ orig_band->channels[i].flags &= ~IEEE80211_CHAN_DISABLED;
+ }
+ return 0;
+}
+
static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
u32 supported_bands,
struct ieee80211_supported_band *bands[])
@@ -11767,6 +11995,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
u32 phy_id, freq_low, freq_high;
struct ath12k_hw *ah = ar->ah;
void *channels;
+ int ret;
BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
ARRAY_SIZE(ath12k_5ghz_channels) +
@@ -11788,7 +12017,6 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_g_rates_size;
band->bitrates = ath12k_g_rates;
- bands[NL80211_BAND_2GHZ] = band;
if (ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2GHZ_CAP);
@@ -11805,6 +12033,22 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
reg_cap->high_2ghz_chan);
ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+
+ if (!bands[NL80211_BAND_2GHZ]) {
+ bands[NL80211_BAND_2GHZ] = band;
+ } else {
+ /* Split mac in same band under same wiphy */
+ ret = ath12k_mac_update_band(ar, bands[NL80211_BAND_2GHZ], band);
+ if (ret) {
+ kfree(channels);
+ band->channels = NULL;
+ return ret;
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 2 GHz split mac with start freq %d end freq %d",
+ ar->pdev->pdev_id,
+ KHZ_TO_MHZ(ar->freq_range.start_freq),
+ KHZ_TO_MHZ(ar->freq_range.end_freq));
+ }
}
if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
@@ -11823,7 +12067,6 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- bands[NL80211_BAND_6GHZ] = band;
freq_low = max(reg_cap->low_5ghz_chan,
ab->reg_freq_6ghz.start_freq);
@@ -11836,6 +12079,26 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
ath12k_mac_update_freq_range(ar, freq_low, freq_high);
ah->use_6ghz_regd = true;
+
+ if (!bands[NL80211_BAND_6GHZ]) {
+ bands[NL80211_BAND_6GHZ] = band;
+ } else {
+ /* Split mac in same band under same wiphy */
+ ret = ath12k_mac_update_band(ar,
+ bands[NL80211_BAND_6GHZ],
+ band);
+ if (ret) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ ar->mac.sbands[NL80211_BAND_2GHZ].channels = NULL;
+ kfree(channels);
+ band->channels = NULL;
+ return ret;
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 6 GHz split mac with start freq %d end freq %d",
+ ar->pdev->pdev_id,
+ KHZ_TO_MHZ(ar->freq_range.start_freq),
+ KHZ_TO_MHZ(ar->freq_range.end_freq));
+ }
}
if (reg_cap->low_5ghz_chan < ATH12K_MIN_6GHZ_FREQ) {
@@ -11854,7 +12117,6 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- bands[NL80211_BAND_5GHZ] = band;
if (ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5GHZ_CAP);
@@ -11871,6 +12133,28 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
reg_cap->high_5ghz_chan);
ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+
+ if (!bands[NL80211_BAND_5GHZ]) {
+ bands[NL80211_BAND_5GHZ] = band;
+ } else {
+ /* Split mac in same band under same wiphy */
+ ret = ath12k_mac_update_band(ar,
+ bands[NL80211_BAND_5GHZ],
+ band);
+ if (ret) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ ar->mac.sbands[NL80211_BAND_2GHZ].channels = NULL;
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ ar->mac.sbands[NL80211_BAND_6GHZ].channels = NULL;
+ kfree(channels);
+ band->channels = NULL;
+ return ret;
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 5 GHz split mac with start freq %d end freq %d",
+ ar->pdev->pdev_id,
+ KHZ_TO_MHZ(ar->freq_range.start_freq),
+ KHZ_TO_MHZ(ar->freq_range.end_freq));
+ }
}
}
@@ -12204,6 +12488,7 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
int i;
for_each_ar(ah, ar, i) {
+ cancel_work_sync(&ar->regd_channel_update_work);
cancel_work_sync(&ar->regd_update_work);
ath12k_debugfs_unregister(ar);
ath12k_fw_stats_reset(ar);
@@ -12462,6 +12747,16 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
goto err_cleanup_if_combs;
}
+ /* Boot-time regulatory updates have already been processed.
+ * Mark them as complete now, because after registration,
+ * cfg80211 will notify us again if there are any pending hints.
+ * We need to wait for those hints to be processed, so it's
+ * important to mark the boot-time updates as complete before
+ * proceeding with registration.
+ */
+ for_each_ar(ah, ar, i)
+ complete(&ar->regd_update_completed);
+
ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
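
Pieced together from the hunks in this series, the regd_update_completed lifecycle is: init_completion() in ath12k_mac_setup(), complete() here once boot-time regulatory data has been consumed, reinit_completion() whenever a new country or regulatory hint is pushed to firmware, and a bounded wait in ath12k_regd_update() before the regdomain is rebuilt. The waiting side, reduced to its essentials (sketch only, mirroring the reg.c hunk further below):

static void regd_wait_for_hint(struct ath12k *ar)
{
	long left = wait_for_completion_timeout(&ar->regd_update_completed,
						ATH12K_REG_UPDATE_TIMEOUT_HZ);

	if (!left)
		ath12k_warn(ar->ab, "regulatory update wait timed out\n");
	/* Continue either way: the boot-time data is still usable. */
}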
@@ -12489,6 +12784,9 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
memcpy(&current_cc.alpha2, ab->new_alpha2, 2);
memcpy(&ar->alpha2, ab->new_alpha2, 2);
+
+ reinit_completion(&ar->regd_update_completed);
+
ret = ath12k_wmi_send_set_current_country_cmd(ar, &current_cc);
if (ret)
ath12k_warn(ar->ab,
@@ -12561,9 +12859,12 @@ static void ath12k_mac_setup(struct ath12k *ar)
init_completion(&ar->scan.on_channel);
init_completion(&ar->mlo_setup_done);
init_completion(&ar->completed_11d_scan);
+ init_completion(&ar->regd_update_completed);
INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work);
+ INIT_WORK(&ar->regd_channel_update_work, ath12k_regd_update_chan_list_work);
+ INIT_LIST_HEAD(&ar->regd_channel_update_queue);
INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index cc81b1f5680f..473611bfccdc 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -51,8 +51,11 @@ struct ath12k_generic_iter {
/* Default link after the IEEE802.11 defined Max link id limit
* for driver usage purpose.
*/
-#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
-#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
+#define ATH12K_FIRST_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
+#define ATH12K_SCAN_MAX_LINKS ATH12K_GROUP_MAX_RADIO
+/* Define 1 scan link for each radio for parallel scan purposes */
+#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + ATH12K_SCAN_MAX_LINKS)
+#define ATH12K_SCAN_LINKS_MASK GENMASK(ATH12K_NUM_MAX_LINKS, IEEE80211_MLD_MAX_NUM_LINKS)
#define ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE 2
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
index 84cccf7d91e7..59589748f1a8 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.c
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -124,7 +125,7 @@ static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
WARN_ON(!rcu_read_lock_any_held());
arvif = &ahvif->deflink;
- if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
+ if (!arvif->is_created || arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
return;
ath12k_p2p_noa_update(arvif, arg->noa);
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index 2598b39d5d7e..96254d6fc675 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -65,7 +65,7 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
for_each_ar(ah, ar, i) {
ret = ath12k_reg_update_chan_list(ar, true);
- if (ret) {
+ if (ret && ret != -EINVAL) {
ath12k_warn(ar->ab,
"failed to update chan list for pdev %u, ret %d\n",
i, ret);
@@ -102,6 +102,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
/* Send the reg change request to all the radios */
for_each_ar(ah, ar, i) {
+ reinit_completion(&ar->regd_update_completed);
+
if (ar->ab->hw_params->current_cc_support) {
memcpy(&current_arg.alpha2, request->alpha2, 2);
memcpy(&ar->alpha2, &current_arg.alpha2, 2);
@@ -137,32 +139,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait)
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret, left;
-
- if (wait && ar->state_11d == ATH12K_11D_RUNNING) {
- left = wait_for_completion_timeout(&ar->completed_11d_scan,
- ATH12K_SCAN_TIMEOUT_HZ);
- if (!left) {
- ath12k_dbg(ar->ab, ATH12K_DBG_REG,
- "failed to receive 11d scan complete: timed out\n");
- ar->state_11d = ATH12K_11D_IDLE;
- }
- ath12k_dbg(ar->ab, ATH12K_DBG_REG,
- "reg 11d scan wait left time %d\n", left);
- }
-
- if (wait &&
- (ar->scan.state == ATH12K_SCAN_STARTING ||
- ar->scan.state == ATH12K_SCAN_RUNNING)) {
- left = wait_for_completion_timeout(&ar->scan.completed,
- ATH12K_SCAN_TIMEOUT_HZ);
- if (!left)
- ath12k_dbg(ar->ab, ATH12K_DBG_REG,
- "failed to receive hw scan complete: timed out\n");
-
- ath12k_dbg(ar->ab, ATH12K_DBG_REG,
- "reg hw scan wait left time %d\n", left);
- }
+ int i, ret = 0;
if (ar->ah->state == ATH12K_HW_STATE_RESTARTING)
return 0;
@@ -176,13 +153,22 @@ int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait)
if (bands[band]->channels[i].flags &
IEEE80211_CHAN_DISABLED)
continue;
+ /* Skip channels that are not in the current radio's range */
+ if (bands[band]->channels[i].center_freq <
+ KHZ_TO_MHZ(ar->freq_range.start_freq) ||
+ bands[band]->channels[i].center_freq >
+ KHZ_TO_MHZ(ar->freq_range.end_freq))
+ continue;
num_channels++;
}
}
- if (WARN_ON(!num_channels))
+ if (!num_channels) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "pdev is not supported for this country\n");
return -EINVAL;
+ }
arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL);
@@ -204,6 +190,13 @@ int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait)
if (channel->flags & IEEE80211_CHAN_DISABLED)
continue;
+ /* Skip channels that are not in the current radio's range */
+ if (bands[band]->channels[i].center_freq <
+ KHZ_TO_MHZ(ar->freq_range.start_freq) ||
+ bands[band]->channels[i].center_freq >
+ KHZ_TO_MHZ(ar->freq_range.end_freq))
+ continue;
+
/* TODO: Set to true/false based on some condition? */
ch->allow_ht = true;
ch->allow_vht = true;
@@ -244,6 +237,16 @@ int ath12k_reg_update_chan_list(struct ath12k *ar, bool wait)
}
}
+ if (wait) {
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&arg->list, &ar->regd_channel_update_queue);
+ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->ab->workqueue, &ar->regd_channel_update_work);
+
+ return 0;
+ }
+
ret = ath12k_wmi_send_scan_chan_list_cmd(ar, arg);
kfree(arg);
@@ -272,9 +275,19 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
+ long time_left;
ab = ar->ab;
+ time_left = wait_for_completion_timeout(&ar->regd_update_completed,
+ ATH12K_REG_UPDATE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ab, "timed out waiting for regulatory update\n");
+ /* Continue anyway: even on timeout, the boot-time regulatory
+ * data is still available to process.
+ */
+ }
+
supported_bands = ar->pdev->cap.supported_bands;
reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
@@ -764,6 +777,54 @@ ret:
return new_regd;
}
+void ath12k_regd_update_chan_list_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ regd_channel_update_work);
+ struct ath12k_wmi_scan_chan_list_arg *arg;
+ struct list_head local_update_list;
+ int left;
+
+ INIT_LIST_HEAD(&local_update_list);
+
+ spin_lock_bh(&ar->data_lock);
+ list_splice_tail_init(&ar->regd_channel_update_queue, &local_update_list);
+ spin_unlock_bh(&ar->data_lock);
+
+ while ((arg = list_first_entry_or_null(&local_update_list,
+ struct ath12k_wmi_scan_chan_list_arg,
+ list))) {
+ if (ar->state_11d != ATH12K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH12K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH12K_11D_IDLE;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if ((ar->scan.state == ATH12K_SCAN_STARTING ||
+ ar->scan.state == ATH12K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH12K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ ath12k_wmi_send_scan_chan_list_cmd(ar, arg);
+ list_del(&arg->list);
+ kfree(arg);
+ }
+}
+
void ath12k_regd_update_work(struct work_struct *work)
{
struct ath12k *ar = container_of(work, struct ath12k,
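The new ath12k_regd_update_chan_list_work() above follows the usual produce-under-lock, splice-and-drain workqueue pattern. A generic sketch of that pattern is shown below; the names (my_ctx, item, queue_item, drain_work) are illustrative and not taken from the driver.

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct item {
		struct list_head list;
		/* payload */
	};

	struct my_ctx {
		spinlock_t lock;
		struct list_head pending;
		struct workqueue_struct *wq;
		struct work_struct work;
	};

	/* producer: queue one item and kick the worker */
	static void queue_item(struct my_ctx *ctx, struct item *it)
	{
		spin_lock_bh(&ctx->lock);
		list_add_tail(&it->list, &ctx->pending);
		spin_unlock_bh(&ctx->lock);

		queue_work(ctx->wq, &ctx->work);
	}

	/* consumer: splice onto a private list so the lock is not held
	 * while sending, which may sleep
	 */
	static void drain_work(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
		struct item *it, *tmp;
		LIST_HEAD(local);

		spin_lock_bh(&ctx->lock);
		list_splice_tail_init(&ctx->pending, &local);
		spin_unlock_bh(&ctx->lock);

		list_for_each_entry_safe(it, tmp, &local, list) {
			/* send/process the item here, then free it */
			list_del(&it->list);
			kfree(it);
		}
	}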
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
index 8af8e9ba462e..da5128b8c97f 100644
--- a/drivers/net/wireless/ath/ath12k/reg.h
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -13,6 +13,8 @@
struct ath12k_base;
struct ath12k;
+#define ATH12K_REG_UPDATE_TIMEOUT_HZ (3 * HZ)
+
#define ATH12K_2GHZ_MAX_FREQUENCY 2495
#define ATH12K_5GHZ_MAX_FREQUENCY 5920
@@ -113,6 +115,7 @@ int ath12k_reg_handle_chan_list(struct ath12k_base *ab,
struct ath12k_reg_info *reg_info,
enum wmi_vdev_type vdev_type,
enum ieee80211_ap_reg_power power_type);
+void ath12k_regd_update_chan_list_work(struct work_struct *work);
enum wmi_reg_6g_ap_type
ath12k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
enum ath12k_reg_status ath12k_reg_validate_reg_info(struct ath12k_base *ab,
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 465f877fc0fb..b38f22118d73 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -6445,9 +6445,16 @@ static int freq_to_idx(struct ath12k *ar, int freq)
if (!sband)
continue;
- for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ for (ch = 0; ch < sband->n_channels; ch++, idx++) {
+ if (sband->channels[ch].center_freq <
+ KHZ_TO_MHZ(ar->freq_range.start_freq) ||
+ sband->channels[ch].center_freq >
+ KHZ_TO_MHZ(ar->freq_range.end_freq))
+ continue;
+
if (sband->channels[ch].center_freq == freq)
goto exit;
+ }
}
exit:
@@ -6677,7 +6684,8 @@ static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_reg_info *reg_info;
- u8 pdev_idx;
+ struct ath12k *ar = NULL;
+ u8 pdev_idx = 255;
int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
@@ -6732,7 +6740,7 @@ mem_free:
kfree(reg_info);
if (ret == ATH12K_REG_STATUS_VALID)
- return ret;
+ goto out;
fallback:
/* Fallback to older reg (by sending previous country setting
@@ -6746,6 +6754,18 @@ fallback:
WARN_ON(1);
out:
+ /* In some error cases, even a valid pdev_idx might not be available */
+ if (pdev_idx != 255)
+ ar = ab->pdevs[pdev_idx].ar;
+
+ /* During the boot-time update, 'ar' might not be allocated,
+ * so the completion cannot be marked at that point.
+ * This boot-time update is handled in ath12k_mac_hw_register()
+ * before registering the hardware.
+ */
+ if (ar)
+ complete(&ar->regd_update_completed);
+
return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index c640ffa180c8..8627154f1680 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -3760,6 +3760,7 @@ struct wmi_vdev_install_key_arg {
u32 key_idx;
u32 key_flags;
u32 key_cipher;
+ u32 ieee80211_key_cipher;
u32 key_len;
u32 key_txmic_len;
u32 key_rxmic_len;
@@ -3948,6 +3949,7 @@ struct wmi_stop_scan_cmd {
} __packed;
struct ath12k_wmi_scan_chan_list_arg {
+ struct list_head list;
u32 pdev_id;
u16 nallchans;
struct ath12k_wmi_channel_arg channel[];
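The new list member sits before the flexible channel[] array, so the existing struct_size()-based allocation in reg.c keeps working unchanged. A minimal allocation sketch, written as an in-driver helper with an illustrative name (not part of this patch):

	static struct ath12k_wmi_scan_chan_list_arg *
	example_alloc_chan_list(u16 num_channels)
	{
		struct ath12k_wmi_scan_chan_list_arg *arg;

		/* struct_size() accounts for the trailing channel[] array
		 * and checks the multiplication for overflow
		 */
		arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL);
		if (!arg)
			return NULL;

		arg->nallchans = num_channels;
		INIT_LIST_HEAD(&arg->list);	/* the member added above */

		return arg;
	}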
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d81b2ad0b095..eca8145d3874 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -192,7 +192,7 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
* TODO: Phy disable/diversity etc
*/
static int
-ath5k_config(struct ieee80211_hw *hw, u32 changed)
+ath5k_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ath5k_hw *ah = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -686,6 +686,7 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
* ath5k_set_coverage_class - Set IEEE 802.11 coverage class
*
* @hw: struct ieee80211_hw pointer
+ * @radio_idx: Radio index
* @coverage_class: IEEE 802.11 coverage class number
*
* Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
@@ -693,7 +694,8 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
* reset.
*/
static void
-ath5k_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+ath5k_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class)
{
struct ath5k_hw *ah = hw->priv;
@@ -704,7 +706,8 @@ ath5k_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
static int
-ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+ath5k_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant,
+ u32 rx_ant)
{
struct ath5k_hw *ah = hw->priv;
@@ -721,7 +724,8 @@ ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
static int
-ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+ath5k_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath5k_hw *ah = hw->priv;
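Most of the remaining hunks in this series are the same mechanical change: mac80211 and cfg80211 callbacks gain an int radio_idx parameter. A hedged sketch of a converted single-radio callback follows; the foo_* names are placeholders, and -1 is what callers in this series pass when no specific radio is targeted (see the b43_op_config(hw, -1, ~0) hunks later in the diff).

	#include <net/mac80211.h>

	struct foo_priv {
		u32 rts_threshold;
	};

	static int foo_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
					 u32 value)
	{
		struct foo_priv *priv = hw->priv;

		/* single-radio hardware: radio_idx (-1 or 0 here) does not
		 * change which radio gets programmed
		 */
		priv->rts_threshold = value;

		return 0;
	}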
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 8c2e8081112e..88f0197fc041 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1376,7 +1376,8 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
GFP_KERNEL);
}
-static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
struct ath6kl_vif *vif;
@@ -1405,6 +1406,7 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ int radio_idx,
enum nl80211_tx_power_setting type,
int mbm)
{
@@ -1441,6 +1443,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ int radio_idx,
unsigned int link_id,
int *dbm)
{
@@ -3242,7 +3245,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
wait, buf, len, no_cck);
}
-static int ath6kl_get_antenna(struct wiphy *wiphy,
+static int ath6kl_get_antenna(struct wiphy *wiphy, int radio_idx,
u32 *tx_ant, u32 *rx_ant)
{
struct ath6kl *ar = wiphy_priv(wiphy);
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c
index 01d6d3205a65..e4df89f2fa03 100644
--- a/drivers/net/wireless/ath/ath9k/common-beacon.c
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "common.h"
#define FUDGE 2
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
index 7aefb79f6bed..1ea070200e4a 100644
--- a/drivers/net/wireless/ath/ath9k/common-debug.c
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "common.h"
static ssize_t read_file_modal_eeprom(struct file *file, char __user *user_buf,
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
index 7c13a1deb3ac..da102c791712 100644
--- a/drivers/net/wireless/ath/ath9k/common-init.c
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -16,6 +16,7 @@
/* We use the hw_value as an index into our private channel structure */
+#include <linux/export.h>
#include "common.h"
#define CHAN2G(_freq, _idx) { \
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 300d178830ad..ca01a07f6630 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/relay.h>
#include <linux/random.h>
#include "ath9k.h"
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 099f3d45c594..ffcf2276eb92 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -18,6 +18,7 @@
* Module for common driver code between ath9k and ath9k_htc
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 321ff54fdb42..598b3a2ad818 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include "ath9k.h"
#include "hw.h"
#include "dynack.h"
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 19600018e562..0d6272ac0dac 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1172,7 +1172,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
-static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
+static int ath9k_htc_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -1737,12 +1737,14 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
-static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
return 0;
}
static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
+ int radio_idx,
s16 coverage_class)
{
struct ath9k_htc_priv *priv = hw->priv;
@@ -1841,8 +1843,8 @@ struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv)
}
-static int ath9k_htc_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
- u32 *rx_ant)
+static int ath9k_htc_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath9k_htc_priv *priv = hw->priv;
struct base_eep_header *pBase = ath9k_htc_get_eeprom_base(priv);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index f9a774bd0e13..14de62c1a32b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c56f4f3b8990..740a6fc7b067 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1484,7 +1484,7 @@ static void ath9k_disable_ps(struct ath_softc *sc)
ath_dbg(common, PS, "PowerSave disabled\n");
}
-static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+static int ath9k_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
@@ -2114,6 +2114,7 @@ static void ath9k_enable_dynack(struct ath_softc *sc)
}
static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
+ int radio_idx,
s16 coverage_class)
{
struct ath_softc *sc = hw->priv;
@@ -2338,7 +2339,8 @@ static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
}
}
-static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+static int ath9k_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
@@ -2367,7 +2369,8 @@ static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
return 0;
}
-static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+static int ath9k_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ath_softc *sc = hw->priv;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 755c068e4197..a7a9345f3483 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -890,7 +890,7 @@ static void carl9170_stat_work(struct work_struct *work)
round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
}
-static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
+static int carl9170_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ar9170 *ar = hw->priv;
int err = 0;
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 89f4b0513946..d79d73738a81 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 94d08d6ae1a3..02a525645bfa 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -361,7 +361,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
return;
}
-static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+static int wcn36xx_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct wcn36xx *wcn = hw->priv;
int ret;
@@ -965,7 +965,8 @@ out:
}
/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
-static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 5473c01cbe66..7703a0933a14 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1408,7 +1408,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
return rc;
}
-static int wil_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int wil_cfg80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 6842c2b02b39..aa683eacaf38 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2002,7 +2002,7 @@ exit:
return 0;
}
-static int at76_config(struct ieee80211_hw *hw, u32 changed)
+static int at76_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 7529afd24aed..f1a77c4c445f 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -3975,7 +3975,7 @@ static void b43_set_retry_limits(struct b43_wldev *dev,
long_retry);
}
-static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
+static int b43_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
@@ -5073,7 +5073,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
* may hang the system.
*/
if (!err)
- b43_op_config(hw, ~0);
+ b43_op_config(hw, -1, ~0);
return err;
}
@@ -5248,7 +5248,7 @@ out:
}
/* reload configuration */
- b43_op_config(wl->hw, ~0);
+ b43_op_config(wl->hw, -1, ~0);
if (wl->vif)
b43_op_bss_info_changed(wl->hw, wl->vif, &wl->vif->bss_conf, ~0);
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 2370a2e6a2e3..aada342e0b80 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -2662,7 +2662,7 @@ static void b43legacy_set_retry_limits(struct b43legacy_wldev *dev,
b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0007, long_retry);
}
-static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
+static int b43legacy_op_dev_config(struct ieee80211_hw *hw, int radio_idx,
u32 changed)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b94c3619526c..40a9a8177de6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1043,6 +1043,21 @@ void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
}
}
+bool brcmf_is_apmode_operating(struct wiphy *wiphy)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_vif *vif;
+ bool ret = false;
+
+ list_for_each_entry(vif, &cfg->vif_list, list) {
+ if (brcmf_is_apmode(vif) &&
+ test_bit(BRCMF_VIF_STATUS_AP_CREATED, &vif->sme_state))
+ ret = true;
+ }
+
+ return ret;
+}
+
static void brcmf_scan_params_v2_to_v1(struct brcmf_scan_params_v2_le *params_v2_le,
struct brcmf_scan_params_le *params_le)
{
@@ -1622,7 +1637,8 @@ static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
return err;
}
-static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
@@ -2630,7 +2646,8 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, s32 mbm)
+ int radio_idx, enum nl80211_tx_power_setting type,
+ s32 mbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
@@ -2681,7 +2698,7 @@ done:
static s32
brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id, s32 *dbm)
+ int radio_idx, unsigned int link_id, s32 *dbm)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev);
@@ -5416,8 +5433,8 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "bss_enable config failed %d\n", err);
}
brcmf_set_mpc(ifp, 1);
- brcmf_configure_arp_nd_offload(ifp, true);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+ brcmf_configure_arp_nd_offload(ifp, true);
brcmf_net_setcarrier(ifp, false);
return err;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index b83485ec7b87..273c80f2d483 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -487,6 +487,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp, bool aborted,
bool fw_abort);
void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
+bool brcmf_is_apmode_operating(struct wiphy *wiphy);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 04f41c09deca..862a0336a0b5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -98,6 +98,11 @@ void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable)
s32 err;
u32 mode;
+ if (enable && brcmf_is_apmode_operating(ifp->drvr->wiphy)) {
+ brcmf_dbg(TRACE, "Skip ARP/ND offload enable when soft AP is running\n");
+ return;
+ }
+
if (enable)
mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY;
else
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 8f97562811d7..9747928a3650 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -654,17 +654,19 @@ brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
struct brcmf_core *core;
- u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
- BRCMF_PCIE_CFGREG_PM_CSR,
- BRCMF_PCIE_CFGREG_MSI_CAP,
- BRCMF_PCIE_CFGREG_MSI_ADDR_L,
- BRCMF_PCIE_CFGREG_MSI_ADDR_H,
- BRCMF_PCIE_CFGREG_MSI_DATA,
- BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
- BRCMF_PCIE_CFGREG_RBAR_CTRL,
- BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
- BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
- BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
+ static const u16 cfg_offset[] = {
+ BRCMF_PCIE_CFGREG_STATUS_CMD,
+ BRCMF_PCIE_CFGREG_PM_CSR,
+ BRCMF_PCIE_CFGREG_MSI_CAP,
+ BRCMF_PCIE_CFGREG_MSI_ADDR_L,
+ BRCMF_PCIE_CFGREG_MSI_ADDR_H,
+ BRCMF_PCIE_CFGREG_MSI_DATA,
+ BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
+ BRCMF_PCIE_CFGREG_RBAR_CTRL,
+ BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
+ BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
+ BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG
+ };
u32 i;
u32 val;
u32 lsc;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 1c3d29dca424..8ab452cf48c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -525,7 +525,8 @@ brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
spin_unlock_bh(&wl->lock);
}
-static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
+static int brcms_ops_config(struct ieee80211_hw *hw, int radio_idx,
+ u32 changed)
{
struct ieee80211_conf *conf = &hw->conf;
struct brcms_info *wl = hw->priv;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
index 0a16127bfd68..2ad085b1f492 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
@@ -83,7 +83,7 @@ void libipw_networks_age(struct libipw_device *ieee,
{
struct libipw_network *network = NULL;
unsigned long flags;
- unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
+ unsigned long age_jiffies = secs_to_jiffies(age_secs);
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(network, &ieee->network_list, list) {
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 8e58e97a148f..3588dec75ebd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1382,7 +1382,7 @@ il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
* we get a thermal update even if the uCode doesn't give us one
*/
mod_timer(&il->stats_periodic,
- jiffies + msecs_to_jiffies(recalib_seconds * 1000));
+ jiffies + secs_to_jiffies(recalib_seconds));
if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
(pkt->hdr.cmd == N_STATS)) {
@@ -1575,8 +1575,11 @@ il4965_tx_cmd_build_rate(struct il_priv *il,
|| rate_idx > RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(&il->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == NL80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ) {
rate_idx += IL_FIRST_OFDM_RATE;
+ if (rate_idx > IL_LAST_OFDM_RATE)
+ rate_idx = IL_LAST_OFDM_RATE;
+ }
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = il_rates[rate_idx].plcp;
/* Zero out flags for this packet */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 9a86688aea67..b7bd3ec4cc50 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -4990,7 +4990,7 @@ il_update_qos(struct il_priv *il)
* il_mac_config - mac80211 config callback
*/
int
-il_mac_config(struct ieee80211_hw *hw, u32 changed)
+il_mac_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct il_priv *il = hw->priv;
const struct il_channel_info *ch_info;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 52610f5e57a3..4c9836ab11dd 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1956,7 +1956,7 @@ il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
}
/* mac80211 handlers */
-int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+int il_mac_config(struct ieee80211_hw *hw, int radio_idx, u32 changed);
void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changes);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 82f577da1a8b..153a8368b412 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -97,6 +97,7 @@ config IWLWIFI_OPMODE_MODULAR
default y if IWLDVM=m
default y if IWLMVM=m
default y if IWLMLD=m
+ default y if IWLWIFI_KUNIT_TESTS=m
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM or IWLMLD"
depends on IWLDVM=n && IWLMVM=n && IWLMLD=n
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 3f476e333726..b82392978b76 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -7,9 +7,11 @@ iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-nvm-utils.o
iwlwifi-objs += iwl-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
-iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o
-iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
+
+# Bus
+iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-v2.o pcie/drv.o pcie/utils.o
+iwlwifi-objs += pcie/gen1_2/rx.o pcie/gen1_2/tx.o pcie/gen1_2/trans.o
+iwlwifi-objs += pcie/gen1_2/trans-gen2.o pcie/gen1_2/tx-gen2.o
CFLAGS_pcie/drv.o += -Wno-override-init
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 05e45fff8b36..b5ad6d635fcb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -13,7 +13,7 @@
#define IWL_BZ_UCODE_API_MAX 99
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 93
+#define IWL_BZ_UCODE_API_MIN 94
/* Memory offsets and lengths */
#define IWL_BZ_SMEM_OFFSET 0x400000
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index 45e55cef42ea..95aa27c35357 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -12,7 +12,7 @@
#define IWL_DR_UCODE_API_MAX 99
/* Lowest firmware API version supported */
-#define IWL_DR_UCODE_API_MIN 97
+#define IWL_DR_UCODE_API_MIN 98
/* Memory offsets and lengths */
#define IWL_DR_SMEM_OFFSET 0x400000
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index b2e4d4035296..12c2adb4b5c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -13,7 +13,7 @@
#define IWL_SC_UCODE_API_MAX 99
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 97
+#define IWL_SC_UCODE_API_MIN 98
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 1ebc7effcc2a..f46c65d75962 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -88,7 +88,7 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
+int iwlagn_mac_config(struct ieee80211_hw *hw, int radio_idx, u32 changed);
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -397,6 +397,8 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
* returns a (newly allocated) struct containing all the
* relevant values for driver use. The struct must be freed
* later with iwl_free_nvm_data().
+ *
+ * Return: the parsed NVM data
*/
struct iwl_nvm_data *
iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index 96ea6c8dfc89..138b11f51d00 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2023-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2023-2025 Intel Corporation
*/
/*
* Please use this file (commands.h) only for uCode API definitions.
@@ -614,7 +614,7 @@ struct iwl_rxon_time_cmd {
* REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
*/
/**
- * struct iwl5000_channel_switch_cmd
+ * struct iwl5000_channel_switch_cmd - channel switch command (5000 series)
* @band: 0- 5.2GHz, 1- 2.4GHz
* @expect_beacon: 0- resume transmits after channel switch
* 1- wait for beacon to resume transmits
@@ -635,7 +635,7 @@ struct iwl5000_channel_switch_cmd {
} __packed;
/**
- * struct iwl6000_channel_switch_cmd
+ * struct iwl6000_channel_switch_cmd - channel switch command (6000 series)
* @band: 0- 5.2GHz, 1- 2.4GHz
* @expect_beacon: 0- resume transmits after channel switch
* 1- wait for beacon to resume transmits
@@ -791,7 +791,7 @@ struct iwl_keyinfo {
} __packed;
/**
- * struct sta_id_modify
+ * struct sta_id_modify - station modify command
* @addr: station's MAC address
* @reserved1: reserved for alignment
* @sta_id: index of station in uCode's station table
@@ -2026,7 +2026,7 @@ struct iwl_spectrum_notification {
u8 channel;
u8 type; /* see enum iwl_measurement_type */
u8 reserved1;
- /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only
+ /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
* valid if applicable for measurement type requested. */
__le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */
__le32 cca_cck; /* cca fraction time in 44Mhz clock periods */
@@ -2992,7 +2992,7 @@ struct iwl_missed_beacon_notif {
#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
/**
- * struct iwl_sensitivity_cmd
+ * struct iwl_sensitivity_cmd - sensitivity configuration command
* @control: (1) updates working table, (0) updates default table
* @table: energy threshold values, use HD_* as index into table
*
@@ -3848,7 +3848,7 @@ struct iwlagn_wowlan_status {
#define IWL_MIN_SLOT_TIME 20
/**
- * struct iwl_wipan_slot
+ * struct iwl_wipan_slot - WiPAN slot configuration
* @width: Time in TU
* @type:
* 0 - BSS
@@ -3868,7 +3868,7 @@ struct iwl_wipan_slot {
#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE BIT(5)
/**
- * struct iwl_wipan_params_cmd
+ * struct iwl_wipan_params_cmd - WiPAN parameters
* @flags:
* bit0: reserved
* bit1: CP leave channel with CTS
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 25b24820466d..4d12bf901703 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -104,7 +104,7 @@ struct iwl_qos_info {
};
/**
- * enum iwl_agg_state
+ * enum iwl_agg_state - aggregation state
*
* The state machine of the BA agreement establishment / tear down.
* These states relate to a specific RA / TID.
@@ -519,7 +519,7 @@ enum iwl_scan_type {
};
/**
- * struct iwl_hw_params
+ * struct iwl_hw_params - HW parameters
*
* Holds the module parameters
*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index 3447ae0b160a..be7e61e2b291 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -55,6 +55,7 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
* iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
* @priv: pointer to iwl_priv data structure
* @tsf_bits: number of bits need to shift for masking)
+ * Return: low 32 bits of beacon time mask
*/
static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
u16 tsf_bits)
@@ -66,6 +67,7 @@ static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
* iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
* @priv: pointer to iwl_priv data structure
* @tsf_bits: number of bits need to shift for masking)
+ * Return: high 32 bits of beacon time mask
*/
static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
u16 tsf_bits)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 1dc974e2c511..48711dbcfa5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -586,7 +586,7 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
return false;
}
- ave_rssi = ieee80211_ave_rssi(ctx->vif);
+ ave_rssi = ieee80211_ave_rssi(ctx->vif, -1);
if (!ave_rssi) {
/* no rssi data, no changes to reduce tx power */
IWL_DEBUG_COEX(priv, "no rssi data available\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 66211426aa3a..e015b83bb6e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1049,9 +1049,11 @@ static void iwl_bg_restart(struct work_struct *data)
*
*****************************************************************************/
-static void iwl_setup_deferred_work(struct iwl_priv *priv)
+static int iwl_setup_deferred_work(struct iwl_priv *priv)
{
priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
+ if (!priv->workqueue)
+ return -ENOMEM;
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -1068,6 +1070,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
+
+ return 0;
}
void iwl_cancel_deferred_work(struct iwl_priv *priv)
@@ -1463,7 +1467,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
/********************
* 6. Setup services
********************/
- iwl_setup_deferred_work(priv);
+ if (iwl_setup_deferred_work(priv))
+ goto out_uninit_drv;
+
iwl_setup_rx_handlers(priv);
iwl_power_initialize(priv);
@@ -1502,6 +1508,7 @@ out_destroy_workqueue:
iwl_cancel_deferred_work(priv);
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
+out_uninit_drv:
iwl_uninit_drv(priv);
out_free_eeprom_blob:
kfree(priv->eeprom_blob);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
index f38201ce1e99..1a688d942bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
@@ -23,6 +23,4 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
int iwl_power_update_mode(struct iwl_priv *priv, bool force);
void iwl_power_initialize(struct iwl_priv *priv);
-extern bool no_sleep_autoadjust;
-
#endif /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 8879e668ef0d..ed964103281e 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2899,7 +2899,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
/* Repeat initial/next rate.
* For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
* For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+ while (repeat_rate > 0 && index < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 5f8b60824043..b34ee68f3dce 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -429,7 +429,7 @@ static void iwlagn_rx_statistics(struct iwl_priv *priv,
* thermal update even if the uCode doesn't give
* us one */
mod_timer(&priv->statistics_periodic, jiffies +
- msecs_to_jiffies(reg_recalib_period * 1000));
+ secs_to_jiffies(reg_recalib_period));
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 2d3c1627f283..e08e44cae434 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -1149,7 +1149,7 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
}
}
-int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+int iwlagn_mac_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 24fefa0e8148..a7806776a51e 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -232,6 +232,8 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
* that may be %NULL, for example during TX or key setup. In
* that case, we need to use the broadcast station, so this
* inline wraps that pattern.
+ *
+ * Return: station ID for mac80211 station (or broadcast if %NULL)
*/
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 9c271ea67155..9ce819503aed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -19,9 +19,11 @@ enum iwl_d0i3_flags {
/**
* enum iwl_d3_wakeup_flags - D3 manager wakeup flags
* @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
+ * @IWL_WAKEUP_D3_HOST_TIMER: wake up on host timer expiry
*/
enum iwl_d3_wakeup_flags {
- IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+ IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+ IWL_WAKEUP_D3_HOST_TIMER = BIT(1),
}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 557832563f89..62bd35a8f680 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -864,7 +864,7 @@ struct iwl_extended_beacon_notif {
/**
* enum iwl_dump_control - dump (flush) control flags
- * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty
+ * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
* and the TFD queues are empty.
*/
enum iwl_dump_control {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index ea739ebe7cb0..fd60a6816150 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1106,6 +1106,7 @@ static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt,
u32 prph_val;
u32 dphy_state;
u32 dphy_addr;
+ u32 prph_stts;
int i;
range->internal_base_addr = cpu_to_le32(addr);
@@ -1133,6 +1134,21 @@ static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt,
iwl_write_prph_no_grab(fwrt->trans, indirect_wr_addr,
WMAL_INDRCT_CMD(addr + i));
+
+ if (fwrt->trans->info.hw_rf_id != IWL_CFG_RF_TYPE_JF1 &&
+ fwrt->trans->info.hw_rf_id != IWL_CFG_RF_TYPE_JF2 &&
+ fwrt->trans->info.hw_rf_id != IWL_CFG_RF_TYPE_HR1 &&
+ fwrt->trans->info.hw_rf_id != IWL_CFG_RF_TYPE_HR2) {
+ udelay(2);
+ prph_stts = iwl_read_prph_no_grab(fwrt->trans,
+ WMAL_MRSPF_STTS);
+
+ /* Abort dump if status is 0xA5A5A5A2 or FIFO1 empty */
+ if (prph_stts == WMAL_TIMEOUT_VAL ||
+ !WMAL_MRSPF_STTS_IS_FIFO1_NOT_EMPTY(prph_stts))
+ break;
+ }
+
prph_val = iwl_read_prph_no_grab(fwrt->trans,
indirect_rd_addr);
*val++ = cpu_to_le32(prph_val);
@@ -3008,6 +3024,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dump_desc *desc;
unsigned int delay = 0;
bool monitor_only = false;
+ int ret;
if (trigger) {
u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
@@ -3038,7 +3055,11 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
- return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
+ ret = iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
+ if (ret)
+ kfree(desc);
+
+ return ret;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
@@ -3046,7 +3067,7 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
- int ret, len = 0;
+ int len = 0;
char buf[64];
if (iwl_trans_dbg_ini_valid(fwrt->trans))
@@ -3068,13 +3089,8 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
len = strlen(buf) + 1;
}
- ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
- trigger);
-
- if (ret)
- return ret;
-
- return 0;
+ return iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
+ trigger);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index c70f2a20f7d5..803ba35e7501 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -198,7 +198,7 @@ void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay)
iwl_fw_cancel_timestamp(fwrt);
- fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+ fwrt->timestamp.delay = secs_to_jiffies(delay);
schedule_delayed_work(&fwrt->timestamp.wk,
round_jiffies_relative(fwrt->timestamp.delay));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f9de139561a0..e055f798a398 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -53,8 +53,8 @@ struct iwl_ucode_capabilities {
u32 num_stations;
u32 num_links;
u32 num_beacons;
- unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
- unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
+ DECLARE_BITMAP(_api, NUM_IWL_UCODE_TLV_API);
+ DECLARE_BITMAP(_capa, NUM_IWL_UCODE_TLV_CAPA);
const struct iwl_fw_cmd_version *cmd_versions;
u32 n_cmd_versions;
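DECLARE_BITMAP() expands to exactly the open-coded array it replaces, so this hunk is a layout-neutral cleanup. A small equivalence sketch with illustrative names:

	#include <linux/bitmap.h>

	#define EXAMPLE_NBITS 128

	/* before: open-coded storage */
	unsigned long api_open[BITS_TO_LONGS(EXAMPLE_NBITS)];

	/* after: same storage and layout, clearer intent */
	DECLARE_BITMAP(api, EXAMPLE_NBITS);

	static bool example_has_api(const unsigned long *map, unsigned int bit)
	{
		return test_bit(bit, map);
	}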
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 4f3c2f7f4f5b..3bcd375995cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -332,7 +332,7 @@ iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
ret = iwl_trans_load_pnvm(trans, pnvm_data, capa);
if (ret)
goto free;
- IWL_INFO(trans, "loaded PNVM version %08x\n", pnvm_data->version);
+ IWL_DEBUG_INFO(trans, "loaded PNVM version %08x\n", pnvm_data->version);
set:
iwl_trans_set_pnvm(trans, capa);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 0fd452cb94ae..f3fa37fee2e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -113,6 +113,7 @@
#define CSR_IPC_STATE_RESET_SW_READY 1
#define CSR_IPC_STATE_RESET_TOP_READY 2
#define CSR_IPC_STATE_RESET_TOP_FOLLOWER 3
+#define CSR_IPC_STATE_TOP_RESET_REQ BIT(6)
#define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114)
#define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 9504a0cb8b13..6492bc7d1680 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1276,8 +1276,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
- IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
- fseq_ver->version);
+ IWL_DEBUG_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
+ fseq_ver->version);
}
break;
case IWL_UCODE_TLV_FW_NUM_STATIONS:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 0592f0f59d1c..1e4162f1bb44 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -160,23 +160,26 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
* @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
* @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
+ * @NVM_CHANNEL_VLP_AP_NOT_ALLOWED: UHB VLP AP not allowed,
+ * Valid only when %NVM_CHANNEL_VLP is enabled.
*/
enum iwl_nvm_channel_flags {
- NVM_CHANNEL_VALID = BIT(0),
- NVM_CHANNEL_IBSS = BIT(1),
- NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
- NVM_CHANNEL_ACTIVE = BIT(3),
- NVM_CHANNEL_RADAR = BIT(4),
- NVM_CHANNEL_INDOOR_ONLY = BIT(5),
- NVM_CHANNEL_GO_CONCURRENT = BIT(6),
- NVM_CHANNEL_UNIFORM = BIT(7),
- NVM_CHANNEL_20MHZ = BIT(8),
- NVM_CHANNEL_40MHZ = BIT(9),
- NVM_CHANNEL_80MHZ = BIT(10),
- NVM_CHANNEL_160MHZ = BIT(11),
- NVM_CHANNEL_DC_HIGH = BIT(12),
- NVM_CHANNEL_VLP = BIT(13),
- NVM_CHANNEL_AFC = BIT(14),
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+ NVM_CHANNEL_GO_CONCURRENT = BIT(6),
+ NVM_CHANNEL_UNIFORM = BIT(7),
+ NVM_CHANNEL_20MHZ = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+ NVM_CHANNEL_80MHZ = BIT(10),
+ NVM_CHANNEL_160MHZ = BIT(11),
+ NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
+ NVM_CHANNEL_VLP_AP_NOT_ALLOWED = BIT(15),
};
/**
@@ -1044,6 +1047,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
case IWL_CFG_RF_TYPE_GF:
case IWL_CFG_RF_TYPE_FM:
case IWL_CFG_RF_TYPE_WH:
+ case IWL_CFG_RF_TYPE_PE:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
if (!is_ap)
@@ -1629,8 +1633,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
- struct iwl_reg_capa reg_capa,
- const struct iwl_rf_cfg *cfg)
+ struct iwl_reg_capa reg_capa)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1685,10 +1688,12 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
}
/* Set the AP type for the UHB case. */
- if (nvm_flags & NVM_CHANNEL_VLP)
- flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP;
- else
+ if (nvm_flags & NVM_CHANNEL_VLP) {
+ if (!(nvm_flags & NVM_CHANNEL_VLP_AP_NOT_ALLOWED))
+ flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP;
+ } else {
flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+ }
if (!(nvm_flags & NVM_CHANNEL_AFC))
flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
@@ -1815,8 +1820,8 @@ iwl_parse_nvm_mcc_info(struct iwl_trans *trans,
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, reg_capa,
- cfg);
+ ch_flags,
+ reg_capa);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 5dc299296d6d..a146d0e399f2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -147,6 +147,8 @@ struct iwl_fw_error_dump_mode {
* Op_mode needs to reset its internal state because the device did not
* survive the system state transition. The firmware is no longer running,
* etc...
+ * @dump: Op_mode needs to collect the firmware dump upon this handler
+ * being called.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -174,6 +176,7 @@ struct iwl_op_mode_ops {
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
void (*device_powered_off)(struct iwl_op_mode *op_mode);
+ void (*dump)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -286,4 +289,11 @@ static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
op_mode->ops->device_powered_off(op_mode);
}
+static inline void iwl_op_mode_dump(struct iwl_op_mode *op_mode)
+{
+ if (!op_mode || !op_mode->ops || !op_mode->ops->dump)
+ return;
+ op_mode->ops->dump(op_mode);
+}
+
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 23b2009fbb28..a7214ddcfaf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -514,6 +514,14 @@ enum {
#define WMAL_INDRCT_CMD(addr) \
((WMAL_CMD_READ_BURST_ACCESS << WMAL_INDRCT_RD_CMD1_OPMOD_POS) | \
((addr) & WMAL_INDRCT_RD_CMD1_BYTE_ADDRESS_MSK))
+#define WMAL_MRSPF_STTS 0xADFC24
+#define WMAL_MRSPF_STTS_FIFO1_NOT_EMPTY_POS 15
+#define WMAL_MRSPF_STTS_FIFO1_NOT_EMPTY_MSK 0x8000
+#define WMAL_TIMEOUT_VAL 0xA5A5A5A2
+#define WMAL_MRSPF_STTS_IS_FIFO1_NOT_EMPTY(val) \
+ (((val) >> (WMAL_MRSPF_STTS_FIFO1_NOT_EMPTY_POS)) & \
+ ((WMAL_MRSPF_STTS_FIFO1_NOT_EMPTY_MSK) >> \
+ (WMAL_MRSPF_STTS_FIFO1_NOT_EMPTY_POS)))
#define WFPM_LMAC1_PS_CTL_RW 0xA03380
#define WFPM_LMAC2_PS_CTL_RW 0xA033C0
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 8a40801cf0dd..78808c956444 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -14,8 +14,8 @@
#include "iwl-fh.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
-#include "pcie/internal.h"
-#include "iwl-context-info-v2.h"
+#include "pcie/gen1_2/internal.h"
+#include "pcie/iwl-context-info-v2.h"
struct iwl_trans_dev_restart_data {
struct list_head list;
@@ -497,7 +497,19 @@ IWL_EXPORT_SYMBOL(iwl_trans_read_mem);
int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
const void *buf, int dwords)
{
- return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
+ int offs, ret = 0;
+ const u32 *vals = buf;
+
+ if (iwl_trans_grab_nic_access(trans)) {
+ iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+ vals ? vals[offs] : 0);
+ iwl_trans_release_nic_access(trans);
+ } else {
+ ret = -EBUSY;
+ }
+ return ret;
}
IWL_EXPORT_SYMBOL(iwl_trans_write_mem);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
index c5b49851e4b9..d503544fda40 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <net/gso.h>
#include <linux/ieee80211.h>
@@ -82,3 +82,114 @@ int iwl_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
}
IWL_EXPORT_SYMBOL(iwl_tx_tso_segment);
#endif /* CONFIG_INET */
+
+static u32 iwl_div_by_db(u32 value, u8 db)
+{
+ /*
+ * 2^32 * 10**(-i / 10) for i = [1, 10], skipping 0 and simply stopping
+ * at 10 dB and looping instead of using a much larger table.
+ *
+ * Using 64 bit math is overkill, but means the helper does not require
+ * a limit on the input range.
+ */
+ static const u32 db_to_val[] = {
+ 0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89,
+ 0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a,
+ };
+
+ while (value && db > 0) {
+ u8 change = min_t(u8, db, ARRAY_SIZE(db_to_val));
+
+ value = (((u64)value) * db_to_val[change - 1]) >> 32;
+
+ db -= change;
+ }
+
+ return value;
+}
+
+s8 iwl_average_neg_dbm(const u8 *neg_dbm_values, u8 len)
+{
+ int average_magnitude;
+ u32 average_factor;
+ int sum_magnitude = -128;
+ u32 sum_factor = 0;
+ int i, count = 0;
+
+ /*
+ * To properly average the decibel values (signal values given in dBm)
+ * we need to do the math in linear space. Doing a linear average of
+ * dB (dBm) values is a bit annoying though due to the large range of
+ * at least -10 to -110 dBm that will not fit into a 32 bit integer.
+ *
+ * A 64 bit integer should be sufficient, but then we still have the
+ * problem that there are no directly usable utility functions
+ * available.
+ *
+ * So, let's not deal with that and instead do much of the calculation
+ * with a 16.16 fixed point integer along with a base in dBm. 16.16 bit
+ * gives us plenty of head-room for adding up a few values and even
+ * doing some math on it. And the tail should be accurate enough too
+ * (1/2^16 is somewhere around -48 dB, so effectively zero).
+ *
+ * i.e. the real value of sum is:
+ * sum = sum_factor / 2^16 * 10^(sum_magnitude / 10) mW
+ *
+ * However, that does mean we need to be able to bring two values to
+ * a common base, so we need a helper for that.
+ *
+ * Note that this function takes an input with unsigned negative dBm
+ * values but returns a signed dBm (i.e. a negative value).
+ */
+
+ for (i = 0; i < len; i++) {
+ int val_magnitude;
+ u32 val_factor;
+
+ /* Assume invalid */
+ if (neg_dbm_values[i] == 0xff)
+ continue;
+
+ val_factor = 0x10000;
+ val_magnitude = -neg_dbm_values[i];
+
+ if (val_magnitude <= sum_magnitude) {
+ u8 div_db = sum_magnitude - val_magnitude;
+
+ val_factor = iwl_div_by_db(val_factor, div_db);
+ val_magnitude = sum_magnitude;
+ } else {
+ u8 div_db = val_magnitude - sum_magnitude;
+
+ sum_factor = iwl_div_by_db(sum_factor, div_db);
+ sum_magnitude = val_magnitude;
+ }
+
+ sum_factor += val_factor;
+ count++;
+ }
+
+ /* No valid noise measurement, return a very high noise level */
+ if (count == 0)
+ return 0;
+
+ average_magnitude = sum_magnitude;
+ average_factor = sum_factor / count;
+
+ /*
+ * average_factor will be a number smaller than 1.0 (0x10000) at this
+ * point. What we need to do now is to adjust average_magnitude so that
+ * average_factor is between -0.5 dB and 0.5 dB.
+ *
+ * Just do -1 dB steps and find the point where
+ * -0.5 dB * -i dB = 0x10000 * 10^(-0.5/10) / i dB
+ * = div_by_db(0xe429, i)
+ * is smaller than average_factor.
+ */
+ for (i = 0; average_factor < iwl_div_by_db(0xe429, i); i++) {
+ /* nothing */
+ }
+
+ return clamp(average_magnitude - i, -128, 0);
+}
+IWL_EXPORT_SYMBOL(iwl_average_neg_dbm);
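
The comment block above explains why the averaging has to happen in linear (mW) space rather than directly on the dBm values. A minimal userspace cross-check of that math, using floating point instead of the driver's 16.16 fixed point and db_to_val[] table (avg_dbm() and the sample inputs are illustrative only, not driver code):

/* Illustration only: dBm averaging in linear (mW) space, mirroring what
 * iwl_average_neg_dbm() computes with 16.16 fixed point. Build with -lm.
 */
#include <math.h>
#include <stdio.h>

static double avg_dbm(const double *dbm, int n)
{
	double sum_mw = 0.0;

	for (int i = 0; i < n; i++)
		sum_mw += pow(10.0, dbm[i] / 10.0);	/* dBm -> mW */

	return 10.0 * log10(sum_mw / n);		/* mean mW -> dBm */
}

int main(void)
{
	const double vals[] = { -80.0, -90.0 };

	/* Prints about -82.6 dBm; a naive average of the dB values would
	 * give -85 dBm, which is the error the fixed-point helper avoids.
	 */
	printf("%.1f dBm\n", avg_dbm(vals, 2));
	return 0;
}

In the driver, the pow() calls are replaced by repeated 64-bit multiplications with the precomputed db_to_val[] attenuation factors, one table entry per dB step.
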
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-utils.h b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h
index 8f1f11d06fbe..5172035e4d26 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-utils.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-utils.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_utils_h__
#define __iwl_utils_h__
@@ -53,4 +53,6 @@ u32 iwl_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
return ie - beacon;
}
+s8 iwl_average_neg_dbm(const u8 *neg_dbm_values, u8 len);
+
#endif /* __iwl_utils_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
index 3b56637b9697..ba1f75f739c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2021 - 2022 Intel Corporation
+ * Copyright (C) 2021 - 2022, 2025 Intel Corporation
*/
#ifndef __sap_h__
@@ -340,12 +340,12 @@ enum iwl_sap_wifi_auth_type {
};
/**
- * enum iwl_sap_wifi_cipher_alg
- * @SAP_WIFI_CIPHER_ALG_NONE: TBD
- * @SAP_WIFI_CIPHER_ALG_TKIP: TBD
- * @SAP_WIFI_CIPHER_ALG_CCMP: TBD
- * @SAP_WIFI_CIPHER_ALG_GCMP: TBD
- * @SAP_WIFI_CIPHER_ALG_GCMP_256: TBD
+ * enum iwl_sap_wifi_cipher_alg - MEI WiFi cipher algorithm IDs
+ * @SAP_WIFI_CIPHER_ALG_NONE: No encryption
+ * @SAP_WIFI_CIPHER_ALG_TKIP: TKIP
+ * @SAP_WIFI_CIPHER_ALG_CCMP: CCMP
+ * @SAP_WIFI_CIPHER_ALG_GCMP: GCMP-128
+ * @SAP_WIFI_CIPHER_ALG_GCMP_256: GCMP-256
*/
enum iwl_sap_wifi_cipher_alg {
SAP_WIFI_CIPHER_ALG_NONE = IWL_MEI_CIPHER_NONE,
@@ -601,7 +601,7 @@ enum iwl_sap_flex_filter_flags {
};
/**
- * struct iwl_sap_flex_filter -
+ * struct iwl_sap_flex_filter - filter configuration
* @src_port: Source port in network format.
* @dst_port: Destination port in network format.
* @flags: Flags and protocol, see &enum iwl_sap_flex_filter_flags.
@@ -633,7 +633,7 @@ enum iwl_sap_ipv4_filter_flags {
};
/**
- * struct iwl_sap_ipv4_filter-
+ * struct iwl_sap_ipv4_filter - IPv4 filter configuration
* @ipv4_addr: The IP address to filer.
* @flags: See &enum iwl_sap_ipv4_filter_flags.
*/
@@ -643,7 +643,7 @@ struct iwl_sap_ipv4_filter {
} __packed;
/**
- * enum iwl_sap_ipv6_filter_flags -
+ * enum iwl_sap_ipv6_filter_flags - IPv6 filter flags
* @SAP_IPV6_ADDR_FILTER_COPY: Pass packets to the host.
* @SAP_IPV6_ADDR_FILTER_ENABLED: If false, the filter should be ignored.
*/
@@ -653,7 +653,7 @@ enum iwl_sap_ipv6_filter_flags {
};
/**
- * struct iwl_sap_ipv6_filter -
+ * struct iwl_sap_ipv6_filter - IPv6 filter configuration
* @addr_lo24: Lowest 24 bits of the IPv6 address.
* @flags: See &enum iwl_sap_ipv6_filter_flags.
*/
@@ -663,7 +663,7 @@ struct iwl_sap_ipv6_filter {
} __packed;
/**
- * enum iwl_sap_icmpv6_filter_flags -
+ * enum iwl_sap_icmpv6_filter_flags - ICMPv6 filter flags
* @SAP_ICMPV6_FILTER_ENABLED: If false, the filter should be ignored.
* @SAP_ICMPV6_FILTER_COPY: Pass packets to the host.
*/
@@ -673,8 +673,8 @@ enum iwl_sap_icmpv6_filter_flags {
};
/**
- * enum iwl_sap_vlan_filter_flags -
- * @SAP_VLAN_FILTER_VLAN_ID_MSK: TBD
+ * enum iwl_sap_vlan_filter_flags - VLAN filter flags
+ * @SAP_VLAN_FILTER_VLAN_ID_MSK: VLAN ID
* @SAP_VLAN_FILTER_ENABLED: If false, the filter should be ignored.
*/
enum iwl_sap_vlan_filter_flags {
@@ -751,7 +751,7 @@ struct iwl_sap_pldr_data {
} __packed;
/**
- * enum iwl_sap_pldr_status -
+ * enum iwl_sap_pldr_status - product reset status
* @SAP_PLDR_STATUS_SUCCESS: PLDR started/ended successfully
* @SAP_PLDR_STATUS_FAILURE: PLDR failed to start/end
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
index ece66e7a9be4..c966e573f430 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
@@ -9,8 +9,4 @@ iwlmld-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwlmld-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmld-$(CONFIG_PM_SLEEP) += d3.o
-# non-upstream things
-iwlmld-$(CONFIG_IWL_VENDOR_CMDS) += vendor-cmd.o
-iwlmld-$(CONFIG_IWLMVM_AX_SOFTAP_TESTMODE) += ax-softap-testmode.o
-
subdir-ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index c776543cbba5..af12b3d81899 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -204,66 +204,6 @@ void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
}
#endif
-enum rt_status {
- FW_ALIVE,
- FW_NEEDS_RESET,
- FW_ERROR,
-};
-
-static enum rt_status iwl_mld_check_err_tables(struct iwl_mld *mld,
- struct ieee80211_vif *vif)
-{
- u32 err_id;
-
- /* check for lmac1 error */
- if (iwl_fwrt_read_err_table(mld->trans,
- mld->trans->dbg.lmac_error_event_table[0],
- &err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
-
- return FW_NEEDS_RESET;
- }
- return FW_ERROR;
- }
-
- /* check if we have lmac2 set and check for error */
- if (iwl_fwrt_read_err_table(mld->trans,
- mld->trans->dbg.lmac_error_event_table[1],
- NULL))
- return FW_ERROR;
-
- /* check for umac error */
- if (iwl_fwrt_read_err_table(mld->trans,
- mld->trans->dbg.umac_error_event_table,
- NULL))
- return FW_ERROR;
-
- return FW_ALIVE;
-}
-
-static bool iwl_mld_fw_needs_restart(struct iwl_mld *mld,
- struct ieee80211_vif *vif)
-{
- enum rt_status rt_status = iwl_mld_check_err_tables(mld, vif);
-
- if (rt_status == FW_ALIVE)
- return false;
-
- if (rt_status == FW_ERROR) {
- IWL_ERR(mld, "FW Error occurred during suspend\n");
- iwl_fwrt_dump_error_logs(&mld->fwrt);
- iwl_dbg_tlv_time_point(&mld->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- }
-
- return true;
-}
-
static int
iwl_mld_netdetect_config(struct iwl_mld *mld,
struct ieee80211_vif *vif,
@@ -928,7 +868,7 @@ iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
return true;
}
-static bool
+static void
iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
struct iwl_mld_wowlan_status *wowlan_status,
struct iwl_mld_resume_key_iter_data *key_iter_data,
@@ -941,21 +881,19 @@ iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
&wowlan_status->gtk[i],
link_conf,
key_iter_data->gtk_cipher))
- return false;
+ return;
if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
&wowlan_status->igtk,
link_conf, key_iter_data->igtk_cipher))
- return false;
+ return;
for (i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++)
if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
&wowlan_status->bigtk[i],
link_conf,
key_iter_data->bigtk_cipher))
- return false;
-
- return true;
+ return;
}
static bool
@@ -1317,6 +1255,13 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
struct iwl_d3_manager_config d3_cfg_cmd_data = {};
int ret;
+ if (mld->debug_max_sleep) {
+ d3_cfg_cmd_data.wakeup_host_timer =
+ cpu_to_le32(mld->debug_max_sleep);
+ d3_cfg_cmd_data.wakeup_flags =
+ cpu_to_le32(IWL_WAKEUP_D3_HOST_TIMER);
+ }
+
lockdep_assert_wiphy(mld->wiphy);
IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan suspend flow\n");
@@ -1376,10 +1321,7 @@ int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
mld->fw_status.in_d3 = false;
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
- if (iwl_mld_fw_needs_restart(mld, NULL))
- ret = -ENODEV;
- else
- ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
+ ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE))
return -ENODEV;
@@ -1928,15 +1870,10 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
- if (iwl_mld_fw_needs_restart(mld, bss_vif)) {
- fw_err = true;
- goto err;
- }
-
resume_data.wowlan_status = kzalloc(sizeof(*resume_data.wowlan_status),
GFP_KERNEL);
if (!resume_data.wowlan_status)
- return -1;
+ return -ENOMEM;
if (mld->netdetect)
resume_data.notifs_expected |= IWL_D3_ND_MATCH_INFO;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index 352da8aa7898..75cc1d8bb90c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -546,6 +546,11 @@ iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
#endif
MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200);
+#ifdef CONFIG_PM_SLEEP
+ debugfs_create_u32("max_sleep", 0600, debugfs_dir,
+ &mld->debug_max_sleep);
+#endif
+
debugfs_create_bool("rx_ts_ptp", 0600, debugfs_dir,
&mld->monitor.ptp_time);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c
index f77ba21a174d..3464b3268712 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c
@@ -94,7 +94,7 @@ iwl_mld_ftm_set_target_chandef(struct iwl_mld *mld,
IWL_ERR(mld, "Unsupported BW in FTM request (%d)\n",
peer->chandef.width);
return -EINVAL;
-}
+ }
/* non EDCA based measurement must use HE preamble */
if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index 235b55e0fe59..38993d65c052 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -55,6 +55,8 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL);
+ wiphy_delayed_work_cancel(mld->wiphy, &mld_vif->mlo_scan_start_wk);
+
CLEANUP_STRUCT(mld_vif);
}
@@ -385,6 +387,17 @@ int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
return iwl_mld_send_mac_cmd(mld, &cmd);
}
+static void iwl_mld_mlo_scan_start_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ mlo_scan_start_wk.work);
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ iwl_mld_int_mlo_scan(mld, iwl_mld_vif_to_mac80211(mld_vif));
+}
+
IWL_MLD_ALLOC_FN(vif, vif)
/* Constructor function for struct iwl_mld_vif */
@@ -412,6 +425,8 @@ iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
iwl_mld_emlsr_prevent_done_wk);
wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk,
iwl_mld_emlsr_tmp_non_bss_done_wk);
+ wiphy_delayed_work_init(&mld_vif->mlo_scan_start_wk,
+ iwl_mld_mlo_scan_start_wk);
}
iwl_mld_init_internal_sta(&mld_vif->aux_sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index 49e2ce65557d..05dcb63701b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -87,6 +87,8 @@ enum iwl_mld_emlsr_exit {
* @last_exit_reason: Reason for the last EMLSR exit
* @last_exit_ts: Time of the last EMLSR exit (if @last_exit_reason is non-zero)
* @exit_repeat_count: Number of times EMLSR was exited for the same reason
+ * @last_entry_ts: the time of the last EMLSR entry (if iwl_mld_emlsr_active()
+ * is true)
* @unblock_tpt_wk: Unblock EMLSR because the throughput limit was reached
* @check_tpt_wk: a worker to check if IWL_MLD_EMLSR_BLOCKED_TPT should be
* added, for example if there is no longer enough traffic.
@@ -105,6 +107,7 @@ struct iwl_mld_emlsr {
enum iwl_mld_emlsr_exit last_exit_reason;
unsigned long last_exit_ts;
u8 exit_repeat_count;
+ unsigned long last_entry_ts;
);
struct wiphy_work unblock_tpt_wk;
@@ -133,6 +136,8 @@ struct iwl_mld_emlsr {
* @low_latency_causes: bit flags, indicating the causes for low-latency,
* see @iwl_mld_low_latency_cause.
* @ps_disabled: indicates that PS is disabled for this interface
+ * @last_link_activation_time: last time a link was activated, for
+ * deferring MLO scans (to make them more reliable)
* @mld: pointer to the mld structure.
* @deflink: default link data, for use in non-MLO,
* @link: reference to link data for each valid link, for use in MLO.
@@ -144,6 +149,7 @@ struct iwl_mld_emlsr {
* @roc_activity: the id of the roc_activity running. Relevant for STA and
* p2p device only. Set to %ROC_NUM_ACTIVITIES when not in use.
* @aux_sta: station used for remain on channel. Used in P2P device.
+ * @mlo_scan_start_wk: worker to start a deferred MLO scan
*/
struct iwl_mld_vif {
/* Add here fields that need clean up on restart */
@@ -161,6 +167,7 @@ struct iwl_mld_vif {
#endif
u8 low_latency_causes;
bool ps_disabled;
+ time64_t last_link_activation_time;
);
/* And here fields that survive a fw restart */
struct iwl_mld *mld;
@@ -179,6 +186,8 @@ struct iwl_mld_vif {
#endif
enum iwl_roc_activity roc_activity;
struct iwl_mld_int_sta aux_sta;
+
+ struct wiphy_delayed_work mlo_scan_start_wk;
};
static inline struct iwl_mld_vif *
@@ -187,6 +196,12 @@ iwl_mld_vif_from_mac80211(struct ieee80211_vif *vif)
return (void *)vif->drv_priv;
}
+static inline struct ieee80211_vif *
+iwl_mld_vif_to_mac80211(struct iwl_mld_vif *mld_vif)
+{
+ return container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+}
+
#define iwl_mld_link_dereference_check(mld_vif, link_id) \
rcu_dereference_check((mld_vif)->link[link_id], \
lockdep_is_held(&mld_vif->mld->wiphy->mtx))
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index d0f56189ad3f..c65ac6ecbd1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -404,6 +404,7 @@ int iwl_mld_activate_link(struct iwl_mld *mld,
struct ieee80211_bss_conf *link)
{
struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_link->vif);
int ret;
lockdep_assert_wiphy(mld->wiphy);
@@ -418,6 +419,9 @@ int iwl_mld_activate_link(struct iwl_mld *mld,
LINK_CONTEXT_MODIFY_ACTIVE);
if (ret)
mld_link->active = false;
+ else
+ mld_vif->last_link_activation_time =
+ ktime_get_boottime_seconds();
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index 4ba050397632..59be9923c3b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -508,8 +508,15 @@ int iwl_mld_mac80211_start(struct ieee80211_hw *hw)
if (in_d3) {
/* mac80211 already cleaned up the state, no need for cleanup */
ret = iwl_mld_no_wowlan_resume(mld);
- if (ret)
+ if (ret) {
iwl_mld_stop_fw(mld);
+ /* We're not really restarting in the sense of
+ * in_hw_restart even if we got an error during
+ * this. We'll just start again below and have
+ * nothing to recover, mac80211 will do anyway.
+ */
+ mld->fw_status.in_hw_restart = false;
+ }
}
#endif /* CONFIG_PM_SLEEP */
@@ -574,7 +581,8 @@ void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
}
static
-int iwl_mld_mac80211_config(struct ieee80211_hw *hw, u32 changed)
+int iwl_mld_mac80211_config(struct ieee80211_hw *hw, int radio_idx,
+ u32 changed)
{
return 0;
}
@@ -1002,6 +1010,7 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
/* Indicate to mac80211 that EML is enabled */
vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
+ mld_vif->emlsr.last_entry_ts = jiffies;
if (vif->active_links & BIT(mld_vif->emlsr.selected_links))
mld_vif->emlsr.primary = mld_vif->emlsr.selected_primary;
@@ -1102,7 +1111,8 @@ void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
static
-int iwl_mld_mac80211_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+int iwl_mld_mac80211_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
return 0;
}
@@ -1468,7 +1478,7 @@ void iwl_mld_mac80211_mgd_prepare_tx(struct ieee80211_hw *hw,
struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
u32 duration = IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS;
- /* After a successful association the connection is etalibeshed
+ /* After a successful association the connection is established
* and we can rely on the quota to send the disassociation frame.
*/
if (info->was_assoc)
@@ -2573,28 +2583,6 @@ static int iwl_mld_mac80211_tx_last_beacon(struct ieee80211_hw *hw)
return mld->ibss_manager;
}
-#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (5 * HZ)
-
-static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- int ret;
-
- if (!iwl_mld_vif_has_emlsr_cap(vif))
- return;
-
- ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
- IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
- iwl_mld_get_primary_link(vif));
- if (ret)
- return;
-
- wiphy_delayed_work_queue(mld_vif->mld->wiphy,
- &mld_vif->emlsr.tmp_non_bss_done_wk,
- IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
-}
-
static void iwl_mld_prep_add_interface(struct ieee80211_hw *hw,
enum nl80211_iftype type)
{
@@ -2607,10 +2595,7 @@ static void iwl_mld_prep_add_interface(struct ieee80211_hw *hw,
type == NL80211_IFTYPE_P2P_CLIENT))
return;
- ieee80211_iterate_active_interfaces_mtx(mld->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
- NULL);
+ iwl_mld_emlsr_block_tmp_non_bss(mld);
}
static int iwl_mld_set_hw_timestamp(struct ieee80211_hw *hw,
@@ -2640,6 +2625,23 @@ static int iwl_mld_start_pmsr(struct ieee80211_hw *hw,
return iwl_mld_ftm_start(mld, vif, request);
}
+static enum ieee80211_neg_ttlm_res
+iwl_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u16 map;
+
+ /* Verify all TIDs are mapped to the same links set */
+ map = neg_ttlm->downlink[0];
+ for (int i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->uplink[i] != map)
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
const struct ieee80211_ops iwl_mld_hw_ops = {
.tx = iwl_mld_mac80211_tx,
.start = iwl_mld_mac80211_start,
@@ -2709,4 +2711,5 @@ const struct ieee80211_ops iwl_mld_hw_ops = {
.prep_add_interface = iwl_mld_prep_add_interface,
.set_hw_timestamp = iwl_mld_set_hw_timestamp,
.start_pmsr = iwl_mld_start_pmsr,
+ .can_neg_ttlm = iwl_mld_can_neg_ttlm,
};
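
iwl_mld_can_neg_ttlm() above accepts a negotiated TID-to-link mapping only when every TID, downlink and uplink alike, maps to one and the same link set. A standalone sketch of that uniformity check (TTLM_NUM_TIDS and the sample bitmaps are hypothetical values picked for illustration):

/* Illustration only: the uniformity rule iwl_mld_can_neg_ttlm() applies.
 * Each array entry is a per-TID link bitmap; bit 0 = link 0, bit 1 = link 1.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TTLM_NUM_TIDS 8		/* hypothetical TID count for this sketch */

static bool ttlm_is_uniform(const uint16_t *downlink, const uint16_t *uplink)
{
	uint16_t map = downlink[0];

	for (int i = 0; i < TTLM_NUM_TIDS; i++)
		if (downlink[i] != uplink[i] || uplink[i] != map)
			return false;	/* the driver would reject this TTLM */

	return true;			/* the driver would accept this TTLM */
}

int main(void)
{
	/* All TIDs on links 0+1 in both directions. */
	const uint16_t both[TTLM_NUM_TIDS]  = { 3, 3, 3, 3, 3, 3, 3, 3 };
	/* Same, except TID 7 restricted to link 1 in one direction. */
	const uint16_t mixed[TTLM_NUM_TIDS] = { 3, 3, 3, 3, 3, 3, 3, 2 };

	printf("uniform mapping accepted: %d\n", ttlm_is_uniform(both, both));	/* 1 */
	printf("mixed mapping accepted:   %d\n", ttlm_is_uniform(both, mixed));	/* 0 */
	return 0;
}
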
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 1774bb84dd3f..7ade5b714457 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -357,7 +357,7 @@ iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
trans->conf.rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+ trans->conf.rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
trans->conf.wide_cmd_header = true;
iwl_trans_op_mode_enter(trans, op_mode);
@@ -725,6 +725,17 @@ static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
{}
#endif
+static void iwl_mld_dump(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ struct iwl_fw_runtime *fwrt = &mld->fwrt;
+
+ if (!iwl_trans_fw_running(fwrt->trans))
+ return;
+
+ iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL);
+}
+
static const struct iwl_op_mode_ops iwl_mld_ops = {
.start = iwl_op_mode_mld_start,
.stop = iwl_op_mode_mld_stop,
@@ -739,6 +750,7 @@ static const struct iwl_op_mode_ops iwl_mld_ops = {
.sw_reset = iwl_mld_sw_reset,
.time_point = iwl_mld_time_point,
.device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off),
+ .dump = iwl_mld_dump,
};
struct iwl_mld_mod_params iwlmld_mod_params = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
index 1a2c44f44eff..241ab3a00e56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -159,6 +159,7 @@
* @addresses: device MAC addresses.
* @scan: instance of the scan object
* @wowlan: WoWLAN support data.
+ * @debug_max_sleep: maximum sleep time in D3 (for debug purposes)
* @led: the led device
* @mcc_src: the source id of the MCC, comes from the firmware
* @bios_enable_puncturing: is puncturing enabled by bios
@@ -252,6 +253,7 @@ struct iwl_mld {
struct iwl_mld_scan scan;
#ifdef CONFIG_PM_SLEEP
struct wiphy_wowlan_support wowlan;
+ u32 debug_max_sleep;
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_IWLWIFI_LEDS
struct led_classdev led;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
index dba5379ed009..be66a71a0fd7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -287,6 +287,36 @@ int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}
+#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (10 * HZ)
+
+static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
+ iwl_mld_get_primary_link(vif));
+ if (ret)
+ return;
+
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.tmp_non_bss_done_wk,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
+}
+
+void iwl_mld_emlsr_block_tmp_non_bss(struct iwl_mld *mld)
+{
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
+ NULL);
+}
+
static void _iwl_mld_select_links(struct iwl_mld *mld,
struct ieee80211_vif *vif);
@@ -530,10 +560,12 @@ void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
/*
* TPT is unblocked, need to check if the TPT criteria is still met.
*
- * If EMLSR is active, then we also need to check the secondar link
- * requirements.
+ * If EMLSR is active for at least 5 seconds, then we also
+ * need to check the secondary link requirements.
*/
- if (iwl_mld_emlsr_active(vif)) {
+ if (iwl_mld_emlsr_active(vif) &&
+ time_is_before_jiffies(mld_vif->emlsr.last_entry_ts +
+ IWL_MLD_TPT_COUNT_WINDOW)) {
sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
if (WARN_ON_ONCE(!sec_link))
@@ -1167,8 +1199,8 @@ void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
- mld_vif->emlsr.blocked_reasons)
+ if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif) ||
+ iwl_mld_emlsr_active(vif) || mld_vif->emlsr.blocked_reasons)
return;
iwl_mld_int_mlo_scan(mld, vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
index 9afa3d6ea649..704f64134798 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
@@ -157,6 +157,8 @@ struct iwl_mld_link_sel_data {
u16 grade;
};
+void iwl_mld_emlsr_block_tmp_non_bss(struct iwl_mld *mld);
+
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
u32 iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
struct iwl_mld_link_sel_data *a,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.c b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
index d5a32ee56b92..1d93fb9e4dbf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/phy.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
@@ -181,7 +181,7 @@ int iwl_mld_send_phy_cfg_cmd(struct iwl_mld *mld)
.phy_specific_cfg = mld->fwrt.phy_filters,
};
- IWL_INFO(mld, "Sending Phy CFG command: 0x%x\n", cmd.phy_cfg);
+ IWL_DEBUG_INFO(mld, "Sending Phy CFG command: 0x%x\n", cmd.phy_cfg);
return iwl_mld_send_cmd_pdu(mld, PHY_CONFIGURATION_CMD, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index 3fce7cd2d512..63d5d39bb083 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -1752,6 +1752,10 @@ int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
{
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ iwl_mld_emlsr_block_tmp_non_bss(mld);
+
return _iwl_mld_single_scan_start(mld, vif, req, ies,
IWL_MLD_SCAN_REGULAR);
}
@@ -1800,17 +1804,20 @@ static void iwl_mld_int_mlo_scan_start(struct iwl_mld *mld,
IWL_DEBUG_SCAN(mld, "Internal MLO scan: ret=%d\n", ret);
}
+#define IWL_MLD_MLO_SCAN_BLOCKOUT_TIME 5 /* seconds */
+
void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
unsigned long usable_links = ieee80211_vif_usable_links(vif);
size_t n_channels = 0;
u8 link_id;
lockdep_assert_wiphy(mld->wiphy);
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
- hweight16(vif->valid_links) == 1)
+ if (!IWL_MLD_AUTO_EML_ENABLE || !vif->cfg.assoc ||
+ !ieee80211_vif_is_mld(vif) || hweight16(vif->valid_links) == 1)
return;
if (mld->scan.status & IWL_MLD_SCAN_INT_MLO) {
@@ -1818,6 +1825,15 @@ void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif)
return;
}
+ if (mld_vif->last_link_activation_time > ktime_get_boottime_seconds() -
+ IWL_MLD_MLO_SCAN_BLOCKOUT_TIME) {
+ /* timing doesn't matter much, so use the blockout time */
+ wiphy_delayed_work_queue(mld->wiphy,
+ &mld_vif->mlo_scan_start_wk,
+ IWL_MLD_MLO_SCAN_BLOCKOUT_TIME);
+ return;
+ }
+
for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf =
link_conf_dereference_check(vif, link_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.h b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
index 3ae940d55065..4044cac3f086 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
@@ -130,7 +130,7 @@ struct iwl_mld_scan {
void *cmd;
unsigned long last_6ghz_passive_jiffies;
unsigned long last_start_time_jiffies;
- unsigned long last_mlo_scan_time;
+ u64 last_mlo_scan_time;
};
#endif /* __iwl_mld_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 507c03198c92..e1070b891300 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -120,19 +120,17 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
- struct {
- struct iwl_mvm_wep_key_cmd wep_key_cmd;
- struct iwl_mvm_wep_key wep_key;
- } __packed wkc = {
- .wep_key_cmd.mac_id_n_color =
- cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color)),
- .wep_key_cmd.num_keys = 1,
- /* firmware sets STA_KEY_FLG_WEP_13BYTES */
- .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
- .wep_key.key_index = key->keyidx,
- .wep_key.key_size = key->keylen,
- };
+ DEFINE_RAW_FLEX(struct iwl_mvm_wep_key_cmd, wkc, wep_key, 1);
+ struct iwl_mvm_wep_key *wep_key = wkc->wep_key;
+
+ wkc->mac_id_n_color =
+ cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ wkc->num_keys = 1;
+ /* firmware sets STA_KEY_FLG_WEP_13BYTES */
+ wkc->decryption_type = STA_KEY_FLG_WEP;
+ wep_key->key_index = key->keyidx;
+ wep_key->key_size = key->keylen;
/*
* This will fail -- the key functions don't set support
@@ -142,18 +140,19 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
break;
- memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
+ memcpy(&wep_key->key[3], key->key, key->keylen);
if (key->keyidx == mvmvif->tx_key_idx) {
/* TX key must be at offset 0 */
- wkc.wep_key.key_offset = 0;
+ wep_key->key_offset = 0;
} else {
/* others start at 1 */
data->wep_key_idx++;
- wkc.wep_key.key_offset = data->wep_key_idx;
+ wep_key->key_offset = data->wep_key_idx;
}
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
+ __struct_size(wkc), wkc);
data->error = ret != 0;
mvm->ptk_ivlen = key->iv_len;
@@ -2061,10 +2060,8 @@ static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_mlo_gtk *mlo_key = &status->mlo_keys[i];
struct ieee80211_key_conf *key, *old_key;
struct ieee80211_key_seq seq;
- struct {
- struct ieee80211_key_conf conf;
- u8 key[32];
- } conf = {};
+ DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
+ WOWLAN_KEY_MAX_SIZE);
u16 flags = le16_to_cpu(mlo_key->flags);
int j, link_id, key_id, key_type;
@@ -2081,40 +2078,40 @@ static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status,
key_type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES))
continue;
- conf.conf.cipher = old_keys->cipher[link_id][key_type];
+ conf->cipher = old_keys->cipher[link_id][key_type];
/* WARN_ON? */
- if (!conf.conf.cipher)
+ if (!conf->cipher)
continue;
- conf.conf.keylen = 0;
- switch (conf.conf.cipher) {
+ conf->keylen = 0;
+ switch (conf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
- conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ conf->keylen = WLAN_KEY_LEN_CCMP;
break;
case WLAN_CIPHER_SUITE_GCMP_256:
- conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ conf->keylen = WLAN_KEY_LEN_GCMP_256;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ conf->keylen = WLAN_KEY_LEN_BIP_GMAC_128;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ conf->keylen = WLAN_KEY_LEN_BIP_GMAC_256;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+ conf->keylen = WLAN_KEY_LEN_AES_CMAC;
break;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ conf->keylen = WLAN_KEY_LEN_BIP_CMAC_256;
break;
}
- if (WARN_ON(!conf.conf.keylen ||
- conf.conf.keylen > sizeof(conf.key)))
+ if (WARN_ON(!conf->keylen ||
+ conf->keylen > WOWLAN_KEY_MAX_SIZE))
continue;
- memcpy(conf.conf.key, mlo_key->key, conf.conf.keylen);
- conf.conf.keyidx = key_id;
+ memcpy(conf->key, mlo_key->key, conf->keylen);
+ conf->keyidx = key_id;
old_key = old_keys->key[link_id][key_id];
if (old_key) {
@@ -2126,7 +2123,7 @@ static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status,
IWL_DEBUG_WOWLAN(mvm, "Add MLO key id %d, link id %d\n",
key_id, link_id);
- key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ key = ieee80211_gtk_rekey_add(vif, conf, link_id);
if (WARN_ON(IS_ERR(key))) {
ret = false;
goto out;
@@ -2156,30 +2153,28 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
{
int i, j;
struct ieee80211_key_conf *key;
- struct {
- struct ieee80211_key_conf conf;
- u8 key[32];
- } conf = {
- .conf.cipher = gtk_cipher,
- };
+ DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
+ WOWLAN_KEY_MAX_SIZE);
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+ conf->cipher = gtk_cipher;
+
BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
- BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk[0].key));
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_CCMP);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_GCMP_256);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_TKIP);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(status->gtk[0].key));
switch (gtk_cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
- conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ conf->keylen = WLAN_KEY_LEN_CCMP;
break;
case WLAN_CIPHER_SUITE_GCMP_256:
- conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ conf->keylen = WLAN_KEY_LEN_GCMP_256;
break;
case WLAN_CIPHER_SUITE_TKIP:
- conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+ conf->keylen = WLAN_KEY_LEN_TKIP;
break;
default:
WARN_ON(1);
@@ -2189,14 +2184,14 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
if (!status->gtk[i].len)
continue;
- conf.conf.keyidx = status->gtk[i].id;
+ conf->keyidx = status->gtk[i].id;
IWL_DEBUG_WOWLAN(mvm,
"Received from FW GTK cipher %d, key index %d\n",
- conf.conf.cipher, conf.conf.keyidx);
- memcpy(conf.conf.key, status->gtk[i].key,
+ conf->cipher, conf->keyidx);
+ memcpy(conf->key, status->gtk[i].key,
sizeof(status->gtk[i].key));
- key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ key = ieee80211_gtk_rekey_add(vif, conf, link_id);
if (IS_ERR(key))
return false;
@@ -2218,42 +2213,40 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif, u32 cipher,
struct iwl_multicast_key_data *key_data)
{
+ DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
+ WOWLAN_KEY_MAX_SIZE);
struct ieee80211_key_conf *key_config;
- struct {
- struct ieee80211_key_conf conf;
- u8 key[WOWLAN_KEY_MAX_SIZE];
- } conf = {
- .conf.cipher = cipher,
- .conf.keyidx = key_data->id,
- };
struct ieee80211_key_seq seq;
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+ conf->cipher = cipher;
+ conf->keyidx = key_data->id;
+
if (!key_data->len)
return true;
- iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf.conf.cipher);
+ iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf->cipher);
switch (cipher) {
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ conf->keylen = WLAN_KEY_LEN_BIP_GMAC_128;
break;
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ conf->keylen = WLAN_KEY_LEN_BIP_GMAC_256;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+ conf->keylen = WLAN_KEY_LEN_AES_CMAC;
break;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ conf->keylen = WLAN_KEY_LEN_BIP_CMAC_256;
break;
default:
WARN_ON(1);
}
- BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
- memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
+ BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(key_data->key));
+ memcpy(conf->key, key_data->key, conf->keylen);
- key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ key_config = ieee80211_gtk_rekey_add(vif, conf, link_id);
if (IS_ERR(key_config))
return false;
ieee80211_set_key_rx_seq(key_config, 0, &seq);
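
Several hunks above replace an open-coded wrapper struct (header plus a fixed trailing array) with DEFINE_RAW_FLEX() from <linux/overflow.h> and size the resulting command with __struct_size(). A rough userspace sketch of the on-stack layout that macro provides, mimicked here with a hypothetical demo_cmd/demo_key pair rather than the macro itself:

/* Illustration only (userspace): the zero-initialized "header plus N
 * trailing elements" stack buffer that DEFINE_RAW_FLEX() sets up, shown
 * with hypothetical demo_cmd/demo_key types instead of the real macro.
 */
#include <stdio.h>

struct demo_key {
	unsigned char index;
	unsigned char size;
};

struct demo_cmd {
	unsigned int num_keys;
	struct demo_key keys[];		/* flexible array member */
};

int main(void)
{
	/* Room for the header plus exactly one key, zeroed up front. */
	union {
		unsigned char bytes[sizeof(struct demo_cmd) +
				    1 * sizeof(struct demo_key)];
		struct demo_cmd obj;
	} wkc_u = { .bytes = { 0 } };
	struct demo_cmd *wkc = &wkc_u.obj;

	wkc->num_keys = 1;
	wkc->keys[0].index = 0;
	wkc->keys[0].size = 13;

	/* sizeof(wkc_u.bytes) plays the role of __struct_size(wkc). */
	printf("command length: %zu bytes\n", sizeof(wkc_u.bytes));
	return 0;
}

The element count is stated once at the declaration and the send path uses the computed size, instead of a hand-written wrapper struct and a separate sizeof.
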
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 956b491ae5a4..b28c21c20371 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -298,7 +298,8 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
},
};
-int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
*tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
@@ -306,13 +307,15 @@ int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
-int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant,
+ u32 rx_ant)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
/* This has been tested on those devices only */
if (mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
- mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_22000)
+ mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_22000 &&
+ mvm->trans->mac_cfg->device_family != IWL_DEVICE_FAMILY_AX210)
return -EOPNOTSUPP;
if (!mvm->nvm_data)
@@ -4249,7 +4252,8 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
return ret;
}
-int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index a4f412e750d0..f0d459766365 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -2133,7 +2133,6 @@ bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
-
extern const struct iwl_hcmd_arr iwl_mvm_groups[];
extern const unsigned int iwl_mvm_groups_size;
#endif
@@ -2866,13 +2865,16 @@ void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params);
-int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
-int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
+int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+ u32 *rx_ant);
+int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant,
+ u32 rx_ant);
int iwl_mvm_mac_start(struct ieee80211_hw *hw);
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend);
-static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, int radio_idx,
+ u32 changed)
{
return 0;
}
@@ -2905,7 +2907,8 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
int num_frames,
enum ieee80211_frame_release_type reason,
bool more_data);
-int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta, u32 changed);
void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index a2dc5c3b0596..1c05a3d8e424 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -61,8 +61,10 @@ static int __init iwl_mvm_init(void)
}
ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
- if (ret)
+ if (ret) {
pr_err("Unable to register MVM op_mode: %d\n", ret);
+ iwl_mvm_rate_control_unregister();
+ }
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 69259ebb966b..dfb062b7c5c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -411,6 +411,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* with the mac80211 subsystem. This should be performed prior to calling
* ieee80211_register_hw
*
+ * Return: negative error code, or 0 on success
*/
int iwl_mvm_rate_control_register(void);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 60bd9c7e5f03..5f30109ca18f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -11,6 +11,7 @@
#include "mvm.h"
#include "fw/api/scan.h"
#include "iwl-io.h"
+#include "iwl-utils.h"
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -3685,117 +3686,6 @@ static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
return -EINVAL;
}
-static u32 iwl_mvm_div_by_db(u32 value, u8 db)
-{
- /*
- * 2^32 * 10**(i / 10) for i = [1, 10], skipping 0 and simply stopping
- * at 10 dB and looping instead of using a much larger table.
- *
- * Using 64 bit math is overkill, but means the helper does not require
- * a limit on the input range.
- */
- static const u32 db_to_val[] = {
- 0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89,
- 0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a,
- };
-
- while (value && db > 0) {
- u8 change = min_t(u8, db, ARRAY_SIZE(db_to_val));
-
- value = (((u64)value) * db_to_val[change - 1]) >> 32;
-
- db -= change;
- }
-
- return value;
-}
-
-VISIBLE_IF_IWLWIFI_KUNIT s8
-iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif)
-{
- s8 average_magnitude;
- u32 average_factor;
- s8 sum_magnitude = -128;
- u32 sum_factor = 0;
- int i, count = 0;
-
- /*
- * To properly average the decibel values (signal values given in dBm)
- * we need to do the math in linear space. Doing a linear average of
- * dB (dBm) values is a bit annoying though due to the large range of
- * at least -10 to -110 dBm that will not fit into a 32 bit integer.
- *
- * A 64 bit integer should be sufficient, but then we still have the
- * problem that there are no directly usable utility functions
- * available.
- *
- * So, lets not deal with that and instead do much of the calculation
- * with a 16.16 fixed point integer along with a base in dBm. 16.16 bit
- * gives us plenty of head-room for adding up a few values and even
- * doing some math on it. And the tail should be accurate enough too
- * (1/2^16 is somewhere around -48 dB, so effectively zero).
- *
- * i.e. the real value of sum is:
- * sum = sum_factor / 2^16 * 10^(sum_magnitude / 10) mW
- *
- * However, that does mean we need to be able to bring two values to
- * a common base, so we need a helper for that.
- *
- * Note that this function takes an input with unsigned negative dBm
- * values but returns a signed dBm (i.e. a negative value).
- */
-
- for (i = 0; i < ARRAY_SIZE(notif->noise); i++) {
- s8 val_magnitude;
- u32 val_factor;
-
- if (notif->noise[i] == 0xff)
- continue;
-
- val_factor = 0x10000;
- val_magnitude = -notif->noise[i];
-
- if (val_magnitude <= sum_magnitude) {
- u8 div_db = sum_magnitude - val_magnitude;
-
- val_factor = iwl_mvm_div_by_db(val_factor, div_db);
- val_magnitude = sum_magnitude;
- } else {
- u8 div_db = val_magnitude - sum_magnitude;
-
- sum_factor = iwl_mvm_div_by_db(sum_factor, div_db);
- sum_magnitude = val_magnitude;
- }
-
- sum_factor += val_factor;
- count++;
- }
-
- /* No valid noise measurement, return a very high noise level */
- if (count == 0)
- return 0;
-
- average_magnitude = sum_magnitude;
- average_factor = sum_factor / count;
-
- /*
- * average_factor will be a number smaller than 1.0 (0x10000) at this
- * point. What we need to do now is to adjust average_magnitude so that
- * average_factor is between -0.5 dB and 0.5 dB.
- *
- * Just do -1 dB steps and find the point where
- * -0.5 dB * -i dB = 0x10000 * 10^(-0.5/10) / i dB
- * = div_by_db(0xe429, i)
- * is smaller than average_factor.
- */
- for (i = 0; average_factor < iwl_mvm_div_by_db(0xe429, i); i++) {
- /* nothing */
- }
-
- return average_magnitude - i;
-}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_average_dbm_values);
-
void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -3853,5 +3743,6 @@ void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
info->time_busy = le32_to_cpu(notif->busy_time);
info->time_rx = le32_to_cpu(notif->rx_time);
info->time_tx = le32_to_cpu(notif->tx_time);
- info->noise = iwl_mvm_average_dbm_values(notif);
+ info->noise =
+ iwl_average_neg_dbm(notif->noise, ARRAY_SIZE(notif->noise));
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 6b183f5e9bbc..f6906061510b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -214,7 +214,7 @@ struct iwl_mvm_vif;
*/
/**
- * enum iwl_mvm_agg_state
+ * enum iwl_mvm_agg_state - aggregation session state
*
* The state machine of the BA agreement establishment / tear down.
* These states relate to a specific RA / TID.
@@ -483,6 +483,7 @@ struct iwl_mvm_int_sta {
* about. Otherwise (if this is a new STA), this should be false.
* @flags: if update==true, this marks what is being changed via ORs of values
* from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ * Return: negative error code or 0 on success
*/
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update, unsigned int flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
index 895d53f223e9..bb33f4a06f1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -1,3 +1,3 @@
-iwlmvm-tests-y += module.o links.o scan.o hcmd.o
+iwlmvm-tests-y += module.o links.o hcmd.o
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 49256ba4cf58..1ef8768756db 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2020, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2020, 2023, 2025 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#ifndef __time_event_h__
@@ -124,6 +124,8 @@ void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
* ROC request, it will issue a notification to the driver that it is on the
* requested channel. Once the FW completes the ROC request it will issue
* another notification to the driver.
+ *
+ * Return: negative error code or 0 on success
*/
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type);
@@ -179,6 +181,8 @@ void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
*
* This function is used to schedule NoA time event and is used to perform
* the channel switch flow.
+ *
+ * Return: negative error code or 0 on success
*/
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -188,7 +192,7 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
* iwl_mvm_te_scheduled - check if the fw received the TE cmd
* @te_data: the time event data that corresponds to that time event
*
- * This function returns true iff this TE is added to the fw.
+ * Return: %true if this TE is added to the fw, %false otherwise
*/
static inline bool
iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
index 976fd1f58da4..06be929a3ca5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c
@@ -6,7 +6,7 @@
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-v2.h"
-#include "internal.h"
+#include "gen1_2/internal.h"
#include "iwl-prph.h"
static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
@@ -391,13 +391,13 @@ static int iwl_pcie_load_payloads_segments
{
struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
- struct iwl_prph_scrath_mem_desc_addr_array *addresses;
+ struct iwl_prph_scratch_mem_desc_addr_array *addresses;
const void *data;
u32 len;
int i;
/* allocate and init DRAM descriptors array */
- len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array);
+ len = sizeof(struct iwl_prph_scratch_mem_desc_addr_array);
desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent
(trans,
len,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 4f2be0c1bd97..0957223c776d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,7 +6,7 @@
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info.h"
-#include "internal.h"
+#include "gen1_2/internal.h"
#include "iwl-prph.h"
static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 0a9e0dbb58fb..52a48e82f3bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -15,7 +15,7 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-prph.h"
-#include "internal.h"
+#include "gen1_2/internal.h"
#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \
struct _struct)
@@ -545,6 +545,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_mac_cfg)},
{IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_mac_cfg)},
{IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_mac_cfg)},
+ {IWL_PCI_DEVICE(0xD240, PCI_ANY_ID, iwl_sc_mac_cfg)},
#endif /* CONFIG_IWLMLD */
{0}
@@ -1580,12 +1581,21 @@ static const struct dev_pm_ops iwl_dev_pm_ops = {
#endif /* CONFIG_PM_SLEEP */
+static void iwl_pci_dump(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+ iwl_op_mode_dump(trans->op_mode);
+}
+
static struct pci_driver iwl_pci_driver = {
.name = DRV_NAME,
.id_table = iwl_hw_card_ids,
.probe = iwl_pci_probe,
.remove = iwl_pci_remove,
.driver.pm = IWL_PM_OPS,
+ .driver.coredump = iwl_pci_dump,
};
int __must_check iwl_pci_register_driver(void)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
index 3b7c12fc4f9e..23c0771a4231 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
@@ -22,7 +22,7 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
-#include "iwl-context-info.h"
+#include "pcie/iwl-context-info.h"
/*
* RX related structures and functions
@@ -39,7 +39,7 @@ struct iwl_host_cmd;
* trans_pcie layer */
/**
- * struct iwl_rx_mem_buffer
+ * struct iwl_rx_mem_buffer - driver-side RX buffer descriptor
* @page_dma: bus address of rxb page
* @page: driver's pointer to the rxb page
* @list: list entry for the membuffer
@@ -190,6 +190,7 @@ struct iwl_rb_allocator {
* iwl_get_closed_rb_stts - get closed rb stts from different structs
* @trans: transport pointer (for configuration)
* @rxq: the rxq to get the rb stts from
+ * Return: last closed RB index
*/
static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
struct iwl_rxq *rxq)
@@ -382,8 +383,7 @@ struct iwl_pcie_txqs {
* @irq_lock: lock to synchronize IRQ handling
* @txq_memory: TXQ allocation array
* @sx_waitq: waitqueue for Sx transitions
- * @sx_complete: completion for Sx transitions
- * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
+ * @sx_state: state tracking Sx transitions
* @opmode_down: indicates opmode went away
* @num_rx_bufs: number of RX buffers to allocate/use
* @affinity_mask: IRQ affinity mask for each RX queue
@@ -448,13 +448,17 @@ struct iwl_trans_pcie {
u8 __iomem *hw_base;
bool ucode_write_complete;
- bool sx_complete;
+ enum {
+ IWL_SX_INVALID = 0,
+ IWL_SX_WAITING,
+ IWL_SX_ERROR,
+ IWL_SX_COMPLETE,
+ } sx_state;
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t sx_waitq;
u16 num_rx_bufs;
- bool pcie_dbg_dumped_once;
u32 rx_page_order;
u32 rx_buf_bytes;
u32 supported_dma_mask;
@@ -698,6 +702,7 @@ static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
* iwl_txq_inc_wrap - increment queue index, wrap back to beginning
* @trans: the transport (for configuration data)
* @index: current index
+ * Return: the queue index incremented, subject to wrapping
*/
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
@@ -709,6 +714,7 @@ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
* iwl_txq_dec_wrap - decrement queue index, wrap back to end
* @trans: the transport (for configuration data)
* @index: current index
+ * Return: the queue index decremented, subject to wrapping
*/
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
@@ -1028,40 +1034,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
-static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
- u32 reg, u32 mask, u32 value)
-{
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
-}
-
-static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
-}
-
-static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
-}
-
static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
-void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
@@ -1074,6 +1052,7 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
+int _iwl_trans_pcie_start_hw(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
@@ -1083,8 +1062,6 @@ u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
void *buf, int dwords);
-int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords);
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
struct iwl_trans_dump_data *
iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
index f0405eddc367..619a9505e6d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c
@@ -12,7 +12,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
-#include "iwl-context-info-v2.h"
+#include "pcie/iwl-context-info-v2.h"
#include "fw/dbg.h"
/******************************************************************************
@@ -1700,6 +1700,15 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer);
}
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
+ u32 val = iwl_read32(trans, CSR_IPC_STATE);
+
+ if (val & CSR_IPC_STATE_TOP_RESET_REQ) {
+ IWL_ERR(trans, "FW requested TOP reset for FSEQ\n");
+ trans->do_top_reset = 1;
+ }
+ }
+
/* The STATUS_FW_ERROR bit is set in this function. This must happen
* before we wake up the command caller, to ensure a proper cleanup. */
iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
@@ -1852,7 +1861,12 @@ static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans)
}
fallthrough;
case CSR_IPC_STATE_RESET_TOP_READY:
- /* FIXME: handle this case when requesting TOP reset */
+ if (trans_pcie->fw_reset_state == FW_RESET_TOP_REQUESTED) {
+ IWL_DEBUG_ISR(trans, "TOP Reset continues\n");
+ trans_pcie->fw_reset_state = FW_RESET_OK;
+ wake_up(&trans_pcie->fw_reset_waitq);
+ break;
+ }
fallthrough;
case CSR_IPC_STATE_RESET_NONE:
IWL_FW_CHECK_FAILED(trans,
@@ -2380,6 +2394,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
} else {
iwl_pcie_irq_handle_error(trans);
}
+
+ if (trans_pcie->sx_state == IWL_SX_WAITING) {
+ trans_pcie->sx_state = IWL_SX_ERROR;
+ wake_up(&trans_pcie->sx_waitq);
+ }
}
/* After checking FH register check HW register */
@@ -2414,13 +2433,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
u32 sleep_notif =
le32_to_cpu(trans_pcie->prph_info->sleep_notif);
+
if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
IWL_DEBUG_ISR(trans,
"Sx interrupt: sleep notification = 0x%x\n",
sleep_notif);
- trans_pcie->sx_complete = true;
- wake_up(&trans_pcie->sx_waitq);
+ if (trans_pcie->sx_state == IWL_SX_WAITING) {
+ trans_pcie->sx_state = IWL_SX_COMPLETE;
+ wake_up(&trans_pcie->sx_waitq);
+ } else {
+ IWL_ERR(trans,
+ "unexpected Sx interrupt (0x%x)\n",
+ sleep_notif);
+ }
} else {
/* uCode wakes up after power-down sleep */
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
index c8f4f3a1d2eb..0df8522ca410 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
@@ -5,8 +5,8 @@
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
-#include "iwl-context-info.h"
-#include "iwl-context-info-v2.h"
+#include "pcie/iwl-context-info.h"
+#include "pcie/iwl-context-info-v2.h"
#include "internal.h"
#include "fw/dbg.h"
@@ -610,6 +610,11 @@ again:
msleep(10);
IWL_INFO(trans, "TOP reset successful, reinit now\n");
/* now load the firmware again properly */
+ ret = _iwl_trans_pcie_start_hw(trans);
+ if (ret) {
+ IWL_ERR(trans, "failed to start HW after TOP reset\n");
+ goto out;
+ }
trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &=
~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET);
top_reset_done = true;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
index cc4d289b110d..97e90cbeb6cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
@@ -28,106 +28,13 @@
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
-#include "iwl-context-info-v2.h"
+#include "pcie/iwl-context-info-v2.h"
+#include "pcie/utils.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
-void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
-{
-#define PCI_DUMP_SIZE 352
-#define PCI_MEM_DUMP_SIZE 64
-#define PCI_PARENT_DUMP_SIZE 524
-#define PREFIX_LEN 32
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct pci_dev *pdev = trans_pcie->pci_dev;
- u32 i, pos, alloc_size, *ptr, *buf;
- char *prefix;
-
- if (trans_pcie->pcie_dbg_dumped_once)
- return;
-
- /* Should be a multiple of 4 */
- BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
- BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
- BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);
-
- /* Alloc a max size buffer */
- alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
- alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
- alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
- alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);
-
- buf = kmalloc(alloc_size, GFP_ATOMIC);
- if (!buf)
- return;
- prefix = (char *)buf + alloc_size - PREFIX_LEN;
-
- IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");
-
- /* Print wifi device registers */
- sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
- IWL_ERR(trans, "iwlwifi device config registers:\n");
- for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
- if (pci_read_config_dword(pdev, i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
-
- IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
- for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
- *ptr = iwl_read32(trans, i);
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (pos) {
- IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
- for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
- if (pci_read_config_dword(pdev, pos + i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
- 32, 4, buf, i, 0);
- }
-
- /* Print parent device registers next */
- if (!pdev->bus->self)
- goto out;
-
- pdev = pdev->bus->self;
- sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
-
- IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
- pci_name(pdev));
- for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
- if (pci_read_config_dword(pdev, i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
-
- /* Print root port AER registers */
- pos = 0;
- pdev = pcie_find_root_port(pdev);
- if (pdev)
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (pos) {
- IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
- pci_name(pdev));
- sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
- for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
- if (pci_read_config_dword(pdev, pos + i, ptr))
- goto err_read;
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
- 4, buf, i, 0);
- }
- goto out;
-
-err_read:
- print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
- IWL_ERR(trans, "Read failed at 0x%X\n", i);
-out:
- trans_pcie->pcie_dbg_dumped_once = 1;
- kfree(buf);
-}
-
int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
@@ -387,8 +294,8 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
u32 dl_cfg_reg;
/* Force XTAL ON */
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ iwl_trans_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
ret = iwl_trans_pcie_sw_reset(trans, true);
@@ -397,8 +304,8 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
if (WARN_ON(ret)) {
/* Release XTAL ON request */
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
return;
}
@@ -449,12 +356,12 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* Activates XTAL resources monitor */
- __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
- CSR_MONITOR_XTAL_RESOURCES);
+ iwl_trans_set_bit(trans, CSR_MONITOR_CFG_REG,
+ CSR_MONITOR_XTAL_RESOURCES);
/* Release XTAL ON request */
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+ iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
udelay(10);
/* Release APMG XTAL */
@@ -704,7 +611,7 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
trans_pcie->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(trans, "Failed to load firmware chunk!\n");
- iwl_trans_pcie_dump_regs(trans);
+ iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
return -ETIMEDOUT;
}
@@ -1536,30 +1443,41 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+ trans_pcie->sx_state = IWL_SX_WAITING;
+
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
UREG_DOORBELL_TO_ISR6_RESUME);
- else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ else
iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
CSR_IPC_SLEEP_CONTROL_RESUME);
- else
- return 0;
ret = wait_event_timeout(trans_pcie->sx_waitq,
- trans_pcie->sx_complete, 2 * HZ);
-
- /* Invalidate it toward next suspend or resume */
- trans_pcie->sx_complete = false;
-
+ trans_pcie->sx_state != IWL_SX_WAITING,
+ 2 * HZ);
if (!ret) {
IWL_ERR(trans, "Timeout %s D3\n",
suspend ? "entering" : "exiting");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
}
- return 0;
+ if (trans_pcie->sx_state == IWL_SX_ERROR) {
+ IWL_ERR(trans, "FW error while %s D3\n",
+ suspend ? "entering" : "exiting");
+ ret = -EIO;
+ }
+
+ /* Invalidate it toward next suspend or resume */
+ trans_pcie->sx_state = IWL_SX_INVALID;
+
+ return ret;
}
int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
@@ -1845,7 +1763,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
return iwl_trans_pcie_sw_reset(trans, true);
}
-static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
@@ -2412,7 +2330,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
}
/* this bit wakes up the NIC */
- __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
+ iwl_trans_set_bit(trans, CSR_GP_CNTRL, write);
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
@@ -2449,7 +2367,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
cntrl);
- iwl_trans_pcie_dump_regs(trans);
+ iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
iwl_trans_pcie_reset(trans,
@@ -2501,11 +2419,11 @@ iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
if (trans_pcie->cmd_hold_nic_awake)
goto out;
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -2567,24 +2485,6 @@ int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
return 0;
}
-int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
-{
- int offs, ret = 0;
- const u32 *vals = buf;
-
- if (iwl_trans_grab_nic_access(trans)) {
- iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
- for (offs = 0; offs < dwords; offs++)
- iwl_write32(trans, HBUS_TARG_MEM_WDAT,
- vals ? vals[offs] : 0);
- iwl_trans_release_nic_access(trans);
- } else {
- ret = -EBUSY;
- }
- return ret;
-}
-
int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
u32 *val)
{
@@ -2704,7 +2604,7 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock_bh(&trans_pcie->reg_lock);
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+ _iwl_trans_set_bits_mask(trans, reg, mask, value);
spin_unlock_bh(&trans_pcie->reg_lock);
}
@@ -4046,7 +3946,7 @@ int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
IMR_D2S_REQUESTED, 5 * HZ);
if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
- iwl_trans_pcie_dump_regs(trans);
+ iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev);
return -ETIMEDOUT;
}
trans_pcie->imr_status = IMR_D2S_IDLE;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
index df0545f09da9..df0545f09da9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
index bb467e2b1779..8676726d789b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
@@ -25,6 +25,7 @@
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"
+#include "pcie/utils.h"
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
@@ -203,8 +204,8 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
}
trans_pcie->cmd_hold_nic_awake = false;
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
spin_unlock(&trans_pcie->reg_lock);
}
@@ -494,9 +495,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans)
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
/* reset context data, TX status and translation data */
- iwl_trans_pcie_write_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_MEM_LOWER_BOUND,
- NULL, clear_dwords);
+ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->txqs.scd_bc_tbls.dma >> 10);
@@ -1292,9 +1293,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
- iwl_trans_pcie_write_mem(trans, stts_addr,
- (const void *)zero_val,
- ARRAY_SIZE(zero_val));
+ iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
+ ARRAY_SIZE(zero_val));
}
iwl_pcie_txq_unmap(trans, txq_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h b/drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h
index 8c5c0ea46181..416baadc5017 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-v2.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h
@@ -130,11 +130,11 @@ struct iwl_prph_scratch_pnvm_cfg {
} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
/**
- * struct iwl_prph_scrath_mem_desc_addr_array
+ * struct iwl_prph_scratch_mem_desc_addr_array - DRAM map of payload addresses
* @mem_descs: array of dram addresses.
- * Each address is the beggining of a pnvm payload.
+ * Each address is the beginning of a PNVM payload.
*/
-struct iwl_prph_scrath_mem_desc_addr_array {
+struct iwl_prph_scratch_mem_desc_addr_array {
__le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX];
} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h
index 7ae0fbdef208..7ae0fbdef208 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/utils.c b/drivers/net/wireless/intel/iwlwifi/pcie/utils.c
new file mode 100644
index 000000000000..1bb274d8390c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/utils.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/gfp.h>
+
+#include "iwl-io.h"
+#include "pcie/utils.h"
+
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+#define PCI_DUMP_SIZE 352
+#define PCI_MEM_DUMP_SIZE 64
+#define PCI_PARENT_DUMP_SIZE 524
+#define PREFIX_LEN 32
+
+ static bool pcie_dbg_dumped_once;
+ u32 i, pos, alloc_size, *ptr, *buf;
+ char *prefix;
+
+ if (pcie_dbg_dumped_once)
+ return;
+
+ /* Should be a multiple of 4 */
+ BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);
+
+ /* Alloc a max size buffer */
+ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
+ alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);
+
+ buf = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!buf)
+ return;
+ prefix = (char *)buf + alloc_size - PREFIX_LEN;
+
+ IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");
+
+ /* Print wifi device registers */
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+ IWL_ERR(trans, "iwlwifi device config registers:\n");
+ for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
+ for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
+ *ptr = iwl_read32(trans, i);
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
+ for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, pos + i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 32, 4, buf, i, 0);
+ }
+
+ /* Print parent device registers next */
+ if (!pdev->bus->self)
+ goto out;
+
+ pdev = pdev->bus->self;
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+
+ IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
+ pci_name(pdev));
+ for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ /* Print root port AER registers */
+ pos = 0;
+ pdev = pcie_find_root_port(pdev);
+ if (pdev)
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
+ pci_name(pdev));
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+ for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, pos + i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
+ 4, buf, i, 0);
+ }
+ goto out;
+
+err_read:
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+ IWL_ERR(trans, "Read failed at 0x%X\n", i);
+out:
+ pcie_dbg_dumped_once = true;
+ kfree(buf);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/utils.h b/drivers/net/wireless/intel/iwlwifi/pcie/utils.h
new file mode 100644
index 000000000000..031dfdf4bba4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/utils.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#ifndef __iwl_pcie_utils_h__
+#define __iwl_pcie_utils_h__
+
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans, struct pci_dev *pdev);
+
+static inline void _iwl_trans_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void iwl_trans_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ _iwl_trans_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void iwl_trans_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ _iwl_trans_set_bits_mask(trans, reg, mask, mask);
+}
+
+#endif /* __iwl_pcie_utils_h__ */
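For reference, the helpers above are a plain read-modify-write on a 32-bit CSR. A minimal standalone sketch of the same masking arithmetic, with an ordinary variable standing in for the MMIO register and the iwl_read32()/iwl_write32() accessors, looks like this (note the driver helper does not mask value itself; it only warns under CONFIG_IWLWIFI_DEBUG when value has bits outside mask):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stand-in for the MMIO register */

/* Clear the bits in mask, then set the requested bits within that mask. */
static void set_bits_mask(uint32_t *reg, uint32_t mask, uint32_t value)
{
	*reg = (*reg & ~mask) | (value & mask);
}

int main(void)
{
	set_bits_mask(&fake_reg, 0x0f, 0x05);	/* "set bit" use: value == bits to set */
	set_bits_mask(&fake_reg, 0x03, 0x00);	/* "clear bit" use: value == 0 */
	printf("reg = 0x%08x\n", fake_reg);	/* prints 0x00000004 */
	return 0;
}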
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
index 84491488f589..1b49241c578f 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-iwlwifi-tests-y += module.o devinfo.o
+iwlwifi-tests-y += module.o devinfo.o utils.o
ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c b/drivers/net/wireless/intel/iwlwifi/tests/utils.c
index 7a3275199ace..df2c3a891e7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/utils.c
@@ -1,20 +1,19 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * KUnit tests for channel helper functions
+ * KUnit tests for utilities
*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
-#include <net/mac80211.h>
-#include "../mvm.h"
+#include "../iwl-utils.h"
#include <kunit/test.h>
-MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+MODULE_IMPORT_NS("IWLWIFI");
-static const struct acs_average_db_case {
+static const struct average_neg_db_case {
const char *desc;
u8 neg_dbm[22];
s8 result;
-} acs_average_db_cases[] = {
+} average_neg_db_cases[] = {
{
.desc = "Smallest possible value, all filled",
.neg_dbm = {
@@ -73,38 +72,38 @@ static const struct acs_average_db_case {
},
};
-KUNIT_ARRAY_PARAM_DESC(acs_average_db, acs_average_db_cases, desc)
+KUNIT_ARRAY_PARAM_DESC(average_neg_db, average_neg_db_cases, desc)
-static void test_acs_average_db(struct kunit *test)
+static void test_average_neg_db(struct kunit *test)
{
- const struct acs_average_db_case *params = test->param_value;
- struct iwl_umac_scan_channel_survey_notif notif;
+ const struct average_neg_db_case *params = test->param_value;
+ u8 reversed[ARRAY_SIZE(params->neg_dbm)];
int i;
/* Test the values in the given order */
- for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++)
- notif.noise[i] = params->neg_dbm[i];
KUNIT_ASSERT_EQ(test,
- iwl_mvm_average_dbm_values(&notif),
+ iwl_average_neg_dbm(params->neg_dbm,
+ ARRAY_SIZE(params->neg_dbm)),
params->result);
/* Test in reverse order */
for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++)
- notif.noise[ARRAY_SIZE(params->neg_dbm) - i - 1] =
+ reversed[ARRAY_SIZE(params->neg_dbm) - i - 1] =
params->neg_dbm[i];
KUNIT_ASSERT_EQ(test,
- iwl_mvm_average_dbm_values(&notif),
+ iwl_average_neg_dbm(reversed,
+ ARRAY_SIZE(params->neg_dbm)),
params->result);
}
-static struct kunit_case acs_average_db_case[] = {
- KUNIT_CASE_PARAM(test_acs_average_db, acs_average_db_gen_params),
+static struct kunit_case average_db_case[] = {
+ KUNIT_CASE_PARAM(test_average_neg_db, average_neg_db_gen_params),
{}
};
-static struct kunit_suite acs_average_db = {
- .name = "iwlmvm-acs-average-db",
- .test_cases = acs_average_db_case,
+static struct kunit_suite average_db = {
+ .name = "iwl-average-db",
+ .test_cases = average_db_case,
};
-kunit_test_suite(acs_average_db);
+kunit_test_suite(average_db);
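The relocated test above exercises iwl_average_neg_dbm() purely through its interface (an array of negative-dBm magnitudes in, a signed dBm average out); the helper's implementation is not part of this diff. A naive arithmetic-mean model of that data shape, explicitly not the driver's algorithm (which may, for instance, average in the linear power domain), would be:

#include <stdint.h>
#include <stdio.h>

/* Model only: inputs are magnitudes of negative dBm values (96 means -96 dBm);
 * treating zero entries as "no reading" is an assumption of this sketch.
 */
static int8_t average_neg_dbm_model(const uint8_t *neg_dbm, unsigned int num)
{
	unsigned int i, count = 0;
	int sum = 0;

	for (i = 0; i < num; i++) {
		if (!neg_dbm[i])
			continue;
		sum += neg_dbm[i];
		count++;
	}

	return count ? (int8_t)(-(sum / (int)count)) : 0;
}

int main(void)
{
	const uint8_t samples[] = { 96, 90, 0, 84 };

	printf("%d dBm\n", average_neg_dbm_model(samples, 4));	/* -90 dBm */
	return 0;
}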
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 42111bb53f58..2ec3655f1a9c 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -313,7 +313,7 @@ static void p54_reset_stats(struct p54_common *priv)
priv->survey_raw.tx = 0;
}
-static int p54_config(struct ieee80211_hw *dev, u32 changed)
+static int p54_config(struct ieee80211_hw *dev, int radio_idx, u32 changed)
{
int ret = 0;
struct p54_common *priv = dev->priv;
@@ -692,6 +692,7 @@ static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
}
static void p54_set_coverage_class(struct ieee80211_hw *dev,
+ int radio_idx,
s16 coverage_class)
{
struct p54_common *priv = dev->priv;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 50c0f6179e2d..d1067874428f 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -337,7 +337,7 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
lbtf_deb_leave(LBTF_DEB_MACOPS);
}
-static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
+static int lbtf_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct lbtf_private *priv = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 60c12328c2f3..286378770e9e 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -375,6 +375,7 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
static int
mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ int radio_idx,
enum nl80211_tx_power_setting type,
int mbm)
{
@@ -410,6 +411,7 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
static int
mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ int radio_idx,
unsigned int link_id, int *dbm)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -737,7 +739,8 @@ mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
* Fragmentation threshold of the driver.
*/
static int
-mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv;
@@ -1939,7 +1942,8 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
}
static int
-mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
+mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, int radio_idx, u32 tx_ant,
+ u32 rx_ant)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,
@@ -2002,7 +2006,8 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
}
static int
-mwifiex_cfg80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
+mwifiex_cfg80211_get_antenna(struct wiphy *wiphy, int radio_idx, u32 *tx_ant,
+ u32 *rx_ant)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index c1fe48448839..f039d6f19183 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -438,7 +438,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.can_auto_tdls = false,
.can_ext_scan = true,
.fw_ready_extra_delay = false,
- .host_mlme = false,
+ .host_mlme = true,
};
static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index bab9ef37a1ab..bc34a025acd6 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -3369,7 +3369,8 @@ struct mwl8k_cmd_set_rts_threshold {
} __packed;
static int
-mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
+mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ int rts_thresh)
{
struct mwl8k_cmd_set_rts_threshold *cmd;
int rc;
@@ -4955,7 +4956,7 @@ fail:
wiphy_err(hw->wiphy, "Firmware restart failed\n");
}
-static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
+static int mwl8k_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct ieee80211_conf *conf = &hw->conf;
struct mwl8k_priv *priv = hw->priv;
@@ -5321,9 +5322,10 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
mwl8k_fw_unlock(hw);
}
-static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
- return mwl8k_cmd_set_rts_threshold(hw, value);
+ return mwl8k_cmd_set_rts_threshold(hw, radio_idx, value);
}
static int mwl8k_sta_remove(struct ieee80211_hw *hw,
@@ -6056,7 +6058,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
if (rc)
goto fail;
- rc = mwl8k_config(hw, ~0);
+ rc = mwl8k_config(hw, -1, ~0);
if (rc)
goto fail;
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 45c8db939d55..3afe4c4cd7bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -1892,7 +1892,8 @@ void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
-int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+ u32 *rx_ant)
{
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5f8d81cda6cd..14927a92f9d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1513,7 +1513,8 @@ int mt76_get_sar_power(struct mt76_phy *phy,
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
-int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+ u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 3e8b1ec76169..0d7c84941cd0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -216,7 +216,7 @@ static int mt7603_set_sar_specs(struct ieee80211_hw *hw,
}
static int
-mt7603_config(struct ieee80211_hw *hw, u32 changed)
+mt7603_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt7603_dev *dev = hw->priv;
int ret = 0;
@@ -657,7 +657,8 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static void
-mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+mt7603_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class)
{
struct mt7603_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 8a37fb37f77d..15fe155ac3f3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -420,7 +420,7 @@ static int mt7615_set_sar_specs(struct ieee80211_hw *hw,
return mt76_update_channel(phy->mt76);
}
-static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
+static int mt7615_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -784,7 +784,8 @@ static void mt7615_tx(struct ieee80211_hw *hw,
mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb);
}
-static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 val)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -972,7 +973,8 @@ mt7615_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static void
-mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+mt7615_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_dev *dev = phy->dev;
@@ -984,7 +986,8 @@ mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
}
static int
-mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+mt7615_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index 4aa2dcedc874..a5c40d350612 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -57,7 +57,7 @@ out:
}
EXPORT_SYMBOL_GPL(mt76x0_set_sar_specs);
-int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
+int mt76x0_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 50f755344968..e5bc14d4c712 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -48,7 +48,7 @@ void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
-int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
+int mt76x0_config(struct ieee80211_hw *hw, int radio_idx, u32 changed);
int mt76x0_set_channel(struct mt76_phy *mphy);
int mt76x0_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 4cd63bacd742..2094c7d2af81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -183,8 +183,8 @@ void mt76x02_wdt_work(struct work_struct *work);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
- s16 coverage_class);
-int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
+ int radio_idx, s16 coverage_class);
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, u32 val);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 4fb30589fa7a..7dfcb20c692c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -548,7 +548,7 @@ void mt76x02_set_tx_ackto(struct mt76x02_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02_set_tx_ackto);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
- s16 coverage_class)
+ int radio_idx, s16 coverage_class)
{
struct mt76x02_dev *dev = hw->priv;
@@ -559,7 +559,7 @@ void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(mt76x02_set_coverage_class);
-int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, u32 val)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index eb70130d2711..c5dfb06d81e8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -54,7 +54,7 @@ int mt76x2e_set_channel(struct mt76_phy *phy)
}
static int
-mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+mt76x2_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
@@ -99,8 +99,8 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
}
-static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
- u32 rx_ant)
+static int mt76x2_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 83e7061b10e2..6671c53faf9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -50,7 +50,7 @@ int mt76x2u_set_channel(struct mt76_phy *mphy)
}
static int
-mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
+mt76x2u_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
int err = 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 3aa31c5cefa6..fe0639c14bf9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -449,7 +449,8 @@ out:
return err;
}
-static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+static int mt7915_config(struct ieee80211_hw *hw, int radio_idx,
+ u32 changed)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
@@ -906,7 +907,8 @@ static void mt7915_tx(struct ieee80211_hw *hw,
mt76_tx(mphy, control->sta, wcid, skb);
}
-static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 val)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
@@ -1102,7 +1104,8 @@ mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static void
-mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+mt7915_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_dev *dev = phy->dev;
@@ -1114,7 +1117,7 @@ mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
}
static int
-mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+mt7915_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant, u32 rx_ant)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
@@ -1655,7 +1658,7 @@ mt7915_twt_teardown_request(struct ieee80211_hw *hw,
}
static int
-mt7915_set_frag_threshold(struct ieee80211_hw *hw, u32 val)
+mt7915_set_frag_threshold(struct ieee80211_hw *hw, int radio_idx, u32 val)
{
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 1fffa43379b2..1678204296d7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -624,7 +624,7 @@ void mt7921_set_runtime_pm(struct mt792x_dev *dev)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
}
-static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
+static int mt7921_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_phy *phy = mt792x_hw_phy(hw);
@@ -907,7 +907,8 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_remove);
-static int mt7921_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+static int mt7921_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 val)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
@@ -1088,7 +1089,8 @@ mt7921_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static int
-mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+mt7921_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_phy *phy = mt792x_hw_phy(hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 94b0099dcd41..ed7cd75aa6bc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -757,7 +757,7 @@ void mt7925_set_runtime_pm(struct mt792x_dev *dev)
mt7925_mcu_set_deep_sleep(dev, pm->ds_enable);
}
-static int mt7925_config(struct ieee80211_hw *hw, u32 changed)
+static int mt7925_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
int ret = 0;
@@ -1265,7 +1265,8 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove);
-static int mt7925_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+static int mt7925_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 val)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
@@ -1507,7 +1508,8 @@ mt7925_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static int
-mt7925_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+mt7925_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_phy *phy = mt792x_hw_phy(hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index e0359d431eca..443d397d9961 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -412,7 +412,8 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo);
-void mt792x_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class);
+void mt792x_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class);
void mt792x_dma_cleanup(struct mt792x_dev *dev);
int mt792x_dma_enable(struct mt792x_dev *dev);
int mt792x_wpdma_reset(struct mt792x_dev *dev, bool force);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index a50c1723ca29..43a7ac0f718e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -579,7 +579,8 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(mt792x_sta_statistics);
-void mt792x_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+void mt792x_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class)
{
struct mt792x_phy *phy = mt792x_hw_phy(hw);
struct mt792x_dev *dev = phy->dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 78ae9f5cb176..5283aee619a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -591,7 +591,7 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return err;
}
-static int mt7996_config(struct ieee80211_hw *hw, u32 changed)
+static int mt7996_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
return 0;
}
@@ -1251,7 +1251,8 @@ unlock:
rcu_read_unlock();
}
-static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 val)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
int i, ret = 0;
@@ -1491,7 +1492,8 @@ unlock:
}
static void
-mt7996_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+mt7996_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_phy *phy;
@@ -1505,7 +1507,8 @@ mt7996_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
}
static int
-mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+mt7996_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
int i;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 7570c6ceecea..05ba43e1985c 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -78,7 +78,7 @@ static void mt7601u_remove_interface(struct ieee80211_hw *hw,
dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
}
-static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
+static int mt7601u_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct mt7601u_dev *dev = hw->priv;
int ret = 0;
@@ -334,7 +334,8 @@ mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
}
-static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct mt7601u_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index e7aa0f991923..a395829ebadf 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -800,7 +800,7 @@ static int change_bss(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int set_wiphy_params(struct wiphy *wiphy, int radio_idx, u32 changed)
{
int ret = -EINVAL;
struct cfg_param_attr cfg_param_val;
@@ -1637,7 +1637,8 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
}
static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm)
+ int radio_idx, enum nl80211_tx_power_setting type,
+ int mbm)
{
int ret;
int srcu_idx;
@@ -1669,7 +1670,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
}
static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id, int *dbm)
+ int radio_idx, unsigned int link_id, int *dbm)
{
int ret;
struct wilc_vif *vif = netdev_priv(wdev->netdev);
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 82d1bf7edba2..d375ad60167f 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -531,7 +531,7 @@ static void plfxlc_op_remove_interface(struct ieee80211_hw *hw,
mac->vif = NULL;
}
-static int plfxlc_op_config(struct ieee80211_hw *hw, u32 changed)
+static int plfxlc_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
return 0;
}
@@ -677,7 +677,8 @@ static void plfxlc_get_et_stats(struct ieee80211_hw *hw,
data[1] = mac->crc_errors;
}
-static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
return 0;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 0b2282528342..f1188368e66b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -370,7 +370,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int qtnf_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct qtnf_vif *vif;
@@ -881,7 +882,7 @@ static int qtnf_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id, int *dbm)
+ int radio_idx, unsigned int link_id, int *dbm)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev);
int ret;
@@ -894,7 +895,8 @@ static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
}
static int qtnf_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm)
+ int radio_idx, enum nl80211_tx_power_setting type,
+ int mbm)
{
struct qtnf_vif *vif;
int ret;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index b7ea606bda08..4b5a7c9b6499 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -12100,7 +12100,7 @@ void rt2800_get_key_seq(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(rt2800_get_key_seq);
-int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+int rt2800_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, u32 value)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
u32 reg;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 194de676df8f..620a3d9872ce 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -253,7 +253,8 @@ int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
void rt2800_get_key_seq(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
struct ieee80211_key_seq *seq);
-int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+int rt2800_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
int rt2800_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 queue_idx,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index dfb4bb370f01..09b9d1f9f793 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1457,7 +1457,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
+int rt2x00mac_config(struct ieee80211_hw *hw, int radio_idx, u32 changed);
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -1489,8 +1489,10 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw,
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
-int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
-int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+int rt2x00mac_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+int rt2x00mac_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 451632488805..3bc0c1c906c9 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -304,7 +304,7 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
-int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
+int rt2x00mac_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -740,7 +740,8 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(rt2x00mac_flush);
-int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+int rt2x00mac_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct link_ant *ant = &rt2x00dev->link.ant;
@@ -785,7 +786,8 @@ int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
}
EXPORT_SYMBOL_GPL(rt2x00mac_set_antenna);
-int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+int rt2x00mac_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct link_ant *ant = &rt2x00dev->link.ant;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index ded8d4d59289..2905baea6239 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1370,7 +1370,7 @@ static void rtl8180_remove_interface(struct ieee80211_hw *dev,
priv->vif = NULL;
}
-static int rtl8180_config(struct ieee80211_hw *dev, u32 changed)
+static int rtl8180_config(struct ieee80211_hw *dev, int radio_idx, u32 changed)
{
struct rtl8180_priv *priv = dev->priv;
struct ieee80211_conf *conf = &dev->conf;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 220ac5bdf279..0c5c66401daa 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1041,10 +1041,11 @@ static void rtl8187_stop(struct ieee80211_hw *dev, bool suspend)
rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+ usb_kill_anchored_urbs(&priv->anchored);
+
while ((skb = skb_dequeue(&priv->b_tx_status.queue)))
dev_kfree_skb_any(skb);
- usb_kill_anchored_urbs(&priv->anchored);
mutex_unlock(&priv->conf_mutex);
if (!priv->is_rtl8187b)
@@ -1151,7 +1152,7 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
mutex_unlock(&priv->conf_mutex);
}
-static int rtl8187_config(struct ieee80211_hw *dev, u32 changed)
+static int rtl8187_config(struct ieee80211_hw *dev, int radio_idx, u32 changed)
{
struct rtl8187_priv *priv = dev->priv;
struct ieee80211_conf *conf = &dev->conf;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 569856ca677f..496836f716aa 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -4552,7 +4552,8 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
}
static
-int rtl8xxxu_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+int rtl8xxxu_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+ u32 *rx_ant)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -6839,7 +6840,7 @@ static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
priv->vifs[rtlvif->port_num] = NULL;
}
-static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+static int rtl8xxxu_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
@@ -6988,7 +6989,8 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
FIF_PROBE_REQ);
}
-static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
+static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 rts)
{
if (rts > 2347 && rts != (u32)-1)
return -EINVAL;
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 819cf519e66e..22633c301564 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -566,7 +566,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
}
#endif
-static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
+static int rtl_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 898f597f70a9..d080469264cf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -572,8 +572,11 @@ remap:
dma_map_single(&rtlpci->pdev->dev, skb_tail_pointer(skb),
rtlpci->rxbuffersize, DMA_FROM_DEVICE);
bufferaddress = *((dma_addr_t *)skb->cb);
- if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress))
+ if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress)) {
+ if (!new_skb)
+ kfree_skb(skb);
return 0;
+ }
rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
/* skb->cb may be 64 bit address */
@@ -802,13 +805,19 @@ new_trx_end:
skb = new_skb;
no_new:
if (rtlpriv->use_new_trx_flow) {
- _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
- rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
+ rxring_idx,
+ rtlpci->rx_ring[rxring_idx].idx)) {
+ if (new_skb)
+ dev_kfree_skb_any(skb);
+ }
} else {
- _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
- rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ if (!_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+ rxring_idx,
+ rtlpci->rx_ring[rxring_idx].idx)) {
+ if (new_skb)
+ dev_kfree_skb_any(skb);
+ }
if (rtlpci->rx_ring[rxring_idx].idx ==
rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index d122f1eb345e..53c32e1de7e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1738,9 +1738,9 @@ static void read_power_value_fromprom(struct ieee80211_hw *hw,
}
}
-static void _rtl88ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail,
- u8 *hwinfo)
+static noinline_for_stack void
+_rtl88ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 2ad4523d1bef..79c6e0901e57 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -190,7 +190,7 @@ static bool rtl88e_get_btc_status(void)
return false;
}
-static struct rtl_hal_ops rtl8188ee_hal_ops = {
+static const struct rtl_hal_ops rtl8188ee_hal_ops = {
.init_sw_vars = rtl88e_init_sw_vars,
.deinit_sw_vars = rtl88e_deinit_sw_vars,
.read_eeprom_info = rtl88ee_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 0bc915723b93..5ca6b49e73c7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1412,9 +1412,9 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
rtl92ce_enable_interrupt(hw);
}
-static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail,
- u8 *hwinfo)
+static noinline_for_stack void
+_rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index ce7c28d9c874..f06b159f975d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -167,7 +167,7 @@ static void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
}
}
-static struct rtl_hal_ops rtl8192ce_hal_ops = {
+static const struct rtl_hal_ops rtl8192ce_hal_ops = {
.init_sw_vars = rtl92c_init_sw_vars,
.deinit_sw_vars = rtl92c_deinit_sw_vars,
.read_eeprom_info = rtl92ce_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 0195c9a3e9e8..ec5d558609fe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -95,9 +95,9 @@ static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw)
}
}
-static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail,
- u8 *hwinfo)
+static noinline_for_stack void
+_rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index c9b9e2bc90cc..00a6778df704 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -81,7 +81,7 @@ static bool rtl92cu_get_btc_status(void)
return false;
}
-static struct rtl_hal_ops rtl8192cu_hal_ops = {
+static const struct rtl_hal_ops rtl8192cu_hal_ops = {
.init_sw_vars = rtl92cu_init_sw_vars,
.deinit_sw_vars = rtl92cu_deinit_sw_vars,
.read_chip_version = rtl92c_read_chip_version,
@@ -156,7 +156,7 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
.usb_mq_to_hwq = rtl8192cu_mq_to_hwq,
};
-static struct rtl_hal_cfg rtl92cu_hal_cfg = {
+static const struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
.alt_fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index e36e4aeb9a95..7612c22a9842 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -184,7 +184,7 @@ static void rtl92d_deinit_sw_vars(struct ieee80211_hw *hw)
skb_queue_purge(&rtlpriv->mac80211.skb_waitq[tid]);
}
-static struct rtl_hal_ops rtl8192de_hal_ops = {
+static const struct rtl_hal_ops rtl8192de_hal_ops = {
.init_sw_vars = rtl92d_init_sw_vars,
.deinit_sw_vars = rtl92d_deinit_sw_vars,
.read_eeprom_info = rtl92d_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index d4da5cdc8414..48a3c94606be 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1731,7 +1731,7 @@ void rtl92ee_update_interrupt_mask(struct ieee80211_hw *hw,
rtl92ee_enable_interrupt(hw);
}
-static u8 _rtl92ee_get_chnl_group(u8 chnl)
+static __always_inline u8 _rtl92ee_get_chnl_group(u8 chnl)
{
u8 group = 0;
@@ -2009,8 +2009,9 @@ static void _rtl8192ee_read_power_value_fromprom(struct ieee80211_hw *hw,
}
}
-static void _rtl92ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail, u8 *hwinfo)
+static noinline_for_stack void
+_rtl92ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *efu = rtl_efuse(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 162e734d5b08..181dd7823b26 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -176,7 +176,7 @@ static bool rtl92ee_get_btc_status(void)
return true;
}
-static struct rtl_hal_ops rtl8192ee_hal_ops = {
+static const struct rtl_hal_ops rtl8192ee_hal_ops = {
.init_sw_vars = rtl92ee_init_sw_vars,
.deinit_sw_vars = rtl92ee_deinit_sw_vars,
.read_eeprom_info = rtl92ee_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index e63c67b1861b..1cf801feb45e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -221,7 +221,7 @@ static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue,
return true;
}
-static struct rtl_hal_ops rtl8192se_hal_ops = {
+static const struct rtl_hal_ops rtl8192se_hal_ops = {
.init_sw_vars = rtl92s_init_sw_vars,
.deinit_sw_vars = rtl92s_deinit_sw_vars,
.read_eeprom_info = rtl92se_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 6991713a66d0..21b827f519b6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1381,9 +1381,9 @@ static u8 _rtl8723e_get_chnl_group(u8 chnl)
return group;
}
-static void _rtl8723e_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail,
- u8 *hwinfo)
+static noinline_for_stack void
+_rtl8723e_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 048744166a92..dcd7cdb96aa4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -183,7 +183,7 @@ static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300;
}
-static struct rtl_hal_ops rtl8723e_hal_ops = {
+static const struct rtl_hal_ops rtl8723e_hal_ops = {
.init_sw_vars = rtl8723e_init_sw_vars,
.deinit_sw_vars = rtl8723e_deinit_sw_vars,
.read_eeprom_info = rtl8723e_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index bcfc53af4c1a..e1f811218894 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1935,9 +1935,9 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw,
}
}
-static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail,
- u8 *hwinfo)
+static noinline_for_stack void
+_rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 0a92d0325098..5967df08e34e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -187,7 +187,7 @@ static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300;
}
-static struct rtl_hal_ops rtl8723be_hal_ops = {
+static const struct rtl_hal_ops rtl8723be_hal_ops = {
.init_sw_vars = rtl8723be_init_sw_vars,
.deinit_sw_vars = rtl8723be_deinit_sw_vars,
.read_eeprom_info = rtl8723be_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index f4b232f038a9..a5a34b5edcfd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2782,9 +2782,9 @@ static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
"eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
}
#endif
-static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
- bool autoload_fail,
- u8 *hwinfo)
+static noinline_for_stack void
+_rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
@@ -3064,10 +3064,12 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- int params[] = {RTL_EEPROM_ID, EEPROM_VID, EEPROM_DID,
- EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
- EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
- COUNTRY_CODE_WORLD_WIDE_13};
+ static const int params[] = {
+ RTL_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13
+ };
u8 *hwinfo;
if (b_pseudo_test) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index b5266e560416..1557d32efdd2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -229,7 +229,7 @@ static bool rtl8821ae_get_btc_status(void)
return true;
}
-static struct rtl_hal_ops rtl8821ae_hal_ops = {
+static const struct rtl_hal_ops rtl8821ae_hal_ops = {
.init_sw_vars = rtl8821ae_init_sw_vars,
.deinit_sw_vars = rtl8821ae_deinit_sw_vars,
.read_eeprom_info = rtl8821ae_read_eeprom_info,
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 4fc78b882080..c68a9fff6808 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -521,7 +521,7 @@ rtw_fw_send_general_info(struct rtw_dev *rtwdev)
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + 4;
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
return;
rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
@@ -544,7 +544,7 @@ rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
u16 total_size = H2C_PKT_HDR_SIZE + 8;
u8 fw_rf_type = 0;
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
return;
if (hal->rf_type == RF_1T1R)
@@ -1480,7 +1480,7 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
bckp[2] = rtw_read8(rtwdev, REG_BCN_CTRL);
- if (rtw_chip_wcpu_11n(rtwdev)) {
+ if (rtw_chip_wcpu_8051(rtwdev)) {
rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
} else {
pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
@@ -1509,7 +1509,7 @@ int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
goto restore;
}
- if (rtw_chip_wcpu_11n(rtwdev)) {
+ if (rtw_chip_wcpu_8051(rtwdev)) {
bcn_valid_addr = REG_DWBCN0_CTRL;
bcn_valid_mask = BIT_BCN_VALID;
} else {
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index f66d1b302dc5..011b81c82f3b 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -41,7 +41,7 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
}
rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
return;
value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
@@ -67,7 +67,7 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_RSV_CTRL, 0);
- if (rtw_chip_wcpu_11n(rtwdev)) {
+ if (rtw_chip_wcpu_8051(rtwdev)) {
if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
else
@@ -278,7 +278,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
bool cur_pwr;
int ret;
- if (rtw_chip_wcpu_11ac(rtwdev)) {
+ if (rtw_chip_wcpu_3081(rtwdev)) {
rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
/* Check FW still exist or not */
@@ -369,7 +369,7 @@ static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
return __rtw_mac_init_system_cfg_legacy(rtwdev);
return __rtw_mac_init_system_cfg(rtwdev);
@@ -981,7 +981,7 @@ out:
static
int _rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
return __rtw_download_firmware_legacy(rtwdev, fw);
return __rtw_download_firmware(rtwdev, fw);
@@ -1122,7 +1122,7 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
rtw_write8(rtwdev, REG_CR, 0);
rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
- if (rtw_chip_wcpu_11ac(rtwdev))
+ if (rtw_chip_wcpu_3081(rtwdev))
rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
@@ -1145,7 +1145,7 @@ int rtw_set_trx_fifo_info(struct rtw_dev *rtwdev)
/* config rsvd page num */
fifo->rsvd_drv_pg_num = chip->rsvd_drv_pg_num;
fifo->txff_pg_num = chip->txff_size / chip->page_size;
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
else
fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
@@ -1163,7 +1163,7 @@ int rtw_set_trx_fifo_info(struct rtw_dev *rtwdev)
fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
cur_pg_addr = fifo->txff_pg_num;
- if (rtw_chip_wcpu_11ac(rtwdev)) {
+ if (rtw_chip_wcpu_3081(rtwdev)) {
cur_pg_addr -= csi_buf_pg_num;
fifo->rsvd_csibuf_addr = cur_pg_addr;
cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
@@ -1292,7 +1292,7 @@ static int priority_queue_cfg(struct rtw_dev *rtwdev)
pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
else
return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
@@ -1308,7 +1308,7 @@ static int init_h2c(struct rtw_dev *rtwdev)
u32 h2cq_free;
u32 wp, rp;
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
return 0;
h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
@@ -1375,7 +1375,7 @@ static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
u8 value8;
rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
- if (rtw_chip_wcpu_11ac(rtwdev)) {
+ if (rtw_chip_wcpu_3081(rtwdev)) {
value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
value8 &= 0xF0;
/* For rxdesc len = 0 issue */
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 77f9fbe1870c..766f22d31079 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -71,7 +71,7 @@ static void rtw_ops_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&rtwdev->mutex);
}
-static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
+static int rtw_ops_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct rtw_dev *rtwdev = hw->priv;
int ret = 0;
@@ -708,7 +708,8 @@ static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
-static int rtw_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int rtw_ops_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -797,6 +798,7 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
}
static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
+ int radio_idx,
u32 tx_antenna,
u32 rx_antenna)
{
@@ -808,13 +810,14 @@ static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
mutex_lock(&rtwdev->mutex);
- ret = chip->ops->set_antenna(rtwdev, tx_antenna, rx_antenna);
+ ret = chip->ops->set_antenna(rtwdev, radio_idx, tx_antenna, rx_antenna);
mutex_unlock(&rtwdev->mutex);
return ret;
}
static int rtw_ops_get_antenna(struct ieee80211_hw *hw,
+ int radio_idx,
u32 *tx_antenna,
u32 *rx_antenna)
{
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index c4de5d114eda..97756bdf57b2 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -636,6 +636,7 @@ void rtw_fw_recovery(struct rtw_dev *rtwdev)
if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work);
}
+EXPORT_SYMBOL(rtw_fw_recovery);
static void __fw_recovery_work(struct rtw_dev *rtwdev)
{
@@ -1765,7 +1766,7 @@ static void __update_firmware_info_legacy(struct rtw_dev *rtwdev,
static void update_firmware_info(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
__update_firmware_info_legacy(rtwdev, fw);
else
__update_firmware_info(rtwdev, fw);
@@ -2218,7 +2219,6 @@ EXPORT_SYMBOL(rtw_core_deinit);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
- bool sta_mode_only = rtwdev->hci.type == RTW_HCI_TYPE_SDIO;
struct rtw_hal *hal = &rtwdev->hal;
int max_tx_headroom = 0;
int ret;
@@ -2248,12 +2248,9 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
- if (sta_mode_only)
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- else
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_ADHOC);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->available_antennas_tx = hal->antenna_tx;
hw->wiphy->available_antennas_rx = hal->antenna_rx;
@@ -2264,7 +2261,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev);
- if (!sta_mode_only && rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
hw->wiphy->iface_combinations = rtw_iface_combs;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw_iface_combs);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index b0f1fabe9554..b42538cce359 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -873,7 +873,7 @@ struct rtw_chip_ops {
void (*set_tx_power_index)(struct rtw_dev *rtwdev);
int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset,
u32 size);
- int (*set_antenna)(struct rtw_dev *rtwdev,
+ int (*set_antenna)(struct rtw_dev *rtwdev, int radio_idx,
u32 antenna_tx,
u32 antenna_rx);
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
@@ -1173,8 +1173,8 @@ struct rtw_pwr_track_tbl {
};
enum rtw_wlan_cpu {
- RTW_WCPU_11AC,
- RTW_WCPU_11N,
+ RTW_WCPU_3081,
+ RTW_WCPU_8051,
};
enum rtw_fw_fifo_sel {
@@ -2166,14 +2166,14 @@ static inline void rtw_chip_efuse_grant_off(struct rtw_dev *rtwdev)
rtwdev->chip->ops->efuse_grant(rtwdev, false);
}
-static inline bool rtw_chip_wcpu_11n(struct rtw_dev *rtwdev)
+static inline bool rtw_chip_wcpu_8051(struct rtw_dev *rtwdev)
{
- return rtwdev->chip->wlan_cpu == RTW_WCPU_11N;
+ return rtwdev->chip->wlan_cpu == RTW_WCPU_8051;
}
-static inline bool rtw_chip_wcpu_11ac(struct rtw_dev *rtwdev)
+static inline bool rtw_chip_wcpu_3081(struct rtw_dev *rtwdev)
{
- return rtwdev->chip->wlan_cpu == RTW_WCPU_11AC;
+ return rtwdev->chip->wlan_cpu == RTW_WCPU_3081;
}
static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 7f2b6dc21f56..56b16186d3aa 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -405,7 +405,7 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
- if (!rtw_chip_wcpu_11n(rtwdev)) {
+ if (!rtw_chip_wcpu_8051(rtwdev)) {
len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
@@ -467,7 +467,7 @@ static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
/* reset H2C Queue index in a single write */
- if (rtw_chip_wcpu_11ac(rtwdev))
+ if (rtw_chip_wcpu_3081(rtwdev))
rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}
@@ -487,7 +487,7 @@ static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
- if (rtw_chip_wcpu_11ac(rtwdev))
+ if (rtw_chip_wcpu_3081(rtwdev))
rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
rtwpci->irq_enabled = true;
@@ -507,7 +507,7 @@ static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
- if (rtw_chip_wcpu_11ac(rtwdev))
+ if (rtw_chip_wcpu_3081(rtwdev))
rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
rtwpci->irq_enabled = false;
@@ -1125,7 +1125,7 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
- if (rtw_chip_wcpu_11ac(rtwdev))
+ if (rtw_chip_wcpu_3081(rtwdev))
irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
else
irq_status[3] = 0;
@@ -1134,7 +1134,7 @@ static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
irq_status[3] &= rtwpci->irq_mask[3];
rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
- if (rtw_chip_wcpu_11ac(rtwdev))
+ if (rtw_chip_wcpu_3081(rtwdev))
rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
@@ -1707,6 +1707,43 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
free_netdev(rtwpci->netdev);
}
+static pci_ers_result_t rtw_pci_io_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+
+ ieee80211_stop_queues(hw);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t rtw_pci_io_slot_reset(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtw_dev *rtwdev = hw->priv;
+
+ rtw_fw_recovery(rtwdev);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void rtw_pci_io_resume(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, PCI_D0, false);
+
+ ieee80211_wake_queues(hw);
+}
+
+const struct pci_error_handlers rtw_pci_err_handler = {
+ .error_detected = rtw_pci_io_err_detected,
+ .slot_reset = rtw_pci_io_slot_reset,
+ .resume = rtw_pci_io_resume,
+};
+EXPORT_SYMBOL(rtw_pci_err_handler);
+
int rtw_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 13988db1cb4c..8ffdea11378f 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -231,6 +231,7 @@ struct rtw_pci {
};
extern const struct dev_pm_ops rtw_pm_ops;
+extern const struct pci_error_handlers rtw_pci_err_handler;
int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
void rtw_pci_remove(struct pci_dev *pdev);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
index 9e6700c43a63..03475af973b5 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -1882,7 +1882,7 @@ const struct rtw_chip_info rtw8703b_hw_spec = {
.id = RTW_CHIP_TYPE_8703B,
.fw_name = "rtw88/rtw8703b_fw.bin",
- .wlan_cpu = RTW_WCPU_11N,
+ .wlan_cpu = RTW_WCPU_8051,
.tx_pkt_desc_sz = 40,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 31876e708f9e..bf69f5b06ce2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2116,7 +2116,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.ops = &rtw8723d_ops,
.id = RTW_CHIP_TYPE_8723D,
.fw_name = "rtw88/rtw8723d_fw.bin",
- .wlan_cpu = RTW_WCPU_11N,
+ .wlan_cpu = RTW_WCPU_8051,
.tx_pkt_desc_sz = 40,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
index 87c8bc9d18a9..c6d0c88e5d81 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723de.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c
@@ -23,6 +23,7 @@ static struct pci_driver rtw_8723de_driver = {
.remove = rtw_pci_remove,
.driver.pm = &rtw_pm_ops,
.shutdown = rtw_pci_shutdown,
+ .err_handler = &rtw_pci_err_handler,
};
module_pci_driver(rtw_8723de_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
index c2ef41767ff9..03b441639611 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8812a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c
@@ -1038,7 +1038,7 @@ const struct rtw_chip_info rtw8812a_hw_spec = {
.ops = &rtw8812a_ops,
.id = RTW_CHIP_TYPE_8812A,
.fw_name = "rtw88/rtw8812a_fw.bin",
- .wlan_cpu = RTW_WCPU_11N,
+ .wlan_cpu = RTW_WCPU_8051,
.tx_pkt_desc_sz = 40,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
index 44dd3090484b..4a1f850d05c8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8814a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
@@ -2180,7 +2180,7 @@ const struct rtw_chip_info rtw8814a_hw_spec = {
.ops = &rtw8814a_ops,
.id = RTW_CHIP_TYPE_8814A,
.fw_name = "rtw88/rtw8814a_fw.bin",
- .wlan_cpu = RTW_WCPU_11AC,
+ .wlan_cpu = RTW_WCPU_3081,
.tx_pkt_desc_sz = 40,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
index 413aec694c33..1d02ea400b2e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821a.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c
@@ -1138,7 +1138,7 @@ const struct rtw_chip_info rtw8821a_hw_spec = {
.ops = &rtw8821a_ops,
.id = RTW_CHIP_TYPE_8821A,
.fw_name = "rtw88/rtw8821a_fw.bin",
- .wlan_cpu = RTW_WCPU_11N,
+ .wlan_cpu = RTW_WCPU_8051,
.tx_pkt_desc_sz = 40,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 413130a30ca9..a2a358d6033f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1973,7 +1973,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.ops = &rtw8821c_ops,
.id = RTW_CHIP_TYPE_8821C,
.fw_name = "rtw88/rtw8821c_fw.bin",
- .wlan_cpu = RTW_WCPU_11AC,
+ .wlan_cpu = RTW_WCPU_3081,
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
index 40637c079d99..52a19cb17daa 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
@@ -27,6 +27,7 @@ static struct pci_driver rtw_8821ce_driver = {
.remove = rtw_pci_remove,
.driver.pm = &rtw_pm_ops,
.shutdown = rtw_pci_shutdown,
+ .err_handler = &rtw_pci_err_handler,
};
module_pci_driver(rtw_8821ce_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index ab199eaea3c7..bb5c41905afe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -983,6 +983,7 @@ static bool rtw8822b_check_rf_path(u8 antenna)
}
static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
+ int radio_idx,
u32 antenna_tx,
u32 antenna_rx)
{
@@ -2513,7 +2514,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
.fw_name = "rtw88/rtw8822b_fw.bin",
- .wlan_cpu = RTW_WCPU_11AC,
+ .wlan_cpu = RTW_WCPU_3081,
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
index 0bb9f70e7920..dda597d73219 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822be.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c
@@ -23,6 +23,7 @@ static struct pci_driver rtw_8822be_driver = {
.remove = rtw_pci_remove,
.driver.pm = &rtw_pm_ops,
.shutdown = rtw_pci_shutdown,
+ .err_handler = &rtw_pci_err_handler,
};
module_pci_driver(rtw_8822be_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 017d959de3ce..58c1958e6170 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -2767,6 +2767,7 @@ static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
}
static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
+ int radio_idx,
u32 antenna_tx,
u32 antenna_rx)
{
@@ -5332,7 +5333,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
.fw_name = "rtw88/rtw8822c_fw.bin",
- .wlan_cpu = RTW_WCPU_11AC,
+ .wlan_cpu = RTW_WCPU_3081,
.tx_pkt_desc_sz = 48,
.tx_buf_desc_sz = 16,
.rx_pkt_desc_sz = 24,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
index 9def732480af..7ae95415c224 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c
@@ -27,6 +27,7 @@ static struct pci_driver rtw_8822ce_driver = {
.remove = rtw_pci_remove,
.driver.pm = &rtw_pm_ops,
.shutdown = rtw_pci_shutdown,
+ .err_handler = &rtw_pci_err_handler,
};
module_pci_driver(rtw_8822ce_driver);
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index e733ed846123..cc2d4fef3587 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -547,7 +547,7 @@ static int rtw_sdio_check_free_txpg(struct rtw_dev *rtwdev, u8 queue,
{
unsigned int pages_free, pages_needed;
- if (rtw_chip_wcpu_11n(rtwdev)) {
+ if (rtw_chip_wcpu_8051(rtwdev)) {
u32 free_txpg;
free_txpg = rtw_sdio_read32(rtwdev, REG_SDIO_FREE_TXPG);
@@ -1030,7 +1030,7 @@ static void rtw_sdio_rx_isr(struct rtw_dev *rtwdev)
u32 rx_len, hisr, total_rx_bytes = 0;
do {
- if (rtw_chip_wcpu_11n(rtwdev))
+ if (rtw_chip_wcpu_8051(rtwdev))
rx_len = rtw_read16(rtwdev, REG_SDIO_RX0_REQ_LEN);
else
rx_len = rtw_read32(rtwdev, REG_SDIO_RX0_REQ_LEN);
@@ -1042,7 +1042,7 @@ static void rtw_sdio_rx_isr(struct rtw_dev *rtwdev)
total_rx_bytes += rx_len;
- if (rtw_chip_wcpu_11n(rtwdev)) {
+ if (rtw_chip_wcpu_8051(rtwdev)) {
/* Stop if no more RX requests are pending, even if
* rx_len could be greater than zero in the next
* iteration. This is needed because the RX buffer may
@@ -1054,7 +1054,7 @@ static void rtw_sdio_rx_isr(struct rtw_dev *rtwdev)
*/
hisr = rtw_read32(rtwdev, REG_SDIO_HISR);
} else {
- /* RTW_WCPU_11AC chips have improved hardware or
+ /* RTW_WCPU_3081 chips have improved hardware or
* firmware and can use rx_len unconditionally.
*/
hisr = REG_SDIO_HISR_RX_REQUEST;
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 806f42429a29..6f10235647a1 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -170,22 +170,26 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx idx,
- const struct cfg80211_chan_def *chandef,
- bool from_stack)
+ const struct cfg80211_chan_def *chandef)
{
struct rtw89_hal *hal = &rtwdev->hal;
hal->chanctx[idx].chandef = *chandef;
-
- if (from_stack)
- set_bit(idx, hal->entity_map);
}
void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx idx,
const struct cfg80211_chan_def *chandef)
{
- __rtw89_config_entity_chandef(rtwdev, idx, chandef, true);
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (!chandef) {
+ clear_bit(idx, hal->entity_map);
+ return;
+ }
+
+ __rtw89_config_entity_chandef(rtwdev, idx, chandef);
+ set_bit(idx, hal->entity_map);
}
void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
@@ -227,7 +231,7 @@ static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
struct cfg80211_chan_def chandef = {0};
rtw89_get_default_chandef(&chandef);
- __rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0, &chandef, false);
+ __rtw89_config_entity_chandef(rtwdev, RTW89_CHANCTX_0, &chandef);
}
void rtw89_entity_init(struct rtw89_dev *rtwdev)
@@ -265,6 +269,8 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif;
int idx;
+ w->registered_chanctxs = bitmap_weight(hal->entity_map, NUM_OF_RTW89_CHANCTX);
+
for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) {
cfg = hal->chanctx[idx].cfg;
if (!cfg) {
@@ -473,7 +479,8 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
bitmap_zero(recalc_map, NUM_OF_RTW89_CHANCTX);
fallthrough;
case 0:
- rtw89_config_default_chandef(rtwdev);
+ if (!w.registered_chanctxs)
+ rtw89_config_default_chandef(rtwdev);
set_bit(RTW89_CHANCTX_0, recalc_map);
fallthrough;
case 1:
@@ -953,6 +960,7 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
}
sel.bind_vif[i] = rtwvif_link;
+ rtw89_p2p_disable_all_noa(rtwdev, rtwvif_link, NULL);
}
ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel);
@@ -1265,6 +1273,8 @@ static int __rtw89_mcc_calc_pattern_anchor(struct rtw89_dev *rtwdev,
if (bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
small_bcn_ofst = true;
+ else if (bcn_ofst < aux->duration - aux->limit.max_toa)
+ small_bcn_ofst = true;
else if (mcc_intvl - bcn_ofst < RTW89_MCC_MIN_RX_BCN_TIME)
small_bcn_ofst = false;
else
@@ -1595,6 +1605,35 @@ static bool rtw89_mcc_duration_decision_on_bt(struct rtw89_dev *rtwdev)
return false;
}
+void rtw89_mcc_prepare_done_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ mcc_prepare_done_work.work);
+
+ lockdep_assert_wiphy(wiphy);
+
+ ieee80211_wake_queues(rtwdev->hw);
+}
+
+static void rtw89_mcc_prepare(struct rtw89_dev *rtwdev, bool start)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_config *config = &mcc->config;
+
+ if (start) {
+ ieee80211_stop_queues(rtwdev->hw);
+
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->mcc_prepare_done_work,
+ usecs_to_jiffies(config->prepare_delay));
+ } else {
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->mcc_prepare_done_work, 0);
+ wiphy_delayed_work_flush(rtwdev->hw->wiphy,
+ &rtwdev->mcc_prepare_done_work);
+ }
+}
+
static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1630,6 +1669,8 @@ static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev)
config->start_tsf = start_tsf;
config->start_tsf_in_aux_domain = tsf_aux + start_tsf - tsf;
+ config->prepare_delay = start_tsf - tsf;
+
return 0;
}
@@ -2177,6 +2218,18 @@ static void rtw89_mcc_stop_beacon_noa(struct rtw89_dev *rtwdev)
rtw89_mcc_handle_beacon_noa(rtwdev, false);
}
+static bool rtw89_mcc_ignore_bcn(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role)
+{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+
+ if (role->is_go)
+ return true;
+ else if (chip_gen == RTW89_CHIP_BE && role->is_gc)
+ return true;
+ else
+ return false;
+}
+
static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -2200,6 +2253,15 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
else
mcc->mode = RTW89_MCC_MODE_GC_STA;
+ if (rtw89_mcc_ignore_bcn(rtwdev, ref)) {
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, aux->rtwvif_link, false);
+ } else if (rtw89_mcc_ignore_bcn(rtwdev, aux)) {
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, ref->rtwvif_link, false);
+ } else {
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, ref->rtwvif_link, true);
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, aux->rtwvif_link, true);
+ }
+
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC sel mode: %d\n", mcc->mode);
mcc->group = RTW89_MCC_DFLT_GROUP;
@@ -2219,6 +2281,8 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_START);
rtw89_mcc_start_beacon_noa(rtwdev);
+
+ rtw89_mcc_prepare(rtwdev, true);
return 0;
}
@@ -2307,6 +2371,8 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev,
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
rtw89_mcc_stop_beacon_noa(rtwdev);
+
+ rtw89_mcc_prepare(rtwdev, false);
}
static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
@@ -2362,15 +2428,44 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_mcc_detect_connection(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *role)
+{
+ struct ieee80211_vif *vif;
+ int ret;
+
+ ret = rtw89_core_send_nullfunc(rtwdev, role->rtwvif_link, true, false,
+ RTW89_MCC_PROBE_TIMEOUT);
+ if (ret)
+ role->probe_count++;
+ else
+ role->probe_count = 0;
+
+ if (role->probe_count < RTW89_MCC_PROBE_MAX_TRIES)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC <macid %d> can not detect AP\n", role->rtwvif_link->mac_id);
+ vif = rtwvif_link_to_vif(role->rtwvif_link);
+ ieee80211_connection_loss(vif);
+}
+
static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_config *config = &mcc->config;
struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
u16 tolerance;
u16 bcn_ofst;
u16 diff;
+ if (rtw89_mcc_ignore_bcn(rtwdev, ref))
+ rtw89_mcc_detect_connection(rtwdev, aux);
+ else if (rtw89_mcc_ignore_bcn(rtwdev, aux))
+ rtw89_mcc_detect_connection(rtwdev, ref);
+
if (mcc->mode != RTW89_MCC_MODE_GC_STA)
return;
@@ -2619,6 +2714,201 @@ void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_CHANGE_DFLT);
}
+static enum rtw89_mr_wtype __rtw89_query_mr_wtype(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
+ enum rtw89_chanctx_idx chanctx_idx;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+ unsigned int num_mld = 0;
+ unsigned int num_ml = 0;
+ unsigned int cnt = 0;
+ u8 role_idx;
+ u8 idx;
+
+ for (role_idx = 0; role_idx < RTW89_MAX_INTERFACE_NUM; role_idx++) {
+ rtwvif = mgnt->active_roles[role_idx];
+ if (!rtwvif)
+ continue;
+
+ cnt++;
+
+ vif = rtwvif_to_vif(rtwvif);
+ if (!ieee80211_vif_is_mld(vif))
+ continue;
+
+ num_mld++;
+
+ for (idx = 0; idx < __RTW89_MLD_MAX_LINK_NUM; idx++) {
+ chanctx_idx = mgnt->chanctx_tbl[role_idx][idx];
+ if (chanctx_idx != RTW89_CHANCTX_IDLE)
+ num_ml++;
+ }
+ }
+
+ if (num_mld > 1)
+ goto err;
+
+ switch (cnt) {
+ case 0:
+ return RTW89_MR_WTYPE_NONE;
+ case 1:
+ if (!num_mld)
+ return RTW89_MR_WTYPE_NONMLD;
+ switch (num_ml) {
+ case 1:
+ return RTW89_MR_WTYPE_MLD1L1R;
+ case 2:
+ return RTW89_MR_WTYPE_MLD2L1R;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ if (!num_mld)
+ return RTW89_MR_WTYPE_NONMLD_NONMLD;
+ switch (num_ml) {
+ case 1:
+ return RTW89_MR_WTYPE_MLD1L1R_NONMLD;
+ case 2:
+ return RTW89_MR_WTYPE_MLD2L1R_NONMLD;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+err:
+ rtw89_warn(rtwdev, "%s: unhandled cnt %u mld %u ml %u\n", __func__,
+ cnt, num_mld, num_ml);
+ return RTW89_MR_WTYPE_UNKNOWN;
+}
+
+static enum rtw89_mr_wmode __rtw89_query_mr_wmode(struct rtw89_dev *rtwdev,
+ u8 inst_idx)
+{
+ struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
+ unsigned int num[NUM_NL80211_IFTYPES] = {};
+ enum rtw89_chanctx_idx chanctx_idx;
+ struct ieee80211_vif *vif;
+ struct rtw89_vif *rtwvif;
+ unsigned int cnt = 0;
+ u8 role_idx;
+
+ if (unlikely(inst_idx >= __RTW89_MLD_MAX_LINK_NUM))
+ return RTW89_MR_WMODE_UNKNOWN;
+
+ for (role_idx = 0; role_idx < RTW89_MAX_INTERFACE_NUM; role_idx++) {
+ chanctx_idx = mgnt->chanctx_tbl[role_idx][inst_idx];
+ if (chanctx_idx == RTW89_CHANCTX_IDLE)
+ continue;
+
+ rtwvif = mgnt->active_roles[role_idx];
+ if (unlikely(!rtwvif))
+ continue;
+
+ vif = rtwvif_to_vif(rtwvif);
+ num[vif->type]++;
+ cnt++;
+ }
+
+ switch (cnt) {
+ case 0:
+ return RTW89_MR_WMODE_NONE;
+ case 1:
+ if (num[NL80211_IFTYPE_STATION])
+ return RTW89_MR_WMODE_1CLIENT;
+ if (num[NL80211_IFTYPE_AP])
+ return RTW89_MR_WMODE_1AP;
+ break;
+ case 2:
+ if (num[NL80211_IFTYPE_STATION] == 2)
+ return RTW89_MR_WMODE_2CLIENTS;
+ if (num[NL80211_IFTYPE_AP] == 2)
+ return RTW89_MR_WMODE_2APS;
+ if (num[NL80211_IFTYPE_STATION] && num[NL80211_IFTYPE_AP])
+ return RTW89_MR_WMODE_1AP_1CLIENT;
+ break;
+ default:
+ break;
+ }
+
+ rtw89_warn(rtwdev, "%s: unhandled cnt %u\n", __func__, cnt);
+ return RTW89_MR_WMODE_UNKNOWN;
+}
+
+static enum rtw89_mr_ctxtype __rtw89_query_mr_ctxtype(struct rtw89_dev *rtwdev,
+ u8 inst_idx)
+{
+ struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
+ DECLARE_BITMAP(map, NUM_OF_RTW89_CHANCTX) = {};
+ unsigned int num[RTW89_BAND_NUM] = {};
+ enum rtw89_chanctx_idx chanctx_idx;
+ const struct rtw89_chan *chan;
+ unsigned int cnt = 0;
+ u8 role_idx;
+
+ if (unlikely(inst_idx >= __RTW89_MLD_MAX_LINK_NUM))
+ return RTW89_MR_CTX_UNKNOWN;
+
+ for (role_idx = 0; role_idx < RTW89_MAX_INTERFACE_NUM; role_idx++) {
+ chanctx_idx = mgnt->chanctx_tbl[role_idx][inst_idx];
+ if (chanctx_idx == RTW89_CHANCTX_IDLE)
+ continue;
+
+ if (__test_and_set_bit(chanctx_idx, map))
+ continue;
+
+ chan = rtw89_chan_get(rtwdev, chanctx_idx);
+ num[chan->band_type]++;
+ cnt++;
+ }
+
+ switch (cnt) {
+ case 0:
+ return RTW89_MR_CTX_NONE;
+ case 1:
+ if (num[RTW89_BAND_2G])
+ return RTW89_MR_CTX1_2GHZ;
+ if (num[RTW89_BAND_5G])
+ return RTW89_MR_CTX1_5GHZ;
+ if (num[RTW89_BAND_6G])
+ return RTW89_MR_CTX1_6GHZ;
+ break;
+ case 2:
+ if (num[RTW89_BAND_2G] == 2)
+ return RTW89_MR_CTX2_2GHZ;
+ if (num[RTW89_BAND_5G] == 2)
+ return RTW89_MR_CTX2_5GHZ;
+ if (num[RTW89_BAND_6G] == 2)
+ return RTW89_MR_CTX2_6GHZ;
+ if (num[RTW89_BAND_2G] && num[RTW89_BAND_5G])
+ return RTW89_MR_CTX2_2GHZ_5GHZ;
+ if (num[RTW89_BAND_2G] && num[RTW89_BAND_6G])
+ return RTW89_MR_CTX2_2GHZ_6GHZ;
+ if (num[RTW89_BAND_5G] && num[RTW89_BAND_6G])
+ return RTW89_MR_CTX2_5GHZ_6GHZ;
+ break;
+ default:
+ break;
+ }
+
+ rtw89_warn(rtwdev, "%s: unhandled cnt %u\n", __func__, cnt);
+ return RTW89_MR_CTX_UNKNOWN;
+}
+
+void rtw89_query_mr_chanctx_info(struct rtw89_dev *rtwdev, u8 inst_idx,
+ struct rtw89_mr_chanctx_info *info)
+{
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ info->wtype = __rtw89_query_mr_wtype(rtwdev);
+ info->wmode = __rtw89_query_mr_wmode(rtwdev, inst_idx);
+ info->ctxtype = __rtw89_query_mr_ctxtype(rtwdev, inst_idx);
+}
+
void rtw89_chanctx_track(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
@@ -2782,10 +3072,9 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
- struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
- clear_bit(cfg->idx, hal->entity_map);
+ rtw89_config_entity_chandef(rtwdev, cfg->idx, NULL);
}
void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
@@ -2816,6 +3105,9 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
rtwvif_link->chanctx_assigned = true;
cfg->ref_count++;
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
if (list_empty(&rtwvif->mgnt_entry))
list_add_tail(&rtwvif->mgnt_entry, &mgnt->active_list);
@@ -2855,6 +3147,9 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
rtwvif_link->chanctx_assigned = false;
cfg->ref_count--;
+ if (rtwdev->scanning)
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
+
if (!rtw89_vif_is_active_role(rtwvif))
list_del_init(&rtwvif->mgnt_entry);
@@ -2906,3 +3201,35 @@ out:
break;
}
}
+
+int rtw89_chanctx_ops_reassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_chanctx_conf *old_ctx,
+ struct ieee80211_chanctx_conf *new_ctx,
+ bool replace)
+{
+ int ret;
+
+ rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif_link, old_ctx);
+
+ if (!replace)
+ goto assign;
+
+ rtw89_chanctx_ops_remove(rtwdev, old_ctx);
+ ret = rtw89_chanctx_ops_add(rtwdev, new_ctx);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to add chanctx: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+assign:
+ ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, new_ctx);
+ if (ret) {
+ rtw89_err(rtwdev, "%s: failed to assign chanctx: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 2a25563593af..57355cb3d765 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -19,6 +19,9 @@
#define RTW89_MCC_MIN_RX_BCN_TIME 10
#define RTW89_MCC_DFLT_BCN_OFST_TIME 40
+#define RTW89_MCC_PROBE_TIMEOUT 100
+#define RTW89_MCC_PROBE_MAX_TRIES 3
+
#define RTW89_MCC_MIN_GO_DURATION \
(RTW89_MCC_EARLY_TX_BCN_TIME + RTW89_MCC_MIN_RX_BCN_TIME)
@@ -28,7 +31,7 @@
#define RTW89_MCC_DFLT_GROUP 0
#define RTW89_MCC_NEXT_GROUP(cur) (((cur) + 1) % 4)
-#define RTW89_MCC_DFLT_TX_NULL_EARLY 3
+#define RTW89_MCC_DFLT_TX_NULL_EARLY 7
#define RTW89_MCC_DFLT_COURTESY_SLOT 3
#define RTW89_MCC_REQ_COURTESY_TIME 5
@@ -41,6 +44,49 @@
#define NUM_OF_RTW89_MCC_ROLES 2
+enum rtw89_mr_wtype {
+ RTW89_MR_WTYPE_NONE,
+ RTW89_MR_WTYPE_NONMLD,
+ RTW89_MR_WTYPE_MLD1L1R,
+ RTW89_MR_WTYPE_MLD2L1R,
+ RTW89_MR_WTYPE_MLD2L2R,
+ RTW89_MR_WTYPE_NONMLD_NONMLD,
+ RTW89_MR_WTYPE_MLD1L1R_NONMLD,
+ RTW89_MR_WTYPE_MLD2L1R_NONMLD,
+ RTW89_MR_WTYPE_MLD2L2R_NONMLD,
+ RTW89_MR_WTYPE_UNKNOWN,
+};
+
+enum rtw89_mr_wmode {
+ RTW89_MR_WMODE_NONE,
+ RTW89_MR_WMODE_1CLIENT,
+ RTW89_MR_WMODE_1AP,
+ RTW89_MR_WMODE_1AP_1CLIENT,
+ RTW89_MR_WMODE_2CLIENTS,
+ RTW89_MR_WMODE_2APS,
+ RTW89_MR_WMODE_UNKNOWN,
+};
+
+enum rtw89_mr_ctxtype {
+ RTW89_MR_CTX_NONE,
+ RTW89_MR_CTX1_2GHZ,
+ RTW89_MR_CTX1_5GHZ,
+ RTW89_MR_CTX1_6GHZ,
+ RTW89_MR_CTX2_2GHZ,
+ RTW89_MR_CTX2_5GHZ,
+ RTW89_MR_CTX2_6GHZ,
+ RTW89_MR_CTX2_2GHZ_5GHZ,
+ RTW89_MR_CTX2_2GHZ_6GHZ,
+ RTW89_MR_CTX2_5GHZ_6GHZ,
+ RTW89_MR_CTX_UNKNOWN,
+};
+
+struct rtw89_mr_chanctx_info {
+ enum rtw89_mr_wtype wtype;
+ enum rtw89_mr_wmode wmode;
+ enum rtw89_mr_ctxtype ctxtype;
+};
+
enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
RTW89_CHANCTX_PAUSE_REASON_ROC,
@@ -58,6 +104,7 @@ struct rtw89_chanctx_cb_parm {
};
struct rtw89_entity_weight {
+ unsigned int registered_chanctxs;
unsigned int active_chanctxs;
unsigned int active_roles;
};
@@ -116,6 +163,8 @@ void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev);
void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_changes change);
+void rtw89_query_mr_chanctx_info(struct rtw89_dev *rtwdev, u8 inst_idx,
+ struct rtw89_mr_chanctx_info *info);
void rtw89_chanctx_track(struct rtw89_dev *rtwdev);
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
const struct rtw89_chanctx_pause_parm *parm);
@@ -129,6 +178,8 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
#define rtw89_mgnt_chan_get(rtwdev, link_index) \
__rtw89_mgnt_chan_get(rtwdev, __func__, link_index)
+void rtw89_mcc_prepare_done_work(struct wiphy *wiphy, struct wiphy_work *work);
+
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx);
void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
@@ -142,5 +193,10 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct ieee80211_chanctx_conf *ctx);
+int rtw89_chanctx_ops_reassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_chanctx_conf *old_ctx,
+ struct ieee80211_chanctx_conf *new_ctx,
+ bool replace);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 5ccf0cbaed2f..e4e6daf51a1b 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -10,7 +11,7 @@
#include "ps.h"
#include "reg.h"
-#define RTW89_COEX_VERSION 0x07000413
+#define RTW89_COEX_VERSION 0x09000013
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/
#define BTC_E2G_LIMIT_DEF 80
@@ -138,7 +139,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
.fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
.fwevntrptl = 1, .fwc2hfunc = 2, .drvinfo_type = 1, .info_buf = 1800,
- .max_role_num = 6,
+ .max_role_num = 6, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 8,
},
{RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0),
.fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
@@ -146,7 +147,23 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
.fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
.fwevntrptl = 1, .fwc2hfunc = 2, .drvinfo_type = 1, .info_buf = 1800,
- .max_role_num = 6,
+ .max_role_num = 6, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 8,
+ },
+ {RTL8922A, RTW89_FW_VER_CODE(0, 35, 71, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 8, .frptmap = 4, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .fwc2hfunc = 3, .drvinfo_type = 2, .info_buf = 1800,
+ .max_role_num = 6, .fcxosi = 1, .fcxmlo = 1, .bt_desired = 9,
+ },
+ {RTL8922A, RTW89_FW_VER_CODE(0, 35, 63, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 8, .frptmap = 4, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .fwc2hfunc = 3, .drvinfo_type = 2, .info_buf = 1800,
+ .max_role_num = 6, .fcxosi = 1, .fcxmlo = 1, .bt_desired = 9,
},
{RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
.fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
@@ -154,7 +171,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
.fwlrole = 8, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
.fwevntrptl = 1, .fwc2hfunc = 1, .drvinfo_type = 1, .info_buf = 1800,
- .max_role_num = 6,
+ .max_role_num = 6, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8851B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
@@ -162,7 +179,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1800,
- .max_role_num = 6,
+ .max_role_num = 6, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
@@ -170,7 +187,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1280,
- .max_role_num = 5,
+ .max_role_num = 5, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
@@ -178,7 +195,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1280,
- .max_role_num = 5,
+ .max_role_num = 5, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
@@ -186,7 +203,15 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1280,
- .max_role_num = 5,
+ .max_role_num = 5, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
+ },
+ {RTL8852B, RTW89_FW_VER_CODE(0, 29, 122, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .fwc2hfunc = 2, .drvinfo_type = 1, .info_buf = 1800,
+ .max_role_num = 6, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 8,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
@@ -194,7 +219,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1800,
- .max_role_num = 6,
+ .max_role_num = 6, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
.fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
@@ -202,7 +227,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1800,
- .max_role_num = 6,
+ .max_role_num = 6, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
@@ -210,7 +235,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 1, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1280,
- .max_role_num = 5,
+ .max_role_num = 5, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
@@ -218,7 +243,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 0, .drvinfo_type = 0, .info_buf = 1280,
- .max_role_num = 5,
+ .max_role_num = 5, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
@@ -226,7 +251,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 0, .drvinfo_type = 0, .info_buf = 1024,
- .max_role_num = 5,
+ .max_role_num = 5, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
/* keep it to be the last as default entry */
@@ -236,7 +261,7 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
.fwevntrptl = 0, .fwc2hfunc = 1, .drvinfo_type = 0, .info_buf = 1024,
- .max_role_num = 5,
+ .max_role_num = 5, .fcxosi = 0, .fcxmlo = 0, .bt_desired = 7,
},
};
@@ -269,6 +294,39 @@ static u32 chip_id_to_bt_rom_code_id(u32 id)
}
}
+#define CASE_BTC_MLME_STATE(e) case MLME_##e: return #e
+
+static const char *id_to_mlme_state(u32 id)
+{
+ switch (id) {
+ CASE_BTC_MLME_STATE(NO_LINK);
+ CASE_BTC_MLME_STATE(LINKING);
+ CASE_BTC_MLME_STATE(LINKED);
+ default:
+ return "unknown";
+ }
+}
+
+static char *chip_id_str(u32 id)
+{
+ switch (id) {
+ case RTL8852A:
+ return "RTL8852A";
+ case RTL8852B:
+ return "RTL8852B";
+ case RTL8852C:
+ return "RTL8852C";
+ case RTL8852BT:
+ return "RTL8852BT";
+ case RTL8851B:
+ return "RTL8851B";
+ case RTL8922A:
+ return "RTL8922A";
+ default:
+ return "UNKNOWN";
+ }
+}
+
struct rtw89_btc_btf_tlv {
u8 type;
u8 len;
@@ -291,6 +349,7 @@ enum btc_btf_set_report_en {
RPT_EN_BT_DEVICE_INFO,
RPT_EN_BT_AFH_MAP,
RPT_EN_BT_AFH_MAP_LE,
+ RPT_EN_BT_TX_PWR_LVL,
RPT_EN_FW_STEP_INFO,
RPT_EN_TEST,
RPT_EN_WL_ALL,
@@ -668,6 +727,27 @@ enum btc_wl_link_mode {
BTC_WLINK_MAX
};
+#define CASE_BTC_WL_LINK_MODE(e) case BTC_WLINK_## e: return #e
+
+static const char *id_to_linkmode(u8 id)
+{
+ switch (id) {
+ CASE_BTC_WL_LINK_MODE(NOLINK);
+ CASE_BTC_WL_LINK_MODE(2G_STA);
+ CASE_BTC_WL_LINK_MODE(2G_AP);
+ CASE_BTC_WL_LINK_MODE(2G_GO);
+ CASE_BTC_WL_LINK_MODE(2G_GC);
+ CASE_BTC_WL_LINK_MODE(2G_SCC);
+ CASE_BTC_WL_LINK_MODE(2G_MCC);
+ CASE_BTC_WL_LINK_MODE(25G_MCC);
+ CASE_BTC_WL_LINK_MODE(25G_DBCC);
+ CASE_BTC_WL_LINK_MODE(5G);
+ CASE_BTC_WL_LINK_MODE(OTHER);
+ default:
+ return "unknown";
+ }
+}
+
enum btc_wl_mrole_type {
BTC_WLMROLE_NONE = 0x0,
BTC_WLMROLE_STA_GC,
@@ -833,6 +913,9 @@ static int _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
return ret;
}
+#define BTC_BT_DEF_BR_TX_PWR 4
+#define BTC_BT_DEF_LE_TX_PWR 4
+
static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -901,6 +984,9 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
if (type & BTC_RESET_MDINFO)
memset(&btc->mdinfo, 0, sizeof(btc->mdinfo));
+
+ bt->link_info.bt_txpwr_desc.br_dbm = BTC_BT_DEF_BR_TX_PWR;
+ bt->link_info.bt_txpwr_desc.le_dbm = BTC_BT_DEF_LE_TX_PWR;
}
static u8 _search_reg_index(struct rtw89_dev *rtwdev, u8 mreg_num, u16 reg_type, u32 target)
@@ -1324,6 +1410,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
u8 *prptbuf, u32 index)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ver *fwsubver = &btc->fwinfo.fw_subver;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
@@ -1366,23 +1453,29 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxbtcrpt == 1) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v1);
+ fwsubver->fcxbtcrpt = pfwinfo->rpt_ctrl.finfo.v1.fver;
} else if (ver->fcxbtcrpt == 4) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v4;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v4);
+ fwsubver->fcxbtcrpt = pfwinfo->rpt_ctrl.finfo.v4.fver;
} else if (ver->fcxbtcrpt == 5) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v5;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v5);
+ fwsubver->fcxbtcrpt = pfwinfo->rpt_ctrl.finfo.v5.fver;
} else if (ver->fcxbtcrpt == 105) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v105;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v105);
+ fwsubver->fcxbtcrpt = pfwinfo->rpt_ctrl.finfo.v105.fver;
pcinfo->req_fver = 5;
break;
} else if (ver->fcxbtcrpt == 8) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
+ fwsubver->fcxbtcrpt = pfwinfo->rpt_ctrl.finfo.v8.fver;
} else if (ver->fcxbtcrpt == 7) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
+ fwsubver->fcxbtcrpt = pfwinfo->rpt_ctrl.finfo.v7.fver;
} else {
goto err;
}
@@ -1393,9 +1486,11 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxtdma == 1) {
pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v1);
+ fwsubver->fcxtdma = 0;
} else if (ver->fcxtdma == 3 || ver->fcxtdma == 7) {
pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v3;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v3);
+ fwsubver->fcxtdma = pfwinfo->rpt_fbtc_tdma.finfo.v3.fver;
} else {
goto err;
}
@@ -1406,9 +1501,11 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxslots == 1) {
pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v1);
+ fwsubver->fcxslots = pfwinfo->rpt_fbtc_slots.finfo.v1.fver;
} else if (ver->fcxslots == 7) {
pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v7);
+ fwsubver->fcxslots = pfwinfo->rpt_fbtc_slots.finfo.v7.fver;
} else {
goto err;
}
@@ -1421,22 +1518,27 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v2;
pcysta->v2 = pfwinfo->rpt_fbtc_cysta.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v2);
+ fwsubver->fcxcysta = pfwinfo->rpt_fbtc_cysta.finfo.v2.fver;
} else if (ver->fcxcysta == 3) {
pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v3;
pcysta->v3 = pfwinfo->rpt_fbtc_cysta.finfo.v3;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v3);
+ fwsubver->fcxcysta = pfwinfo->rpt_fbtc_cysta.finfo.v3.fver;
} else if (ver->fcxcysta == 4) {
pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v4;
pcysta->v4 = pfwinfo->rpt_fbtc_cysta.finfo.v4;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v4);
+ fwsubver->fcxcysta = pfwinfo->rpt_fbtc_cysta.finfo.v4.fver;
} else if (ver->fcxcysta == 5) {
pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v5;
pcysta->v5 = pfwinfo->rpt_fbtc_cysta.finfo.v5;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v5);
+ fwsubver->fcxcysta = pfwinfo->rpt_fbtc_cysta.finfo.v5.fver;
} else if (ver->fcxcysta == 7) {
pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
pcysta->v7 = pfwinfo->rpt_fbtc_cysta.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v7);
+ fwsubver->fcxcysta = pfwinfo->rpt_fbtc_cysta.finfo.v7.fver;
} else {
goto err;
}
@@ -1452,11 +1554,13 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v2.step[0]) *
trace_step +
offsetof(struct rtw89_btc_fbtc_steps_v2, step);
+ fwsubver->fcxstep = pfwinfo->rpt_fbtc_step.finfo.v2.fver;
} else if (ver->fcxstep == 3) {
pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v3;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v3.step[0]) *
trace_step +
offsetof(struct rtw89_btc_fbtc_steps_v3, step);
+ fwsubver->fcxstep = pfwinfo->rpt_fbtc_step.finfo.v3.fver;
} else {
goto err;
}
@@ -1467,12 +1571,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxnullsta == 1) {
pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v1);
+ fwsubver->fcxnullsta = pfwinfo->rpt_fbtc_nullsta.finfo.v1.fver;
} else if (ver->fcxnullsta == 2) {
pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v2);
+ fwsubver->fcxnullsta = pfwinfo->rpt_fbtc_nullsta.finfo.v2.fver;
} else if (ver->fcxnullsta == 7) {
pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v7);
+ fwsubver->fcxnullsta = pfwinfo->rpt_fbtc_nullsta.finfo.v7.fver;
} else {
goto err;
}
@@ -1483,12 +1590,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxmreg == 1) {
pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v1);
+ fwsubver->fcxmreg = pfwinfo->rpt_fbtc_mregval.finfo.v1.fver;
} else if (ver->fcxmreg == 2) {
pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v2);
+ fwsubver->fcxmreg = pfwinfo->rpt_fbtc_mregval.finfo.v2.fver;
} else if (ver->fcxmreg == 7) {
pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v7);
+ fwsubver->fcxmreg = pfwinfo->rpt_fbtc_mregval.finfo.v7.fver;
} else {
goto err;
}
@@ -1499,9 +1609,11 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxgpiodbg == 7) {
pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7);
+ fwsubver->fcxgpiodbg = pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7.fver;
} else {
pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1);
+ fwsubver->fcxgpiodbg = pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1.fver;
}
pcinfo->req_fver = ver->fcxgpiodbg;
break;
@@ -1510,9 +1622,11 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxbtver == 1) {
pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v1);
+ fwsubver->fcxbtver = pfwinfo->rpt_fbtc_btver.finfo.v1.fver;
} else if (ver->fcxbtver == 7) {
pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v7);
+ fwsubver->fcxbtver = pfwinfo->rpt_fbtc_btver.finfo.v7.fver;
}
pcinfo->req_fver = ver->fcxbtver;
break;
@@ -1521,12 +1635,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxbtscan == 1) {
pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v1);
+ fwsubver->fcxbtscan = pfwinfo->rpt_fbtc_btscan.finfo.v1.fver;
} else if (ver->fcxbtscan == 2) {
pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v2);
+ fwsubver->fcxbtscan = pfwinfo->rpt_fbtc_btscan.finfo.v2.fver;
} else if (ver->fcxbtscan == 7) {
pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v7);
+ fwsubver->fcxbtscan = pfwinfo->rpt_fbtc_btscan.finfo.v7.fver;
} else {
goto err;
}
@@ -1537,12 +1654,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxbtafh == 1) {
pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v1);
+ fwsubver->fcxbtafh = pfwinfo->rpt_fbtc_btafh.finfo.v1.fver;
} else if (ver->fcxbtafh == 2) {
pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v2);
+ fwsubver->fcxbtafh = pfwinfo->rpt_fbtc_btafh.finfo.v2.fver;
} else if (ver->fcxbtafh == 7) {
pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v7);
+ fwsubver->fcxbtafh = pfwinfo->rpt_fbtc_btafh.finfo.v7.fver;
} else {
goto err;
}
@@ -1552,6 +1672,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_btdev.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo);
+ fwsubver->fcxbtdevinfo = pfwinfo->rpt_fbtc_btdev.finfo.fver;
pcinfo->req_fver = ver->fcxbtdevinfo;
break;
default:
@@ -2283,6 +2404,7 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
bit_map = BIT(6);
break;
case 3:
+ case 4:
bit_map = BIT(5);
break;
default:
@@ -2297,6 +2419,7 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
bit_map = BIT(5);
break;
case 3:
+ case 4:
bit_map = BIT(6);
break;
default:
@@ -2309,12 +2432,27 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
bit_map = BIT(8);
break;
case 3:
+ case 4:
bit_map = BIT(7);
break;
default:
break;
}
break;
+ case RPT_EN_BT_TX_PWR_LVL:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ break;
+ case 4:
+ bit_map = BIT(8);
+ break;
+ default:
+ break;
+ }
+ break;
case RPT_EN_FW_STEP_INFO:
switch (ver->frptmap) {
case 1:
@@ -2324,6 +2462,9 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
case 3:
bit_map = BIT(8);
break;
+ case 4:
+ bit_map = BIT(9);
+ break;
default:
break;
}
@@ -2341,6 +2482,9 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
case 3:
bit_map = GENMASK(2, 0) | BIT(8);
break;
+ case 4:
+ bit_map = GENMASK(2, 0) | BIT(9);
+ break;
default:
break;
}
@@ -2357,6 +2501,9 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
case 3:
bit_map = GENMASK(7, 3);
break;
+ case 4:
+ bit_map = GENMASK(8, 3);
+ break;
default:
break;
}
@@ -2373,6 +2520,9 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
case 3:
bit_map = GENMASK(8, 0);
break;
+ case 4:
+ bit_map = GENMASK(9, 0);
+ break;
default:
break;
}
@@ -2389,6 +2539,9 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
case 3:
bit_map = GENMASK(8, 2);
break;
+ case 4:
+ bit_map = GENMASK(9, 2);
+ break;
default:
break;
}
@@ -2678,6 +2831,16 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
case CXDRVINFO_FDDT:
case CXDRVINFO_MLO:
case CXDRVINFO_OSI:
+ if (!ver->fcxosi)
+ return;
+
+ if (ver->drvinfo_type == 2)
+ type = 7;
+ else
+ return;
+
+ rtw89_fw_h2c_cxdrv_osi_info(rtwdev, type);
+ break;
default:
break;
}
@@ -2758,6 +2921,8 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_outsrc_set_info *osi = &dm->ost_info;
+ struct rtw89_mac_ax_wl_act *b = dm->gnt.bt;
struct rtw89_mac_ax_gnt *g = dm->gnt.band;
u8 i, bt_idx = dm->bt_select + 1;
@@ -2806,21 +2971,35 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
switch (wlact_state) {
case BTC_WLACT_HW:
- dm->gnt.bt[i].wlan_act_en = 0;
- dm->gnt.bt[i].wlan_act = 0;
+ b[i].wlan_act_en = 0;
+ b[i].wlan_act = 0;
break;
case BTC_WLACT_SW_LO:
- dm->gnt.bt[i].wlan_act_en = 1;
- dm->gnt.bt[i].wlan_act = 0;
+ b[i].wlan_act_en = 1;
+ b[i].wlan_act = 0;
break;
case BTC_WLACT_SW_HI:
- dm->gnt.bt[i].wlan_act_en = 1;
- dm->gnt.bt[i].wlan_act = 1;
+ b[i].wlan_act_en = 1;
+ b[i].wlan_act = 1;
break;
}
}
}
- rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+
+ if (!btc->ver->fcxosi) {
+ rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+ return;
+ }
+
+ memcpy(osi->gnt_set, dm->gnt.band, sizeof(osi->gnt_set));
+ memcpy(osi->wlact_set, dm->gnt.bt, sizeof(osi->wlact_set));
+
+ /* GBT source should be GBT_S1 in 1+1 (HWB0:5G + HWB1:2G) case */
+ if (osi->rf_band[BTC_RF_S0] == 1 &&
+ osi->rf_band[BTC_RF_S1] == 0)
+ osi->rf_gbt_source = BTC_RF_S1;
+ else
+ osi->rf_gbt_source = BTC_RF_S0;
}
#define BTC_TDMA_WLROLE_MAX 3
@@ -3062,7 +3241,7 @@ static void _update_btc_state_map(struct rtw89_dev *rtwdev)
}
}
-static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
+static void _set_bt_afh_info_v0(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
@@ -3231,6 +3410,115 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
btc->cx.cnt_wl[BTC_WCNT_CH_UPDATE]++;
}
+static void _set_bt_afh_info_v1(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_afh_info *wl_afh = &wl->afh_info;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_wl_rlink *rlink;
+ u8 en = 0, ch = 0, bw = 0, buf[3] = {};
+ u8 i, j, link_mode;
+
+ if (btc->manual_ctrl || wl->status.map.scan)
+ return;
+
+ link_mode = wl_rinfo->link_mode;
+
+ for (i = 0; i < btc->ver->max_role_num; i++) {
+ for (j = RTW89_MAC_0; j < RTW89_MAC_NUM; j++) {
+ if (wl->status.map.rf_off || bt->whql_test ||
+ link_mode == BTC_WLINK_NOLINK ||
+ link_mode == BTC_WLINK_5G)
+ break;
+
+ rlink = &wl_rinfo->rlink[i][j];
+
+ /* Ignore non-connected or non-2G-band roles */
+ if (!rlink->connected || !rlink->active ||
+ rlink->rf_band != RTW89_BAND_2G)
+ continue;
+
+ en = 1;
+ ch = rlink->ch;
+ bw = rlink->bw;
+
+ if (link_mode == BTC_WLINK_2G_MCC &&
+ (rlink->role == RTW89_WIFI_ROLE_AP ||
+ rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
+ rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ /* for 2.4G MCC, take role = ap/go/gc */
+ break;
+ } else if (link_mode != BTC_WLINK_2G_SCC ||
+ rlink->bw == RTW89_CHANNEL_WIDTH_40) {
+ /* for 2.4G scc, take bw = 40M */
+ break;
+ }
+ }
+ }
+
+ /* default AFH channel span = center-ch +- 6MHz */
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ if (btc->dm.freerun || btc->dm.fddt_train)
+ bw = 48;
+ else
+ bw = 20 + chip->afh_guard_ch * 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ if (btc->dm.freerun)
+ bw = 40 + chip->afh_guard_ch * 2;
+ else
+ bw = 40;
+ break;
+ case RTW89_CHANNEL_WIDTH_5:
+ bw = 5 + chip->afh_guard_ch * 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ bw = 10 + chip->afh_guard_ch * 2;
+ break;
+ default:
+ en = false; /* turn off AFH info if invalid BW */
+ bw = 0;
+ ch = 0;
+ break;
+ }
+
+ if (!en || ch > 14 || ch == 0) {
+ en = false;
+ bw = 0;
+ ch = 0;
+ }
+
+ if (wl_afh->en == en &&
+ wl_afh->ch == ch &&
+ wl_afh->bw == bw &&
+ (!bt->enable.now || bt->enable.last))
+ return;
+
+ wl_afh->en = buf[0];
+ wl_afh->ch = buf[1];
+ wl_afh->bw = buf[2];
+
+ if (_send_fw_cmd(rtwdev, BTFC_SET, SET_BT_WL_CH_INFO, &wl->afh_info, 3)) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): en=%d, ch=%d, bw=%d\n",
+ __func__, en, ch, bw);
+
+ btc->cx.cnt_wl[BTC_WCNT_CH_UPDATE]++;
+ }
+}
+
+static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8922A)
+ _set_bt_afh_info_v1(rtwdev);
+ else
+ _set_bt_afh_info_v0(rtwdev);
+}
+
static bool _check_freerun(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -3716,6 +4004,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
u32 tbl_w1, tbl_b1, tbl_b4;
u16 dur_2;
+ if (wl->status.map.lps) {
+ _slot_set_le(btc, CXST_E2G, s_def[CXST_E2G].dur,
+ s_def[CXST_E2G].cxtbl, s_def[CXST_E2G].cxtype);
+ _slot_set_le(btc, CXST_E5G, s_def[CXST_E5G].dur,
+ s_def[CXST_E5G].cxtbl, s_def[CXST_E5G].cxtype);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ }
+
type = FIELD_GET(BTC_CXP_MASK, policy_type);
if (btc->ant_type == BTC_ANT_SHARED) {
@@ -3836,13 +4133,13 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
switch (policy_type) {
case BTC_CXP_OFFE_2GBWISOB: /* for normal-case */
- _slot_set(btc, CXST_E2G, 0, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_E2G, 5, tbl_w1, SLOT_ISO);
_slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_2GISOB: /* for bt no-link */
- _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_ISO);
+ _slot_set(btc, CXST_E2G, 5, cxtbl[1], SLOT_ISO);
_slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_dur(btc, CXST_EBT, dur_2);
@@ -3868,15 +4165,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
break;
case BTC_CXP_OFFE_2GBWMIXB:
if (a2dp->exist)
- _slot_set(btc, CXST_E2G, 0, cxtbl[2], SLOT_MIX);
+ _slot_set(btc, CXST_E2G, 5, cxtbl[2], SLOT_MIX);
else
- _slot_set(btc, CXST_E2G, 0, tbl_w1, SLOT_MIX);
- _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ _slot_set(btc, CXST_E2G, 5, tbl_w1, SLOT_MIX);
+ _slot_set_le(btc, CXST_EBT, cpu_to_le16(40),
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
break;
case BTC_CXP_OFFE_WL: /* for 4-way */
- _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_MIX);
- _slot_set(btc, CXST_EBT, 0, cxtbl[1], SLOT_MIX);
+ _slot_set(btc, CXST_E2G, 5, cxtbl[1], SLOT_MIX);
+ _slot_set(btc, CXST_EBT, 5, cxtbl[1], SLOT_MIX);
break;
default:
break;
@@ -4864,16 +5161,14 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
+ struct rtw89_btc_fbtc_outsrc_set_info *o_info = &btc->dm.ost_info;
struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
- struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
struct _wl_rinfo_now wl_rinfo;
- u32 run_reason = btc->dm.run_reason;
- u32 is_btg;
- u8 i, val;
+ u32 is_btg = BTC_BTGCTRL_DISABLE;
if (btc->manual_ctrl)
return;
@@ -4891,63 +5186,62 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
else
return;
- if (rtwdev->dbcc_en) {
- if (ver->fwlrole == 0) {
- wl_rinfo.dbcc_2g_phy = RTW89_PHY_NUM;
+ /* notify halbb whether to ignore GNT_BT for WL BB Rx-AGC control */
+ if (btc->ant_type == BTC_ANT_SHARED) {
+ if (!(bt->run_patch_code && bt->enable.now))
+ is_btg = BTC_BTGCTRL_DISABLE;
+ else if (wl_rinfo.link_mode != BTC_WLINK_5G)
+ is_btg = BTC_BTGCTRL_ENABLE;
+ else
+ is_btg = BTC_BTGCTRL_DISABLE;
- for (i = 0; i < RTW89_PHY_NUM; i++) {
- if (wl_dinfo->real_band[i] == RTW89_BAND_2G)
- wl_rinfo.dbcc_2g_phy = i;
- }
- } else if (ver->fwlrole == 1) {
- wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
- } else if (ver->fwlrole == 2) {
- wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
- } else if (ver->fwlrole == 7) {
- wl_rinfo.dbcc_2g_phy = wl_rinfo_v7->dbcc_2g_phy;
- } else if (ver->fwlrole == 8) {
- wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy;
- } else {
- return;
- }
+ /* BB calls ctrl_btg() in WL FW per slot */
+ if (!ver->fcxosi &&
+ wl_rinfo.link_mode == BTC_WLINK_25G_MCC)
+ is_btg = BTC_BTGCTRL_BB_GNT_FWCTRL;
}
- if (wl_rinfo.link_mode == BTC_WLINK_25G_MCC)
- is_btg = BTC_BTGCTRL_BB_GNT_FWCTRL;
- else if (!(bt->run_patch_code && bt->enable.now))
- is_btg = BTC_BTGCTRL_DISABLE;
- else if (wl_rinfo.link_mode == BTC_WLINK_5G)
- is_btg = BTC_BTGCTRL_DISABLE;
- else if (dm->freerun)
- is_btg = BTC_BTGCTRL_DISABLE;
- else if (rtwdev->dbcc_en && wl_rinfo.dbcc_2g_phy != RTW89_PHY_1)
- is_btg = BTC_BTGCTRL_DISABLE;
+ if (is_btg == dm->wl_btg_rx)
+ return;
else
- is_btg = BTC_BTGCTRL_ENABLE;
+ dm->wl_btg_rx = is_btg;
- if (dm->wl_btg_rx_rb != dm->wl_btg_rx &&
- dm->wl_btg_rx_rb != BTC_BTGCTRL_BB_GNT_NOTFOUND) {
- _get_reg_status(rtwdev, BTC_CSTATUS_BB_GNT_MUX, &val);
- dm->wl_btg_rx_rb = val;
- }
+ /* skip setup if btg_ctrl set by wl fw */
+ if (!ver->fcxosi && is_btg > BTC_BTGCTRL_ENABLE)
+ return;
- if (run_reason == BTC_RSN_NTFY_INIT ||
- run_reason == BTC_RSN_NTFY_SWBAND ||
- dm->wl_btg_rx_rb != dm->wl_btg_rx ||
- is_btg != dm->wl_btg_rx) {
+ /* Below flow is for BTC_FEAT_NEW_BBAPI_FLOW = 1 */
+ if (o_info->rf_band[BTC_RF_S0] != o_info->rf_band[BTC_RF_S1]) {/* 1+1 */
+ if (o_info->rf_band[BTC_RF_S0]) /* Non-2G */
+ o_info->btg_rx[BTC_RF_S0] = BTC_BTGCTRL_DISABLE;
+ else
+ o_info->btg_rx[BTC_RF_S0] = is_btg;
- dm->wl_btg_rx = is_btg;
+ if (o_info->rf_band[BTC_RF_S1]) /* Non-2G */
+ o_info->btg_rx[BTC_RF_S1] = BTC_BTGCTRL_DISABLE;
+ else
+ o_info->btg_rx[BTC_RF_S1] = is_btg;
+ } else { /* 2+0 or 0+2 */
+ o_info->btg_rx[BTC_RF_S0] = is_btg;
+ o_info->btg_rx[BTC_RF_S1] = is_btg;
+ }
- if (is_btg > BTC_BTGCTRL_ENABLE)
- return;
+ if (ver->fcxosi)
+ return;
- chip->ops->ctrl_btg_bt_rx(rtwdev, is_btg, RTW89_PHY_0);
- }
+ chip->ops->ctrl_btg_bt_rx(rtwdev, o_info->btg_rx[BTC_RF_S0],
+ RTW89_PHY_0);
+ if (chip->chip_id != RTL8922A)
+ return;
+
+ chip->ops->ctrl_btg_bt_rx(rtwdev, o_info->btg_rx[BTC_RF_S1],
+ RTW89_PHY_1);
}
static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_fbtc_outsrc_set_info *o_info = &btc->dm.ost_info;
struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v2 *rinfo_v2 = &wl->role_info_v2;
@@ -4979,9 +5273,7 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
return;
}
- if (link_mode == BTC_WLINK_25G_MCC) {
- is_preagc = BTC_PREAGC_BB_FWCTRL;
- } else if (!(bt->run_patch_code && bt->enable.now)) {
+ if (!(bt->run_patch_code && bt->enable.now)) {
is_preagc = BTC_PREAGC_DISABLE;
} else if (link_mode == BTC_WLINK_5G) {
is_preagc = BTC_PREAGC_DISABLE;
@@ -5001,6 +5293,9 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
is_preagc = BTC_PREAGC_ENABLE;
}
+ if (!btc->ver->fcxosi && link_mode == BTC_WLINK_25G_MCC)
+ is_preagc = BTC_PREAGC_BB_FWCTRL;
+
if (dm->wl_pre_agc_rb != dm->wl_pre_agc &&
dm->wl_pre_agc_rb != BTC_PREAGC_NOTFOUND) {
_get_reg_status(rtwdev, BTC_CSTATUS_BB_PRE_AGC, &val);
@@ -5014,9 +5309,34 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
is_preagc != dm->wl_pre_agc) {
dm->wl_pre_agc = is_preagc;
- if (is_preagc > BTC_PREAGC_ENABLE)
+ if (!btc->ver->fcxosi && is_preagc > BTC_PREAGC_ENABLE)
return;
- chip->ops->ctrl_nbtg_bt_tx(rtwdev, dm->wl_pre_agc, RTW89_PHY_0);
+
+ if (o_info->rf_band[BTC_RF_S0] != o_info->rf_band[BTC_RF_S1]) {/* 1+1 */
+ if (o_info->rf_band[BTC_RF_S0]) /* Non-2G */
+ o_info->nbtg_tx[BTC_RF_S0] = BTC_PREAGC_DISABLE;
+ else
+ o_info->nbtg_tx[BTC_RF_S0] = is_preagc;
+
+ if (o_info->rf_band[BTC_RF_S1]) /* Non-2G */
+ o_info->nbtg_tx[BTC_RF_S1] = BTC_PREAGC_DISABLE;
+ else
+ o_info->nbtg_tx[BTC_RF_S1] = is_preagc;
+
+ } else { /* 2+0 or 0+2 */
+ o_info->nbtg_tx[BTC_RF_S0] = is_preagc;
+ o_info->nbtg_tx[BTC_RF_S1] = is_preagc;
+ }
+
+ if (btc->ver->fcxosi)
+ return;
+
+ chip->ops->ctrl_nbtg_bt_tx(rtwdev, o_info->nbtg_tx[BTC_RF_S0],
+ RTW89_PHY_0);
+ if (chip->chip_id != RTL8922A)
+ return;
+ chip->ops->ctrl_nbtg_bt_tx(rtwdev, o_info->nbtg_tx[BTC_RF_S1],
+ RTW89_PHY_1);
}
}
@@ -5229,15 +5549,47 @@ static void _set_bt_rx_scan_pri(struct rtw89_dev *rtwdev)
_write_scbd(rtwdev, BTC_WSCB_RXSCAN_PRI, (bool)(!!bt->scan_rx_low_pri));
}
+static void _wl_req_mac(struct rtw89_dev *rtwdev, u8 mac)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u32 add;
+
+ if (mac == wl->pta_req_mac)
+ return;
+
+ dm->ost_info.pta_req_hw_band = mac;
+ wl->pta_req_mac = mac;
+ wl->pta_reg_mac_chg = true;
+
+ if (btc->ver->fcxosi)
+ return;
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ add = R_BE_BTC_CFG;
+ else
+ add = R_AX_BTC_CFG;
+
+ if (mac == RTW89_MAC_0)
+ rtw89_write32_clr(rtwdev, add, B_AX_WL_SRC);
+ else
+ rtw89_write32_set(rtwdev, add, B_AX_WL_SRC);
+}
+
static void _action_common(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_smap *wl_smap = &wl->status.map;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_dm *dm = &btc->dm;
u32 bt_rom_code_id, bt_fw_ver;
+ if (btc->ver->fwlrole == 8)
+ _wl_req_mac(rtwdev, rinfo_v8->pta_req_band);
+
_set_btg_ctrl(rtwdev);
_set_wl_preagc_ctrl(rtwdev);
_set_wl_tx_limit(rtwdev);
@@ -5273,7 +5625,18 @@ static void _action_common(struct rtw89_dev *rtwdev)
wl->scbd_change = false;
btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
}
+
+ if (btc->ver->fcxosi) {
+ if (memcmp(&dm->ost_info_last, &dm->ost_info,
+ sizeof(dm->ost_info_last)) ||
+ dm->run_reason == BTC_RSN_NTFY_INIT ||
+ dm->run_reason == BTC_RSN_NTFY_RADIO_STATE) {
+ dm->ost_info_last = dm->ost_info;
+ _fw_set_drv_info(rtwdev, CXDRVINFO_OSI);
+ }
+ }
btc->dm.tdma_instant_excute = 0;
+ wl->pta_reg_mac_chg = false;
}
static void _action_by_bt(struct rtw89_dev *rtwdev)
@@ -5736,14 +6099,6 @@ _update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh)
return next_state;
}
-static void _wl_req_mac(struct rtw89_dev *rtwdev, u8 mac)
-{
- if (mac == RTW89_MAC_0)
- rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
- else
- rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_WL_SRC);
-}
-
static
void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -6239,23 +6594,16 @@ static bool _chk_role_ch_group(const struct rtw89_btc_chdef *r1,
}
static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
- u8 *phy, u8 *role, u8 *dbcc_2g_phy)
+ u8 *phy, u8 *role, u8 link_cnt)
{
struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
struct rtw89_btc_wl_role_info_v7 *rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *rinfo_v8 = &wl->role_info_v8;
bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false;
- u8 j, k, dbcc_2g_cid, dbcc_2g_cid2, connect_cnt;
-
- if (rtwdev->btc.ver->fwlrole == 7)
- connect_cnt = rinfo_v7->connect_cnt;
- else if (rtwdev->btc.ver->fwlrole == 8)
- connect_cnt = rinfo_v8->connect_cnt;
- else
- return BTC_WLINK_NOLINK;
+ u8 j, k, dbcc_2g_cid, dbcc_2g_cid2, dbcc_2g_phy, pta_req_band;
/* find out the 2G-PHY by connect-id ->ch */
- for (j = 0; j < connect_cnt; j++) {
+ for (j = 0; j < link_cnt; j++) {
if (ch[j].center_ch <= 14) {
is_2g_ch_exist = true;
break;
@@ -6264,21 +6612,33 @@ static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
/* If no any 2G-port exist, it's impossible because 5G-exclude */
if (!is_2g_ch_exist)
- return BTC_WLINK_OTHER;
+ return BTC_WLINK_5G;
dbcc_2g_cid = j;
- *dbcc_2g_phy = phy[dbcc_2g_cid];
+ dbcc_2g_phy = phy[dbcc_2g_cid];
+
+ if (dbcc_2g_phy == RTW89_PHY_1)
+ pta_req_band = RTW89_PHY_1;
+ else
+ pta_req_band = RTW89_PHY_0;
+
+ if (rtwdev->btc.ver->fwlrole == 7) {
+ rinfo_v7->dbcc_2g_phy = dbcc_2g_phy;
+ } else if (rtwdev->btc.ver->fwlrole == 8) {
+ rinfo_v8->dbcc_2g_phy = dbcc_2g_phy;
+ rinfo_v8->pta_req_band = pta_req_band;
+ }
/* connect_cnt <= 2 */
- if (connect_cnt < BTC_TDMA_WLROLE_MAX)
+ if (link_cnt < BTC_TDMA_WLROLE_MAX)
return (_get_role_link_mode((role[dbcc_2g_cid])));
/* find the other-port in the 2G-PHY, ex: PHY-0:6G, PHY1: mcc/scc */
- for (k = 0; k < connect_cnt; k++) {
+ for (k = 0; k < link_cnt; k++) {
if (k == dbcc_2g_cid)
continue;
- if (phy[k] == *dbcc_2g_phy) {
+ if (phy[k] == dbcc_2g_phy) {
is_multi_role_in_2g_phy = true;
dbcc_2g_cid2 = k;
break;
@@ -6480,7 +6840,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
} else if (cnt > BTC_TDMA_WLROLE_MAX) {
mode = BTC_WLINK_OTHER;
} else if (rtwdev->dbcc_en) {
- mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy);
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, cnt);
/* correct 2G-located PHY band for gnt ctrl */
if (dbcc_2g_phy < RTW89_PHY_NUM)
@@ -6525,26 +6885,336 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
_fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
+static u8 _update_wl_link_mode(struct rtw89_dev *rtwdev, u8 hw_band, u8 type)
+{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
+ struct rtw89_btc_wl_mlo_info *mlo_info = &wl->mlo_info;
+ u8 mode = BTC_WLINK_NOLINK;
+
+ switch (type) {
+ case RTW89_MR_WTYPE_NONE: /* no-link */
+ mode = BTC_WLINK_NOLINK;
+ break;
+ case RTW89_MR_WTYPE_NONMLD: /* Non_MLO 1-role 2+0/0+2 */
+ case RTW89_MR_WTYPE_MLD1L1R: /* MLO only-1 link 2+0/0+2 */
+ if (mlo_info->hwb_rf_band[hw_band] != RTW89_BAND_2G) {
+ mode = BTC_WLINK_5G;
+ } else if (mlo_info->wmode[hw_band] == RTW89_MR_WMODE_1AP) {
+ mode = BTC_WLINK_2G_GO;
+ } else if (mlo_info->wmode[hw_band] == RTW89_MR_WMODE_1CLIENT) {
+ if (wl->role_info_v8.p2p_2g)
+ mode = BTC_WLINK_2G_GC;
+ else
+ mode = BTC_WLINK_2G_STA;
+ }
+ break;
+ case RTW89_MR_WTYPE_NONMLD_NONMLD: /* Non_MLO 2-role 2+0/0+2 */
+ case RTW89_MR_WTYPE_MLD1L1R_NONMLD: /* MLO only-1 link + P2P 2+0/0+2 */
+ if (mlo_info->hwb_rf_band[hw_band] != RTW89_BAND_2G) {
+ mode = BTC_WLINK_5G;
+ } else if (mlo_info->ch_type[hw_band] == RTW89_MR_CTX2_2GHZ_5GHZ ||
+ mlo_info->ch_type[hw_band] == RTW89_MR_CTX2_2GHZ_6GHZ) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (mlo_info->ch_type[hw_band] == RTW89_MR_CTX2_2GHZ) {
+ mode = BTC_WLINK_2G_MCC;
+ } else if (mlo_info->ch_type[hw_band] == RTW89_MR_CTX1_2GHZ) {
+ mode = BTC_WLINK_2G_SCC;
+ }
+ break;
+ case RTW89_MR_WTYPE_MLD2L1R: /* MLO_MLSR 2+0/0+2 */
+ if (mlo_info->hwb_rf_band[hw_band] != RTW89_BAND_2G)
+ mode = BTC_WLINK_5G;
+ else if (wl->role_info_v8.p2p_2g)
+ mode = BTC_WLINK_2G_GC;
+ else
+ mode = BTC_WLINK_2G_STA;
+ break;
+ case RTW89_MR_WTYPE_MLD2L1R_NONMLD: /* MLO_MLSR + P2P 2+0/0+2 */
+ case RTW89_MR_WTYPE_MLD2L2R_NONMLD: /* MLO_MLMR + P2P 1+1/2+2 */
+ /* driver may doze 1-link to
+ * 2G+5G -> TDMA slot switch by E2G/E5G
+ * 5G only -> TDMA slot switch by E5G
+ */
+ mode = BTC_WLINK_25G_MCC;
+ break;
+ case RTW89_MR_WTYPE_MLD2L2R: /* MLO_MLMR 1+1/2+2 */
+ if (mlo_info->hwb_rf_band[hw_band] != RTW89_BAND_2G) {
+ mode = BTC_WLINK_5G;
+ } else if (mlo_info->wmode[hw_band] == RTW89_MR_WMODE_1AP) {
+ mode = BTC_WLINK_2G_GO;
+ } else if (mlo_info->wmode[hw_band] == RTW89_MR_WMODE_1CLIENT) {
+ if (wl->role_info_v8.p2p_2g)
+ mode = BTC_WLINK_2G_GC;
+ else
+ mode = BTC_WLINK_2G_STA;
+ }
+ break;
+ }
+ return mode;
+}
+
+static void _update_wl_mlo_info(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_mlo_info *mlo_info = &wl->mlo_info;
+ struct rtw89_mr_chanctx_info qinfo;
+ u8 track_band = RTW89_PHY_0;
+ u8 rf_band = RTW89_BAND_2G;
+ u8 i, type;
+
+ /* parse MLO info from PHL API for each HW-band */
+ for (i = RTW89_MAC_0; i <= RTW89_MAC_1; i++) {
+ memset(&qinfo, 0, sizeof(qinfo));
+
+ rtw89_query_mr_chanctx_info(rtwdev, i, &qinfo);
+ mlo_info->wmode[i] = qinfo.wmode;
+ mlo_info->ch_type[i] = qinfo.ctxtype;
+ mlo_info->wtype = qinfo.wtype;
+
+ if (mlo_info->ch_type[i] == RTW89_MR_CTX1_5GHZ ||
+ mlo_info->ch_type[i] == RTW89_MR_CTX2_5GHZ ||
+ mlo_info->ch_type[i] == RTW89_MR_CTX2_5GHZ_6GHZ)
+ mlo_info->hwb_rf_band[i] = RTW89_BAND_5G;
+ else if (mlo_info->ch_type[i] == RTW89_MR_CTX1_6GHZ ||
+ mlo_info->ch_type[i] == RTW89_MR_CTX2_6GHZ)
+ mlo_info->hwb_rf_band[i] = RTW89_BAND_6G;
+ else /* check if "2G-included" or unknown in each HW-band */
+ mlo_info->hwb_rf_band[i] = RTW89_BAND_2G;
+ }
+
+ mlo_info->link_status = rtwdev->mlo_dbcc_mode;
+ type = mlo_info->wtype;
+
+ if (mlo_info->wtype == RTW89_MR_WTYPE_MLD1L1R ||
+ mlo_info->wtype == RTW89_MR_WTYPE_MLD2L1R ||
+ mlo_info->wtype == RTW89_MR_WTYPE_MLD2L2R ||
+ mlo_info->wtype == RTW89_MR_WTYPE_MLD1L1R_NONMLD ||
+ mlo_info->wtype == RTW89_MR_WTYPE_MLD2L1R_NONMLD ||
+ mlo_info->wtype == RTW89_MR_WTYPE_MLD2L2R_NONMLD)
+ mlo_info->mlo_en = 1;
+ else
+ mlo_info->mlo_en = 0;
+
+ if (mlo_info->ch_type[RTW89_MAC_0] != RTW89_MR_CTX_NONE &&
+ mlo_info->ch_type[RTW89_MAC_0] != RTW89_MR_CTX_UNKNOWN &&
+ mlo_info->ch_type[RTW89_MAC_1] != RTW89_MR_CTX_NONE &&
+ mlo_info->ch_type[RTW89_MAC_1] != RTW89_MR_CTX_UNKNOWN)
+ mlo_info->dual_hw_band_en = 1; /* two HW-band links exist */
+ else
+ mlo_info->dual_hw_band_en = 0;
+
+ if (mlo_info->link_status == MLO_2_PLUS_0_2RF ||
+ mlo_info->link_status == MLO_0_PLUS_2_2RF ||
+ mlo_info->link_status == MLO_2_PLUS_2_2RF)
+ mlo_info->mlo_adie = 2;
+ else
+ mlo_info->mlo_adie = 1;
+
+ switch (mlo_info->link_status) {
+ default:
+ case MLO_2_PLUS_0_1RF: /* 2+0 */
+ case MLO_2_PLUS_0_2RF:
+ mlo_info->rf_combination = BTC_MLO_RF_2_PLUS_0;
+ track_band = RTW89_MAC_0;
+ rf_band = mlo_info->hwb_rf_band[RTW89_MAC_0];
+ mlo_info->path_rf_band[BTC_RF_S0] = rf_band;
+ mlo_info->path_rf_band[BTC_RF_S1] = rf_band;
+
+ wl_rinfo->pta_req_band = RTW89_MAC_0;
+ wl_rinfo->dbcc_2g_phy = RTW89_PHY_0;
+ wl_rinfo->dbcc_en = 0;
+ break;
+ case MLO_0_PLUS_2_1RF: /* 0+2 */
+ case MLO_0_PLUS_2_2RF:
+ mlo_info->rf_combination = BTC_MLO_RF_0_PLUS_2;
+ track_band = RTW89_MAC_1;
+ rf_band = mlo_info->hwb_rf_band[RTW89_MAC_1];
+ mlo_info->path_rf_band[BTC_RF_S0] = rf_band;
+ mlo_info->path_rf_band[BTC_RF_S1] = rf_band;
+
+ wl_rinfo->pta_req_band = RTW89_MAC_1;
+ wl_rinfo->dbcc_2g_phy = RTW89_PHY_1;
+ wl_rinfo->dbcc_en = 0;
+ break;
+ case MLO_1_PLUS_1_1RF: /* 1+1 */
+ case MLO_1_PLUS_1_2RF: /* 1+1 */
+ case MLO_2_PLUS_2_2RF: /* 2+2 */
+ case DBCC_LEGACY: /* DBCC 1+1 */
+ if (mlo_info->link_status == MLO_2_PLUS_2_2RF)
+ mlo_info->rf_combination = BTC_MLO_RF_2_PLUS_2;
+ else
+ mlo_info->rf_combination = BTC_MLO_RF_1_PLUS_1;
+
+ if (mlo_info->hwb_rf_band[RTW89_MAC_0] == RTW89_BAND_2G)
+ track_band = RTW89_MAC_0;
+ else
+ track_band = RTW89_MAC_1;
+
+ mlo_info->path_rf_band[BTC_RF_S0] =
+ mlo_info->hwb_rf_band[RTW89_MAC_0];
+ mlo_info->path_rf_band[BTC_RF_S1] =
+ mlo_info->hwb_rf_band[RTW89_MAC_1];
+
+ /* Check ch count from ch_type @ 2.4G HW-band, and modify type */
+ if (mlo_info->ch_type[track_band] == RTW89_MR_CTX1_2GHZ)
+ type = RTW89_MR_WTYPE_NONMLD; /* only 1-role at 2G */
+ else
+ type = RTW89_MR_WTYPE_NONMLD_NONMLD;
+
+ if (mlo_info->hwb_rf_band[RTW89_MAC_0] == RTW89_BAND_2G) {
+ wl_rinfo->pta_req_band = RTW89_MAC_0;
+ wl_rinfo->dbcc_2g_phy = RTW89_PHY_0;
+ } else {
+ wl_rinfo->pta_req_band = RTW89_MAC_1;
+ wl_rinfo->dbcc_2g_phy = RTW89_PHY_1;
+ }
+
+ if (mlo_info->wmode[RTW89_MAC_0] == RTW89_MR_WMODE_NONE &&
+ mlo_info->wmode[RTW89_MAC_1] == RTW89_MR_WMODE_NONE)
+ wl_rinfo->dbcc_en = 0;
+ else
+ wl_rinfo->dbcc_en = 1;
+ break;
+ }
+
+ wl_rinfo->link_mode = _update_wl_link_mode(rtwdev, track_band, type);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(), mode=%s, pta_band=%d",
+ __func__, id_to_linkmode(wl_rinfo->link_mode),
+ wl_rinfo->pta_req_band);
+}
+
+static void _update_wl_non_mlo_info(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
+ struct rtw89_btc_wl_rlink *rlink = NULL;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ bool b2g = false, b5g = false, outloop = false;
+ u8 mode = BTC_WLINK_NOLINK;
+ u8 cnt_2g = 0, cnt_5g = 0;
+ u8 i, j, cnt = 0;
+
+ for (j = RTW89_PHY_0; j < RTW89_PHY_NUM; j++) {
+ for (i = 0; i < RTW89_BE_BTC_WL_MAX_ROLE_NUMBER; i++) {
+ rlink = &wl_rinfo->rlink[i][j];
+
+ if (!rlink->active || !rlink->connected)
+ continue;
+
+ if (cnt >= RTW89_BE_BTC_WL_MAX_ROLE_NUMBER) {
+ outloop = true;
+ break;
+ }
+
+ cid_ch[cnt] = wl->rlink_info[i][j].chdef;
+ cid_phy[cnt] = rlink->phy;
+ cid_role[cnt] = rlink->role;
+ cnt++;
+
+ if (rlink->rf_band != RTW89_BAND_2G) {
+ cnt_5g++;
+ b5g = true;
+ } else {
+ cnt_2g++;
+ b2g = true;
+ }
+ }
+ if (outloop)
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): cnt_2g=%d, cnt_5g=%d\n", __func__, cnt_2g, cnt_5g);
+
+ wl_rinfo->dbcc_en = rtwdev->dbcc_en;
+ /* Be careful to change the following sequence!! */
+ if (cnt == 0) {
+ mode = BTC_WLINK_NOLINK;
+ } else if (!b2g && b5g) {
+ mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->dbcc_en) {
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, cnt);
+ } else if (b2g && b5g) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (!b5g && cnt >= 2) {
+ if (_chk_role_ch_group(&cid_ch[0], &cid_ch[1]))
+ mode = BTC_WLINK_2G_SCC;
+ else
+ mode = BTC_WLINK_2G_MCC;
+ } else if (!b5g) { /* cnt_connect = 1 */
+ mode = _get_role_link_mode(cid_role[0]);
+ }
+
+ wl_rinfo->link_mode = mode;
+}
+
+static void _modify_role_link_mode(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ u8 go_client_exist = wl->go_client_exist;
+ u8 link_mode = wl_rinfo->link_mode;
+ u32 role_map = wl_rinfo->role_map;
+ u8 noa_exist = wl->noa_exist;
+ u32 mrole = BTC_WLMROLE_NONE;
+
+ /* if no client_joined, don't care P2P-GO/AP role */
+ if (((role_map & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
+ (role_map & BIT(RTW89_WIFI_ROLE_AP))) && !go_client_exist) {
+ if (link_mode == BTC_WLINK_2G_SCC) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ } else if (link_mode == BTC_WLINK_2G_GO ||
+ link_mode == BTC_WLINK_2G_AP) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ }
+ }
+
+ /* Identify 2-Role type */
+ if (link_mode == BTC_WLINK_2G_SCC ||
+ link_mode == BTC_WLINK_2G_MCC ||
+ link_mode == BTC_WLINK_25G_MCC ||
+ link_mode == BTC_WLINK_5G) {
+ if ((role_map & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
+ (role_map & BIT(RTW89_WIFI_ROLE_AP))) {
+ if (noa_exist)
+ mrole = BTC_WLMROLE_STA_GO_NOA;
+ else
+ mrole = BTC_WLMROLE_STA_GO;
+ } else if (role_map & BIT(RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ if (noa_exist)
+ mrole = BTC_WLMROLE_STA_GC_NOA;
+ else
+ mrole = BTC_WLMROLE_STA_GC;
+ } else {
+ mrole = BTC_WLMROLE_STA_STA;
+ }
+ }
+
+ wl_rinfo->mrole_type = mrole;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): link_mode=%s, mrole_type=%d\n", __func__,
+ id_to_linkmode(wl_rinfo->link_mode), wl_rinfo->mrole_type);
+}
+
static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id,
enum btc_role_state state)
{
+ struct rtw89_btc_wl_rlink *rlink = NULL;
+ struct rtw89_btc_wl_link_info *wl_linfo;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
- struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- bool client_joined = false, b2g = false, b5g = false;
- u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
- u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
- u8 dbcc_en = 0, pta_req_band = RTW89_MAC_0;
- u8 i, j, cnt = 0, cnt_2g = 0, cnt_5g = 0;
- struct rtw89_btc_wl_link_info *wl_linfo;
- struct rtw89_btc_wl_rlink *rlink = NULL;
- u8 dbcc_2g_phy = RTW89_PHY_0;
- u8 mode = BTC_WLINK_NOLINK;
- u32 noa_dur = 0;
+ bool client_joined = false, noa_exist = false, p2p_exist = false;
+ bool is_5g_hi_channel = false, bg_mode = false, dbcc_en_ori;
+ u8 i, j, link_mode_ori;
+ u32 role_map = 0;
- if (role_id >= RTW89_BE_BTC_WL_MAX_ROLE_NUMBER || rlink_id > RTW89_MAC_1)
+ if (role_id >= RTW89_BE_BTC_WL_MAX_ROLE_NUMBER || rlink_id >= RTW89_MAC_NUM)
return;
/* Extract wl->link_info[role_id][rlink_id] to wl->role_info
@@ -6554,10 +7224,8 @@ static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id
*/
wl_linfo = &wl->rlink_info[role_id][rlink_id];
- if (wl_linfo->connected == MLME_LINKING)
- return;
-
rlink = &wl_rinfo->rlink[role_id][rlink_id];
+
rlink->role = wl_linfo->role;
rlink->active = wl_linfo->active; /* Doze or not */
rlink->pid = wl_linfo->pid;
@@ -6573,8 +7241,6 @@ static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id
switch (wl_linfo->connected) {
case MLME_NO_LINK:
rlink->connected = 0;
- if (rlink->role == RTW89_WIFI_ROLE_STATION)
- btc->dm.leak_ap = 0;
break;
case MLME_LINKED:
rlink->connected = 1;
@@ -6583,130 +7249,72 @@ static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id
return;
}
- wl->is_5g_hi_channel = false;
- wl->bg_mode = false;
- wl_rinfo->role_map = 0;
- wl_rinfo->p2p_2g = 0;
- memset(cid_ch, 0, sizeof(cid_ch));
-
- for (i = 0; i < RTW89_BE_BTC_WL_MAX_ROLE_NUMBER; i++) {
- for (j = RTW89_MAC_0; j <= RTW89_MAC_1; j++) {
+ for (j = RTW89_MAC_0; j <= RTW89_MAC_1; j++) {
+ for (i = 0; i < RTW89_BE_BTC_WL_MAX_ROLE_NUMBER; i++) {
rlink = &wl_rinfo->rlink[i][j];
if (!rlink->active || !rlink->connected)
continue;
- cnt++;
- wl_rinfo->role_map |= BIT(rlink->role);
-
- /* only if client connect for p2p-Go/AP */
- if ((rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
- rlink->role == RTW89_WIFI_ROLE_AP) &&
- rlink->client_cnt > 1)
- client_joined = true;
-
- /* Identufy if P2P-Go (GO/GC/AP) exist at 2G band*/
- if (rlink->rf_band == RTW89_BAND_2G &&
- (client_joined || rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT))
- wl_rinfo->p2p_2g = 1;
+ role_map |= BIT(rlink->role);
/* only one noa-role exist */
if (rlink->noa && rlink->noa_dur > 0)
- noa_dur = rlink->noa_dur;
+ noa_exist = true;
/* for WL 5G-Rx interfered with BT issue */
- if (rlink->rf_band == RTW89_BAND_5G && rlink->ch >= 100)
- wl->is_5g_hi_channel = 1;
-
- if ((rlink->mode & BIT(BTC_WL_MODE_11B)) ||
- (rlink->mode & BIT(BTC_WL_MODE_11G)))
- wl->bg_mode = 1;
+ if (rlink->rf_band == RTW89_BAND_5G) {
+ if (rlink->ch >= 100)
+ is_5g_hi_channel = true;
- if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT)
continue;
+ }
- cid_ch[cnt - 1] = wl_linfo->chdef;
- cid_phy[cnt - 1] = rlink->phy;
- cid_role[cnt - 1] = rlink->role;
-
- if (rlink->rf_band != RTW89_BAND_2G) {
- cnt_5g++;
- b5g = true;
- } else {
- cnt_2g++;
- b2g = true;
+ /* only if client connect for p2p-Go/AP */
+ if ((rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
+ rlink->role == RTW89_WIFI_ROLE_AP) &&
+ rlink->client_cnt > 1) {
+ p2p_exist = true;
+ client_joined = true;
}
- }
- }
- if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC] rlink cnt_2g=%d cnt_5g=%d\n", cnt_2g, cnt_5g);
- rtw89_warn(rtwdev, "not support MLO feature yet");
- } else {
- dbcc_en = rtwdev->dbcc_en;
+ /* Identify if P2P-Go (GO/GC/AP) exist at 2G band */
+ if (rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ p2p_exist = true;
- /* Be careful to change the following sequence!! */
- if (cnt == 0) {
- mode = BTC_WLINK_NOLINK;
- } else if (!b2g && b5g) {
- mode = BTC_WLINK_5G;
- } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) {
- mode = BTC_WLINK_2G_NAN;
- } else if (cnt > BTC_TDMA_WLROLE_MAX) {
- mode = BTC_WLINK_OTHER;
- } else if (dbcc_en) {
- mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role,
- &dbcc_2g_phy);
- } else if (b2g && b5g && cnt == 2) {
- mode = BTC_WLINK_25G_MCC;
- } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */
- if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1]))
- mode = BTC_WLINK_2G_SCC;
- else
- mode = BTC_WLINK_2G_MCC;
- } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */
- mode = _get_role_link_mode(cid_role[0]);
+ if ((rlink->mode & BIT(BTC_WL_MODE_11B)) ||
+ (rlink->mode & BIT(BTC_WL_MODE_11G)))
+ bg_mode = true;
}
}
- wl_rinfo->link_mode = mode;
- wl_rinfo->connect_cnt = cnt;
- if (wl_rinfo->connect_cnt == 0)
- wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE);
- _update_role_link_mode(rtwdev, client_joined, noa_dur);
+ link_mode_ori = wl_rinfo->link_mode;
+ wl->is_5g_hi_channel = is_5g_hi_channel;
+ wl->bg_mode = bg_mode;
+ wl->go_client_exist = client_joined;
+ wl->noa_exist = noa_exist;
+ wl_rinfo->p2p_2g = p2p_exist;
+ wl_rinfo->role_map = role_map;
- wl_rinfo->dbcc_2g_phy = dbcc_2g_phy;
- if (wl_rinfo->dbcc_en != dbcc_en) {
- wl_rinfo->dbcc_en = dbcc_en;
- wl_rinfo->dbcc_chg = 1;
- btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ dbcc_en_ori = wl_rinfo->dbcc_en;
+
+ if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT) {
+ /* for MLO-supported, link-mode from driver directly */
+ _update_wl_mlo_info(rtwdev);
} else {
- wl_rinfo->dbcc_chg = 0;
+ /* for non-MLO-supported, link-mode by BTC */
+ _update_wl_non_mlo_info(rtwdev);
}
- if (wl_rinfo->dbcc_en) {
- memset(wl_dinfo, 0, sizeof(struct rtw89_btc_wl_dbcc_info));
+ _modify_role_link_mode(rtwdev);
- if (mode == BTC_WLINK_5G) {
- pta_req_band = RTW89_PHY_0;
- wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G;
- wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G;
- } else if (wl_rinfo->dbcc_2g_phy == RTW89_PHY_1) {
- pta_req_band = RTW89_PHY_1;
- wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G;
- wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G;
- } else {
- pta_req_band = RTW89_PHY_0;
- wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_2G;
- wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_5G;
- }
- _update_dbcc_band(rtwdev, RTW89_PHY_0);
- _update_dbcc_band(rtwdev, RTW89_PHY_1);
- }
+ if (link_mode_ori != wl_rinfo->link_mode)
+ wl->link_mode_chg = true;
- wl_rinfo->pta_req_band = pta_req_band;
- _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+ if (wl_rinfo->dbcc_en != dbcc_en_ori) {
+ wl->dbcc_chg = true;
+ btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ }
}
void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work)
@@ -6829,12 +7437,33 @@ static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
bt->rfk_info.map.req = !!(val & BTC_BSCB_RFK_REQ);
bt->hi_lna_rx = !!(val & BTC_BSCB_BT_HILNA);
bt->link_info.status.map.connect = !!(val & BTC_BSCB_BT_CONNECT);
+ if (bt->run_patch_code != !!(val & BTC_BSCB_PATCH_CODE))
+ status_change = true;
bt->run_patch_code = !!(val & BTC_BSCB_PATCH_CODE);
if (!only_update && status_change)
_run_coex(rtwdev, BTC_RSN_UPDATE_BT_SCBD);
}
+#define BTC_BTINFO_PWR_LEN 5
+static void _update_bt_txpwr_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
+{
+ struct rtw89_btc_bt_info *bt = &rtwdev->btc.cx.bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
+
+ if (len != BTC_BTINFO_PWR_LEN)
+ return;
+
+ if (!memcmp(bt->txpwr_info, buf, sizeof(bt->txpwr_info))) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s return by info duplicate!\n", __func__);
+ return;
+ }
+
+ memcpy(bt->txpwr_info, buf, BTC_BTINFO_MAX);
+ memcpy(&b->bt_txpwr_desc, &buf[2], sizeof(b->bt_txpwr_desc));
+}
+
static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -7618,7 +8247,6 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
wlinfo = &wl->link_info[r.pid];
- rlink_id = 0; /* to do */
if (ver->fwlrole == 0) {
*wlinfo = r;
_update_wl_info(rtwdev);
@@ -7632,6 +8260,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
*wlinfo = r;
_update_wl_info_v7(rtwdev, r.pid);
} else if (ver->fwlrole == 8) {
+ rlink_id = rtwvif_link->mac_idx;
wlinfo = &wl->rlink_info[r.pid][rlink_id];
*wlinfo = r;
link_mode_ori = wl->role_info_v8.link_mode;
@@ -7876,7 +8505,11 @@ void __rtw89_btc_ntfy_wl_sta_iter(struct rtw89_vif_link *rtwvif_link,
rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], rssi=%d\n", rssi);
- link_info = &wl->link_info[port];
+ if (btc->ver->fwlrole != 8)
+ link_info = &wl->link_info[port];
+ else
+ link_info = &wl->rlink_info[port][rtwvif_link->mac_idx];
+
link_info->stat.traffic = *stats;
link_info_t = &link_info->stat.traffic;
@@ -7957,13 +8590,12 @@ void __rtw89_btc_ntfy_wl_sta_iter(struct rtw89_vif_link *rtwvif_link,
r1->active_role_v1[port].rx_lvl = stats->rx_tfc_lv;
r1->active_role_v1[port].tx_rate = rtwsta_link->ra_report.hw_rate;
r1->active_role_v1[port].rx_rate = rtwsta_link->rx_hw_rate;
- } else if (ver->fwlrole == 2) {
- dm->trx_info.tx_lvl = stats->tx_tfc_lv;
- dm->trx_info.rx_lvl = stats->rx_tfc_lv;
- dm->trx_info.tx_rate = rtwsta_link->ra_report.hw_rate;
- dm->trx_info.rx_rate = rtwsta_link->rx_hw_rate;
}
+ dm->trx_info.tx_lvl = stats->tx_tfc_lv;
+ dm->trx_info.rx_lvl = stats->rx_tfc_lv;
+ dm->trx_info.tx_rate = rtwsta_link->ra_report.hw_rate;
+ dm->trx_info.rx_rate = rtwsta_link->rx_hw_rate;
dm->trx_info.tx_tp = link_info_t->tx_throughput;
dm->trx_info.rx_tp = link_info_t->rx_throughput;
@@ -8070,6 +8702,8 @@ static u8 rtw89_btc_c2h_get_index_by_ver(struct rtw89_dev *rtwdev, u8 func)
return BTF_EVNT_BUF_OVERFLOW;
else if (ver->fwc2hfunc == 2)
return func;
+ else if (ver->fwc2hfunc == 3)
+ return BTF_EVNT_BUF_OVERFLOW;
else
return BTF_EVNT_MAX;
case BTF_EVNT_BUF_OVERFLOW:
@@ -8079,11 +8713,20 @@ static u8 rtw89_btc_c2h_get_index_by_ver(struct rtw89_dev *rtwdev, u8 func)
return BTF_EVNT_C2H_LOOPBACK;
else if (ver->fwc2hfunc == 2)
return func;
+ else if (ver->fwc2hfunc == 3)
+ return BTF_EVNT_C2H_LOOPBACK;
else
return BTF_EVNT_MAX;
case BTF_EVNT_C2H_LOOPBACK:
if (ver->fwc2hfunc == 2)
return func;
+ else if (ver->fwc2hfunc == 3)
+ return BTF_EVNT_BT_LEAUDIO_INFO;
+ else
+ return BTF_EVNT_MAX;
+ case BTF_EVNT_BT_QUERY_TXPWR:
+ if (ver->fwc2hfunc == 3)
+ return func;
else
return BTF_EVNT_MAX;
case BTF_EVNT_MAX:
@@ -8146,6 +8789,9 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
case BTF_EVNT_CX_RUNINFO:
btc->dm.cnt_dm[BTC_DCNT_CX_RUNINFO]++;
break;
+ case BTF_EVNT_BT_QUERY_TXPWR:
+ btc->cx.cnt_bt[BTC_BCNT_BTTXPWR_UPDATE]++;
+ _update_bt_txpwr_info(rtwdev, buf, len);
}
}
@@ -8168,11 +8814,9 @@ static int _show_cx_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
if (!(dm->coex_info_map & BTC_COEX_INFO_CX))
return 0;
- dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO]++;
-
p += scnprintf(p, end - p,
- "========== [BTC COEX INFO (%d)] ==========\n",
- chip->chip_id);
+ "\n========== [BTC COEX INFO (%s)] ==========\n",
+ chip_id_str(chip->chip_id));
ver_main = FIELD_GET(GENMASK(31, 24), RTW89_COEX_VERSION);
ver_sub = FIELD_GET(GENMASK(23, 16), RTW89_COEX_VERSION);
@@ -8198,8 +8842,8 @@ static int _show_cx_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
p += scnprintf(p, end - p, "BT_FW_coex:%d(%s, desired:%d)\n",
bt->ver_info.fw_coex,
- (bt->ver_info.fw_coex >= chip->btcx_desired ?
- "Match" : "Mismatch"), chip->btcx_desired);
+ (bt->ver_info.fw_coex >= ver->bt_desired ?
+ "Match" : "Mismatch"), ver->bt_desired);
if (bt->enable.now && bt->ver_info.fw == 0)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
@@ -8248,65 +8892,52 @@ static int _show_wl_role_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_link_info *plink = NULL;
- struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
struct rtw89_traffic_stats *t;
char *p = buf, *end = buf + bufsz;
- u8 i;
-
- if (rtwdev->dbcc_en) {
- p += scnprintf(p, end - p,
- " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ",
- "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0],
- wl_dinfo->scan_band[RTW89_PHY_0],
- wl_dinfo->real_band[RTW89_PHY_0]);
- p += scnprintf(p, end - p,
- "PHY1_band(op:%d/scan:%d/real:%d)\n",
- wl_dinfo->op_band[RTW89_PHY_1],
- wl_dinfo->scan_band[RTW89_PHY_1],
- wl_dinfo->real_band[RTW89_PHY_1]);
- }
+ u8 i, j;
- for (i = 0; i < RTW89_PORT_NUM; i++) {
- if (btc->ver->fwlrole == 8)
- plink = &btc->cx.wl.rlink_info[i][0];
- else
- plink = &btc->cx.wl.link_info[i];
+ for (i = 0; i < btc->ver->max_role_num; i++) {
+ for (j = 0; j < RTW89_MAC_NUM; j++) {
+ if (btc->ver->fwlrole == 8)
+ plink = &btc->cx.wl.rlink_info[i][j];
+ else
+ plink = &btc->cx.wl.link_info[i];
- if (!plink->active)
- continue;
+ if (!plink->active)
+ continue;
- p += scnprintf(p, end - p,
- " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d",
- plink->pid, (u32)plink->role, plink->phy,
- (u32)plink->connected, plink->client_cnt - 1,
- (u32)plink->mode, plink->ch, (u32)plink->bw);
+ p += scnprintf(p, end - p,
+ " [port_%d] : role=%d(phy-%d), connect=%s(client_cnt=%d), mode=%d, center_ch=%d, bw=%d",
+ plink->pid, plink->role, plink->phy,
+ id_to_mlme_state(plink->connected),
+ plink->client_cnt - 1, plink->mode,
+ plink->ch, plink->bw);
- if (plink->connected == MLME_NO_LINK)
- continue;
+ if (plink->connected == MLME_NO_LINK)
+ continue;
- p += scnprintf(p, end - p,
- ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n",
- plink->mac_id, plink->tx_time, plink->tx_retry);
+ p += scnprintf(p, end - p,
+ ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n",
+ plink->mac_id, plink->tx_time, plink->tx_retry);
- p += scnprintf(p, end - p,
- " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ",
- plink->pid, 110 - plink->stat.rssi,
- plink->stat.rssi, plink->busy,
- plink->dir == RTW89_TFC_UL ? "UL" : "DL");
+ p += scnprintf(p, end - p,
+ " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ",
+ plink->pid, 110 - plink->stat.rssi,
+ plink->stat.rssi, plink->busy,
+ plink->dir == RTW89_TFC_UL ? "UL" : "DL");
- t = &plink->stat.traffic;
+ t = &plink->stat.traffic;
- p += scnprintf(p, end - p,
- "tx[rate:%d/busy_level:%d], ",
- (u32)t->tx_rate, t->tx_tfc_lv);
+ p += scnprintf(p, end - p,
+ "tx[rate:%d/busy_level:%d], ",
+ t->tx_rate, t->tx_tfc_lv);
- p += scnprintf(p, end - p,
- "rx[rate:%d/busy_level:%d/drop:%d]\n",
- (u32)t->rx_rate,
- t->rx_tfc_lv, plink->rx_rate_drop_cnt);
+ p += scnprintf(p, end - p,
+ "rx[rate:%d/busy_level:%d/drop:%d]\n",
+ t->rx_rate,
+ t->rx_tfc_lv, plink->rx_rate_drop_cnt);
+ }
}
-
return p - buf;
}
@@ -8342,8 +8973,8 @@ static int _show_wl_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
else
goto out;
- p += scnprintf(p, end - p, " %-15s : link_mode:%d, ", "[status]",
- mode);
+ p += scnprintf(p, end - p, " %-15s : link_mode:%s, ", "[status]",
+ id_to_linkmode(mode));
p += scnprintf(p, end - p,
"rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
@@ -8433,8 +9064,11 @@ static int _show_bt_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw_coex);
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
union rtw89_btc_module_info *md = &btc->mdinfo;
+ s8 br_dbm = bt->link_info.bt_txpwr_desc.br_dbm;
+ s8 le_dbm = bt->link_info.bt_txpwr_desc.le_dbm;
char *p = buf, *end = buf + bufsz;
u8 *afh = bt_linfo->afh_map;
u8 *afh_le = bt_linfo->afh_map_le;
@@ -8567,6 +9201,28 @@ static int _show_bt_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
p += scnprintf(p, end - p, "\n");
}
+ if (ver_main >= 9 && bt_linfo->profile_cnt.now)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_TX_PWR_LVL, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_TX_PWR_LVL, false);
+
+ if (cx->cnt_bt[BTC_BCNT_BTTXPWR_UPDATE]) {
+ p += scnprintf(p, end - p,
+ " %-15s : br_index:0x%x, le_index:0x%x",
+ "[bt_txpwr_lvl]",
+ bt->link_info.bt_txpwr_desc.br_gain_index,
+ bt->link_info.bt_txpwr_desc.le_gain_index);
+ p += scnprintf(p, end - p, ", br_dbm:%d dBm", br_dbm);
+ p += scnprintf(p, end - p, ", le_dbm:%d dBm", le_dbm);
+ } else {
+ p += scnprintf(p, end - p,
+ " %-15s : br_index:NA, le_index:NA, br_dbm:%d dBm[def], le_dbm:%d dBm[def]",
+ "[bt_txpwr_lvl]",
+ bt->link_info.bt_txpwr_desc.br_dbm,
+ bt->link_info.bt_txpwr_desc.le_dbm);
+ }
+ p += scnprintf(p, end - p, "\n");
+
if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, true);
else
@@ -9142,7 +9798,6 @@ static int _show_fbtc_slots(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
if (i % 5 == 4)
p += scnprintf(p, end - p, "\n");
}
- p += scnprintf(p, end - p, "\n");
return p - buf;
}
@@ -9714,7 +10369,7 @@ static int _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz
return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
- p += scnprintf(p, end - p, "\n\r %-15s : cycle:%d", "[slot_stat]",
+ p += scnprintf(p, end - p, "\n %-15s : cycle:%d", "[slot_stat]",
le16_to_cpu(pcysta->cycles));
for (i = 0; i < CXST_MAX; i++) {
@@ -9862,7 +10517,7 @@ static int _show_fbtc_nullsta(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
if (ver->fcxnullsta == 1) {
for (i = 0; i < 2; i++) {
- p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, " %-15s : ", "\n[NULL-STA]");
p += scnprintf(p, end - p, "null-%d", i);
p += scnprintf(p, end - p, "[ok:%d/",
le32_to_cpu(ns->v1.result[i][1]));
@@ -9875,13 +10530,13 @@ static int _show_fbtc_nullsta(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
p += scnprintf(p, end - p, "avg_t:%d.%03d/",
le32_to_cpu(ns->v1.avg_t[i]) / 1000,
le32_to_cpu(ns->v1.avg_t[i]) % 1000);
- p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ p += scnprintf(p, end - p, "max_t:%d.%03d]",
le32_to_cpu(ns->v1.max_t[i]) / 1000,
le32_to_cpu(ns->v1.max_t[i]) % 1000);
}
} else if (ver->fcxnullsta == 7) {
for (i = 0; i < 2; i++) {
- p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, " %-15s : ", "\n[NULL-STA]");
p += scnprintf(p, end - p, "null-%d", i);
p += scnprintf(p, end - p, "[Tx:%d/",
le32_to_cpu(ns->v7.result[i][4]));
@@ -9896,13 +10551,13 @@ static int _show_fbtc_nullsta(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
p += scnprintf(p, end - p, "avg_t:%d.%03d/",
le32_to_cpu(ns->v7.tavg[i]) / 1000,
le32_to_cpu(ns->v7.tavg[i]) % 1000);
- p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ p += scnprintf(p, end - p, "max_t:%d.%03d]",
le32_to_cpu(ns->v7.tmax[i]) / 1000,
le32_to_cpu(ns->v7.tmax[i]) % 1000);
}
} else {
for (i = 0; i < 2; i++) {
- p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, " %-15s : ", "\n[NULL-STA]");
p += scnprintf(p, end - p, "null-%d", i);
p += scnprintf(p, end - p, "[Tx:%d/",
le32_to_cpu(ns->v2.result[i][4]));
@@ -9917,7 +10572,7 @@ static int _show_fbtc_nullsta(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
p += scnprintf(p, end - p, "avg_t:%d.%03d/",
le32_to_cpu(ns->v2.avg_t[i]) / 1000,
le32_to_cpu(ns->v2.avg_t[i]) % 1000);
- p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ p += scnprintf(p, end - p, "max_t:%d.%03d]",
le32_to_cpu(ns->v2.max_t[i]) / 1000,
le32_to_cpu(ns->v2.max_t[i]) % 1000);
}
@@ -10159,7 +10814,6 @@ static int _show_gpio_dbg(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
__func__);
- p += scnprintf(p, end - p, "\n");
goto out;
}
@@ -10432,7 +11086,6 @@ static int _show_mreg_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
id_to_regtype(type), offset, val);
cnt++;
}
- p += scnprintf(p, end - p, "\n");
out:
return p - buf;
@@ -11132,37 +11785,39 @@ static int _show_summary_v8(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
ssize_t rtw89_btc_dump_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
- struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ver *fwsubver = &btc->fwinfo.fw_subver;
const struct rtw89_btc_ver *ver = btc->ver;
- struct rtw89_btc_cx *cx = &btc->cx;
- struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
char *p = buf, *end = buf + bufsz;
+ dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO]++;
+
+ p += scnprintf(p, end - p,
+ "\n\n\n** Page:%3d/RunCNT:%3d **",
+ dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO],
+ dm->cnt_dm[BTC_DCNT_RUN]);
p += scnprintf(p, end - p,
- "=========================================\n");
+ "\n========== [BTC FEATURE SUB VER] ==========");
p += scnprintf(p, end - p,
- "WL FW / BT FW %d.%d.%d.%d / NA\n",
- fw_suit->major_ver, fw_suit->minor_ver,
- fw_suit->sub_ver, fw_suit->sub_idex);
- p += scnprintf(p, end - p, "manual %d\n",
- btc->manual_ctrl);
-
+ "\n %-15s : fcxbtcrpt[%d/%d], fcxtdma[%d/%d], fcxslots[%d/%d], fcxcysta[%d/%d]",
+ "[FW/DRV]", fwsubver->fcxbtcrpt, ver->fcxbtcrpt,
+ fwsubver->fcxtdma, ver->fcxtdma, fwsubver->fcxslots,
+ ver->fcxslots, fwsubver->fcxcysta, ver->fcxcysta);
p += scnprintf(p, end - p,
- "=========================================\n");
-
+ "\n %-15s : fcxstep[%d/%d], fcxnullsta[%d/%d], fcxmreg[%d/%d], fcxgpiodbg[%d/%d]",
+ "[FW/DRV]", fwsubver->fcxstep, ver->fcxstep,
+ fwsubver->fcxnullsta, ver->fcxnullsta, fwsubver->fcxmreg,
+ ver->fcxmreg, fwsubver->fcxgpiodbg, ver->fcxgpiodbg);
p += scnprintf(p, end - p,
- "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)",
- "[bt_info]",
- bt->raw_info[2], bt->raw_info[3],
- bt->raw_info[4], bt->raw_info[5],
- bt->raw_info[6], bt->raw_info[7],
- bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
- cx->cnt_bt[BTC_BCNT_INFOUPDATE],
- cx->cnt_bt[BTC_BCNT_INFOSAME]);
-
+ "\n %-15s : fcxbtver[%d/%d], fcxbtscan[%d/%d], fcxbtafh[%d/%d], fcxbtdevinfo[%d/%d]",
+ "[FW/DRV]", fwsubver->fcxbtver, ver->fcxbtver,
+ fwsubver->fcxbtscan, ver->fcxbtscan, fwsubver->fcxbtafh,
+ ver->fcxbtafh, fwsubver->fcxbtdevinfo, ver->fcxbtdevinfo);
p += scnprintf(p, end - p,
- "\n=========================================\n");
+ "\n %-15s : fcxosi[%d/%d], fcxmlo[%d/%d],",
+ "[FW/DRV]", fwsubver->fcxosi, ver->fcxosi,
+ fwsubver->fcxmlo, ver->fcxmlo);
p += _show_cx_info(rtwdev, p, end - p);
p += _show_wl_info(rtwdev, p, end - p);
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index e3a1fcd79620..ea2c1e5d70f5 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -224,6 +224,13 @@ enum btc_wl_mode {
BTC_WL_MODE_NUM,
};
+enum btc_mlo_rf_combin {
+ BTC_MLO_RF_2_PLUS_0 = 0,
+ BTC_MLO_RF_0_PLUS_2 = 1,
+ BTC_MLO_RF_1_PLUS_1 = 2,
+ BTC_MLO_RF_2_PLUS_2 = 3,
+};
+
enum btc_wl_gpio_debug {
BTC_DBG_GNT_BT = 0,
BTC_DBG_GNT_WL = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 49447668cbf3..1f5639a5d166 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -204,6 +204,7 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
};
static const u8 rtw89_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
@@ -1722,7 +1723,7 @@ static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
},
[RTW89_CHIP_BE] = {
32, 40, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
- VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN,
+ VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 88, 56, VAR_LEN,
VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
},
};
@@ -1917,6 +1918,8 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
return -EINVAL;
pos = phy_ppdu->buf + PHY_STS_HDR_LEN;
+ if (phy_ppdu->hdr_2_en)
+ pos += PHY_STS_HDR_LEN;
end = phy_ppdu->buf + phy_ppdu->len;
while (pos < end) {
const struct rtw89_phy_sts_iehdr *iehdr = pos;
@@ -2158,6 +2161,11 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
if (rx_status->band != NL80211_BAND_6GHZ)
return;
+ if (unlikely(!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rx on unsupported 6 GHz\n");
+ return;
+ }
+
ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, skb->len);
list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
@@ -2477,6 +2485,41 @@ static void rtw89_core_update_rx_freq_from_ie(struct rtw89_dev *rtwdev,
rx_status->freq = ieee80211_channel_to_frequency(chan, rx_status->band);
}
+static void rtw89_core_correct_mcc_chan(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_sta_link *rtwsta_link;
+ const struct rtw89_chan *chan;
+ u8 mac_id = desc_info->mac_id;
+ enum rtw89_entity_mode mode;
+ enum nl80211_band band;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ if (likely(mode != RTW89_ENTITY_MODE_MCC))
+ return;
+
+ if (chip_gen == RTW89_CHIP_BE && phy_ppdu)
+ mac_id = phy_ppdu->mac_id;
+
+ rcu_read_lock();
+
+ rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, mac_id);
+ if (!rtwsta_link)
+ goto out;
+
+ rtwvif_link = rtwsta_link->rtwvif_link;
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ band = rtw89_hw_to_nl80211_band(chan->band_type);
+ rx_status->freq = ieee80211_channel_to_frequency(chan->primary_channel, band);
+
+out:
+ rcu_read_unlock();
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -2495,6 +2538,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
rtw89_core_validate_rx_signal(rx_status);
rtw89_core_update_rx_freq_from_ie(rtwdev, skb_ppdu, rx_status);
+ rtw89_core_correct_mcc_chan(rtwdev, desc_info, rx_status, phy_ppdu);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
@@ -2752,9 +2796,11 @@ static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev,
}
static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb,
struct rtw89_rx_desc_info *desc_info,
struct ieee80211_rx_status *rx_status)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
const struct cfg80211_chan_def *chandef =
rtw89_chandef_get(rtwdev, RTW89_CHANCTX_0);
u16 data_rate;
@@ -2766,6 +2812,10 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
rx_status->freq = chandef->chan->center_freq;
rx_status->band = chandef->chan->band;
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+
if (rtwdev->scanning &&
RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
const struct rtw89_chan *cur = rtw89_scan_chan_get(rtwdev);
@@ -2922,7 +2972,7 @@ void rtw89_core_rx(struct rtw89_dev *rtwdev,
rx_status = IEEE80211_SKB_RXCB(skb);
memset(rx_status, 0, sizeof(*rx_status));
- rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
+ rtw89_core_update_rx_status(rtwdev, skb, desc_info, rx_status);
rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info);
if (desc_info->long_rxdesc &&
BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
@@ -3330,8 +3380,8 @@ static void rtw89_core_handle_sta_pending_tx(struct rtw89_dev *rtwdev,
rtwvif_link);
}
-static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link, bool qos, bool ps)
+int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ bool qos, bool ps, int timeout)
{
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
@@ -3379,7 +3429,7 @@ static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
rcu_read_unlock();
return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
- RTW89_ROC_TX_TIMEOUT);
+ timeout);
out:
rcu_read_unlock();
@@ -3416,7 +3466,8 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
pause_parm.trigger = rtwvif_link;
rtw89_chanctx_pause(rtwdev, &pause_parm);
- ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true);
+ ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, true,
+ RTW89_ROC_TX_TIMEOUT);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"roc send null-1 failed: %d\n", ret);
@@ -3476,7 +3527,8 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
roc->state = RTW89_ROC_IDLE;
rtw89_config_roc_chandef(rtwdev, rtwvif_link, NULL);
rtw89_chanctx_proceed(rtwdev, NULL);
- ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false);
+ ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, true, false,
+ RTW89_ROC_TX_TIMEOUT);
if (ret)
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
"roc send null-0 failed: %d\n", ret);
@@ -3742,7 +3794,7 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
lockdep_assert_wiphy(wiphy);
- if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags))
+ if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags))
return;
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
@@ -3967,6 +4019,12 @@ int rtw89_core_sta_link_add(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev, rtwvif_link);
+
+ if (vif->p2p) {
+ rtw89_mac_get_tx_retry_limit(rtwdev, rtwsta_link,
+ &rtwsta_link->tx_retry);
+ rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false, 60);
+ }
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta_link->mac_id, false);
if (ret) {
@@ -4151,6 +4209,10 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
}
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true);
+
+ if (vif->p2p)
+ rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false,
+ rtwsta_link->tx_retry);
}
rtw89_assoc_link_set(rtwsta_link);
@@ -4169,6 +4231,10 @@ int rtw89_core_sta_link_remove(struct rtw89_dev *rtwdev,
rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, false);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif_link, rtwsta_link,
BTC_ROLE_MSTS_STA_DIS_CONN);
+
+ if (vif->p2p)
+ rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta_link, false,
+ rtwsta_link->tx_retry);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, rtwsta_link,
RTW89_ROLE_REMOVE);
@@ -4655,6 +4721,43 @@ void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
}
+void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_vif_link *rtwvif_link =
+ container_of(work, struct rtw89_vif_link, csa_beacon_work.work);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct ieee80211_bss_conf *bss_conf;
+ unsigned int delay;
+
+ lockdep_assert_wiphy(wiphy);
+
+ if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
+ return;
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ if (!bss_conf->csa_active) {
+ rcu_read_unlock();
+ return;
+ }
+
+ delay = ieee80211_tu_to_usec(bss_conf->beacon_int);
+
+ rcu_read_unlock();
+
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, rtwvif_link->link_id)) {
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
+
+ wiphy_delayed_work_queue(wiphy, &rtwvif_link->csa_beacon_work,
+ usecs_to_jiffies(delay));
+ } else {
+ ieee80211_csa_finish(vif, rtwvif_link->link_id);
+ }
+}
+
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
{
struct completion *cmpl = &wait->completion;
@@ -4808,6 +4911,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_bt_devinfo_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_rfk_chk_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->cfo_track_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->mcc_prepare_done_work);
cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->antdiv_work);
@@ -5034,6 +5138,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
wiphy_delayed_work_init(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
@@ -5127,6 +5232,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
{
struct ieee80211_bss_conf *bss_conf;
struct rtw89_bb_ctx *bb;
+ int ret;
if (!rtwvif_link)
return;
@@ -5146,6 +5252,14 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
rtw89_phy_config_edcca(rtwdev, bb, false);
rtw89_tas_scan(rtwdev, false);
+ if (hw_scan) {
+ ret = rtw89_core_send_nullfunc(rtwdev, rtwvif_link, false, false,
+ RTW89_SCAN_NULL_TIMEOUT);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "scan send null-0 failed: %d\n", ret);
+ }
+
rtwdev->scanning = false;
rtw89_for_each_active_bb(rtwdev, bb)
bb->dig.bypass_dig = true;
@@ -5239,7 +5353,8 @@ int rtw89_core_mlsr_switch(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
if (unlikely(!ieee80211_vif_is_mld(vif)))
return -EOPNOTSUPP;
- if (unlikely(!(usable_links & BIT(link_id)))) {
+ if (unlikely(link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ !(usable_links & BIT(link_id)))) {
rtw89_warn(rtwdev, "%s: link id %u is not usable\n", __func__,
link_id);
return -ENOLINK;
@@ -5504,6 +5619,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
@@ -5530,6 +5646,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
if (!chip->support_rnr)
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 1c8f3b9b7c4c..cdacf100a59a 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -1205,7 +1205,7 @@ struct rtw89_mac_ax_gnt {
struct rtw89_mac_ax_wl_act {
u8 wlan_act_en;
u8 wlan_act;
-};
+} __packed;
#define RTW89_MAC_AX_COEX_GNT_NR 2
struct rtw89_mac_ax_coex_gnt {
@@ -1322,6 +1322,7 @@ enum rtw89_btc_bt_state_cnt {
BTC_BCNT_POLUT_NOW,
BTC_BCNT_POLUT_DIFF,
BTC_BCNT_RATECHG,
+ BTC_BCNT_BTTXPWR_UPDATE,
BTC_BCNT_NUM,
};
@@ -1557,6 +1558,25 @@ struct rtw89_btc_wl_dbcc_info {
u8 role[RTW89_PHY_NUM]; /* role in each phy */
};
+struct rtw89_btc_wl_mlo_info {
+ u8 wmode[RTW89_PHY_NUM]; /* enum phl_mr_wmode */
+ u8 ch_type[RTW89_PHY_NUM]; /* enum phl_mr_ch_type */
+ u8 hwb_rf_band[RTW89_PHY_NUM]; /* enum band_type, RF-band for HW-band */
+ u8 path_rf_band[RTW89_PHY_NUM]; /* enum band_type, RF-band for PHY0/1 */
+
+ u8 wtype; /* enum phl_mr_wtype */
+ u8 mrcx_mode;
+ u8 mrcx_act_hwb_map;
+ u8 mrcx_bt_slot_rsp;
+
+ u8 rf_combination; /* enum btc_mlo_rf_combin 0:2+0, 1:0+2, 2:1+1, 3:2+2 */
+ u8 mlo_en; /* MLO enable */
+ u8 mlo_adie; /* a-die count */
+ u8 dual_hw_band_en; /* links exist on both HW-bands */
+
+ u32 link_status; /* enum mlo_dbcc_mode_type */
+};
+
struct rtw89_btc_wl_active_role {
u8 connected: 1;
u8 pid: 3;
@@ -1791,6 +1811,13 @@ union rtw89_btc_bt_state_map {
#define BTC_BT_AFH_GROUP 12
#define BTC_BT_AFH_LE_GROUP 5
+struct rtw89_btc_bt_txpwr_desc {
+ s8 br_dbm;
+ s8 le_dbm;
+ u8 br_gain_index;
+ u8 le_gain_index;
+};
+
struct rtw89_btc_bt_link_info {
struct rtw89_btc_u8_sta_chg profile_cnt;
struct rtw89_btc_bool_sta_chg multi_link;
@@ -1800,6 +1827,7 @@ struct rtw89_btc_bt_link_info {
struct rtw89_btc_bt_a2dp_desc a2dp_desc;
struct rtw89_btc_bt_pan_desc pan_desc;
union rtw89_btc_bt_state_map status;
+ struct rtw89_btc_bt_txpwr_desc bt_txpwr_desc;
u8 sut_pwr_level[BTC_PROFILE_MAX];
u8 golden_rx_shift[BTC_PROFILE_MAX];
@@ -1895,6 +1923,7 @@ struct rtw89_btc_wl_info {
struct rtw89_btc_wl_role_info_v8 role_info_v8;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
+ struct rtw89_btc_wl_mlo_info mlo_info;
struct rtw89_btc_rf_para rf_para;
struct rtw89_btc_wl_nhm nhm;
union rtw89_btc_wl_state_map status;
@@ -1907,12 +1936,16 @@ struct rtw89_btc_wl_info {
u8 bt_polut_type[RTW89_PHY_NUM]; /* BT polluted WL-Tx type for phy0/1 */
bool is_5g_hi_channel;
+ bool go_client_exist;
+ bool noa_exist;
bool pta_reg_mac_chg;
bool bg_mode;
bool he_mode;
bool scbd_change;
bool fw_ver_mismatch;
bool client_cnt_inc_2g;
+ bool link_mode_chg;
+ bool dbcc_chg;
u32 scbd;
};
@@ -2065,6 +2098,7 @@ struct rtw89_btc_bt_info {
union rtw89_btc_bt_rfk_info_map rfk_info;
u8 raw_info[BTC_BTINFO_MAX]; /* raw bt info from mailbox */
+ u8 txpwr_info[BTC_BTINFO_MAX];
u8 rssi_level;
u32 scbd;
@@ -2903,12 +2937,32 @@ struct rtw89_btc_trx_info {
u32 rx_err_ratio;
};
+enum btc_rf_path {
+ BTC_RF_S0 = 0,
+ BTC_RF_S1 = 1,
+ BTC_RF_NUM,
+};
+
+struct rtw89_btc_fbtc_outsrc_set_info {
+ u8 rf_band[BTC_RF_NUM]; /* 0:2G, 1:non-2G */
+ u8 btg_rx[BTC_RF_NUM];
+ u8 nbtg_tx[BTC_RF_NUM];
+
+ struct rtw89_mac_ax_gnt gnt_set[BTC_RF_NUM]; /* refer to btc_gnt_ctrl */
+ struct rtw89_mac_ax_wl_act wlact_set[BTC_RF_NUM]; /* BT0/BT1 */
+
+ u8 pta_req_hw_band;
+ u8 rf_gbt_source;
+} __packed;
+
union rtw89_btc_fbtc_slot_u {
struct rtw89_btc_fbtc_slot v1[CXST_MAX];
struct rtw89_btc_fbtc_slot_v7 v7[CXST_MAX];
};
struct rtw89_btc_dm {
+ struct rtw89_btc_fbtc_outsrc_set_info ost_info_last; /* outsrc API setup info */
+ struct rtw89_btc_fbtc_outsrc_set_info ost_info; /* outsrc API setup info */
union rtw89_btc_fbtc_slot_u slot;
union rtw89_btc_fbtc_slot_u slot_now;
struct rtw89_btc_fbtc_tdma tdma;
@@ -2998,6 +3052,7 @@ enum rtw89_btc_btf_fw_event {
BTF_EVNT_BT_LEAUDIO_INFO = 7, /* fwc2hfunc > 1 */
BTF_EVNT_BUF_OVERFLOW,
BTF_EVNT_C2H_LOOPBACK,
+ BTF_EVNT_BT_QUERY_TXPWR, /* fwc2hfunc > 3 */
BTF_EVNT_MAX,
};
@@ -3116,31 +3171,6 @@ enum rtw89_btc_btfre_type {
BTFRE_MAX,
};
-struct rtw89_btc_btf_fwinfo {
- u32 cnt_c2h;
- u32 cnt_h2c;
- u32 cnt_h2c_fail;
- u32 event[BTF_EVNT_MAX];
-
- u32 err[BTFRE_MAX];
- u32 len_mismch;
- u32 fver_mismch;
- u32 rpt_en_map;
-
- struct rtw89_btc_report_ctrl_state rpt_ctrl;
- struct rtw89_btc_rpt_fbtc_tdma rpt_fbtc_tdma;
- struct rtw89_btc_rpt_fbtc_slots rpt_fbtc_slots;
- struct rtw89_btc_rpt_fbtc_cysta rpt_fbtc_cysta;
- struct rtw89_btc_rpt_fbtc_step rpt_fbtc_step;
- struct rtw89_btc_rpt_fbtc_nullsta rpt_fbtc_nullsta;
- struct rtw89_btc_rpt_fbtc_mreg rpt_fbtc_mregval;
- struct rtw89_btc_rpt_fbtc_gpio_dbg rpt_fbtc_gpio_dbg;
- struct rtw89_btc_rpt_fbtc_btver rpt_fbtc_btver;
- struct rtw89_btc_rpt_fbtc_btscan rpt_fbtc_btscan;
- struct rtw89_btc_rpt_fbtc_btafh rpt_fbtc_btafh;
- struct rtw89_btc_rpt_fbtc_btdev rpt_fbtc_btdev;
-};
-
struct rtw89_btc_ver {
enum rtw89_core_chip_id chip_id;
u32 fw_ver_code;
@@ -3167,6 +3197,35 @@ struct rtw89_btc_ver {
u8 drvinfo_type;
u16 info_buf;
u8 max_role_num;
+ u8 fcxosi;
+ u8 fcxmlo;
+ u8 bt_desired;
+};
+
+struct rtw89_btc_btf_fwinfo {
+ u32 cnt_c2h;
+ u32 cnt_h2c;
+ u32 cnt_h2c_fail;
+ u32 event[BTF_EVNT_MAX];
+
+ u32 err[BTFRE_MAX];
+ u32 len_mismch;
+ u32 fver_mismch;
+ u32 rpt_en_map;
+
+ struct rtw89_btc_ver fw_subver;
+ struct rtw89_btc_report_ctrl_state rpt_ctrl;
+ struct rtw89_btc_rpt_fbtc_tdma rpt_fbtc_tdma;
+ struct rtw89_btc_rpt_fbtc_slots rpt_fbtc_slots;
+ struct rtw89_btc_rpt_fbtc_cysta rpt_fbtc_cysta;
+ struct rtw89_btc_rpt_fbtc_step rpt_fbtc_step;
+ struct rtw89_btc_rpt_fbtc_nullsta rpt_fbtc_nullsta;
+ struct rtw89_btc_rpt_fbtc_mreg rpt_fbtc_mregval;
+ struct rtw89_btc_rpt_fbtc_gpio_dbg rpt_fbtc_gpio_dbg;
+ struct rtw89_btc_rpt_fbtc_btver rpt_fbtc_btver;
+ struct rtw89_btc_rpt_fbtc_btscan rpt_fbtc_btscan;
+ struct rtw89_btc_rpt_fbtc_btafh rpt_fbtc_btafh;
+ struct rtw89_btc_rpt_fbtc_btdev rpt_fbtc_btdev;
};
#define RTW89_BTC_POLICY_MAXLEN 512
@@ -3385,6 +3444,7 @@ struct rtw89_sta_link {
unsigned int link_id;
u8 mac_id;
+ u8 tx_retry;
bool er_cap;
struct rtw89_vif_link *rtwvif_link;
struct rtw89_ra_info ra;
@@ -3440,6 +3500,8 @@ struct rtw89_tx_skb_data {
u8 hci_priv[];
};
+#define RTW89_SCAN_NULL_TIMEOUT 30
+
#define RTW89_ROC_IDLE_TIMEOUT 500
#define RTW89_ROC_TX_TIMEOUT 30
enum rtw89_roc_state {
@@ -3533,6 +3595,7 @@ struct rtw89_vif_link {
bool pwr_diff_en;
u8 def_tri_idx;
struct wiphy_work update_beacon_work;
+ struct wiphy_delayed_work csa_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
@@ -3645,6 +3708,8 @@ struct rtw89_chip_ops {
enum rtw89_phy_idx phy_idx);
int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path);
+ u32 (*chan_to_rf18_val)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan);
void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx);
void (*query_ppdu)(struct rtw89_dev *rtwdev,
@@ -4358,7 +4423,6 @@ struct rtw89_chip_info {
u32 para_ver;
u32 wlcx_desired;
- u8 btcx_desired;
u8 scbd;
u8 mailbox;
@@ -4513,6 +4577,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0,
RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1,
RTW89_FW_FEATURE_RFK_RXDCK_V0,
+ RTW89_FW_FEATURE_RFK_IQK_V0,
RTW89_FW_FEATURE_NO_WOW_CPU_IO_RX,
RTW89_FW_FEATURE_NOTIFY_AP_INFO,
RTW89_FW_FEATURE_CH_INFO_BE_V0,
@@ -4520,6 +4585,8 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_PHYCAP_P1,
RTW89_FW_FEATURE_NO_POWER_DIFFERENCE,
RTW89_FW_FEATURE_BEACON_LOSS_COUNT_V1,
+ RTW89_FW_FEATURE_SCAN_OFFLOAD_EXTRA_OP,
+ RTW89_FW_FEATURE_RFK_NTFY_MCC_V0,
};
struct rtw89_fw_suit {
@@ -4912,7 +4979,7 @@ enum rtw89_flags {
RTW89_FLAG_CRASH_SIMULATING,
RTW89_FLAG_SER_HANDLING,
RTW89_FLAG_WOWLAN,
- RTW89_FLAG_FORBIDDEN_TRACK_WROK,
+ RTW89_FLAG_FORBIDDEN_TRACK_WORK,
RTW89_FLAG_CHANGING_INTERFACE,
RTW89_FLAG_HW_RFKILL_STATE,
@@ -5447,11 +5514,18 @@ struct rtw89_early_h2c {
u16 h2c_len;
};
+struct rtw89_hw_scan_extra_op {
+ bool set;
+ u8 macid;
+ struct rtw89_chan chan;
+};
+
struct rtw89_hw_scan_info {
struct rtw89_vif_link *scanning_vif;
struct list_head pkt_list[NUM_NL80211_BANDS];
struct list_head chan_list;
struct rtw89_chan op_chan;
+ struct rtw89_hw_scan_extra_op extra_op;
bool connected;
bool abort;
};
@@ -5672,6 +5746,7 @@ struct rtw89_mcc_role {
/* byte-array in LE order for FW */
u8 macid_bitmap[BITS_TO_BYTES(RTW89_MAX_MAC_ID_NUM)];
+ u8 probe_count;
u16 duration; /* TU */
u16 beacon_interval; /* TU */
@@ -5728,6 +5803,7 @@ struct rtw89_mcc_config {
struct rtw89_mcc_sync sync;
u64 start_tsf;
u64 start_tsf_in_aux_domain;
+ u64 prepare_delay;
u16 mcc_interval; /* TU */
u16 beacon_offset; /* TU */
};
@@ -5858,6 +5934,7 @@ struct rtw89_dev {
struct wiphy_delayed_work coex_bt_devinfo_work;
struct wiphy_delayed_work coex_rfk_chk_work;
struct wiphy_delayed_work cfo_track_work;
+ struct wiphy_delayed_work mcc_prepare_done_work;
struct delayed_work forbid_ba_work;
struct wiphy_delayed_work antdiv_work;
struct rtw89_ppdu_sts_info ppdu_sts;
@@ -6880,6 +6957,17 @@ static inline u8 rtw89_chip_get_thermal(struct rtw89_dev *rtwdev,
return chip->ops->get_thermal(rtwdev, rf_path);
}
+static inline u32 rtw89_chip_chan_to_rf18_val(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->chan_to_rf18_val)
+ return 0;
+
+ return chip->ops->chan_to_rf18_val(rtwdev, chan);
+}
+
static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -7317,6 +7405,9 @@ void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
+int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
+ bool qos, bool ps, int timeout);
void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index d6016fa107fb..4acb567b3ad4 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -1114,6 +1114,7 @@ static int rtw89_debug_dump_mac_mem(struct rtw89_dev *rtwdev,
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 filter_model_addr = mac->filter_model_addr;
u32 indir_access_addr = mac->indir_access_addr;
+ u32 mem_page_size = mac->mem_page_size;
u32 base_addr, start_page, residue;
char *p = buf, *end = buf + bufsz;
u32 i, j, pp, pages;
@@ -1121,14 +1122,14 @@ static int rtw89_debug_dump_mac_mem(struct rtw89_dev *rtwdev,
u32 val;
remain = len;
- pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1;
- start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
- residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
+ pages = len / mem_page_size + 1;
+ start_page = start_addr / mem_page_size;
+ residue = start_addr % mem_page_size;
base_addr = mac->mem_base_addrs[sel];
- base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
+ base_addr += start_page * mem_page_size;
for (pp = 0; pp < pages; pp++) {
- dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE);
+ dump_len = min_t(u32, remain, mem_page_size);
rtw89_write32(rtwdev, filter_model_addr, base_addr);
for (i = indir_access_addr + residue;
i < indir_access_addr + dump_len;) {
@@ -1142,7 +1143,7 @@ static int rtw89_debug_dump_mac_mem(struct rtw89_dev *rtwdev,
}
p += scnprintf(p, end - p, "\n");
}
- base_addr += MAC_MEM_DUMP_PAGE_SIZE;
+ base_addr += mem_page_size;
}
return p - buf;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 00b65b2995cf..c613431e754f 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -833,17 +833,20 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
@@ -3030,12 +3033,10 @@ fail:
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
- struct ieee80211_bss_conf *bss_conf,
struct ieee80211_p2p_noa_desc *desc,
- u8 act, u8 noa_id)
+ u8 act, u8 noa_id, u8 ctwindow_oppps)
{
bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
- u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow;
struct sk_buff *skb;
u8 *cmd;
int ret;
@@ -5083,6 +5084,46 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_fbtc_outsrc_set_info *osi = &btc->dm.ost_info;
+ struct rtw89_h2c_cxosi *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_osi\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxosi *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fcxosi;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ h2c->osi = *osi;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
@@ -5361,6 +5402,7 @@ static
int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list)
{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_h2c_chinfo_elem *elem;
struct rtw89_mac_chinfo_ax *ch_info;
@@ -5403,6 +5445,10 @@ int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
+ if (scan_info->extra_op.set)
+ elem->w1 |= le32_encode_bits(ch_info->macid_tx,
+ RTW89_H2C_CHINFO_W1_MACID_TX);
+
elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
@@ -5543,6 +5589,7 @@ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
bool wowlan)
{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
@@ -5602,6 +5649,10 @@ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
RTW89_H2C_SCANOFLD_W4_TSF_LOW);
+ if (scan_info->extra_op.set)
+ h2c->w6 = le32_encode_bits(scan_info->extra_op.macid,
+ RTW89_H2C_SCANOFLD_W6_SECOND_MACID);
+
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_SCANOFLD, 1, 1,
@@ -5877,31 +5928,48 @@ fail:
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
+ struct rtw89_fw_h2c_rf_get_mccch_v0 *mccch_v0;
struct rtw89_fw_h2c_rf_get_mccch *mccch;
+ u32 len = sizeof(*mccch);
struct sk_buff *skb;
+ u8 ver = U8_MAX;
int ret;
u8 idx;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
+ if (RTW89_CHK_FW_FEATURE(RFK_NTFY_MCC_V0, &rtwdev->fw)) {
+ len = sizeof(*mccch_v0);
+ ver = 0;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
return -ENOMEM;
}
- skb_put(skb, sizeof(*mccch));
- mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
+ skb_put(skb, len);
idx = rfk_mcc->table_idx;
- mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
- mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
- mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
- mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
- mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
- mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
+ if (ver == 0) {
+ mccch_v0 = (struct rtw89_fw_h2c_rf_get_mccch_v0 *)skb->data;
+ mccch_v0->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
+ mccch_v0->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
+ mccch_v0->band_0 = cpu_to_le32(rfk_mcc->band[0]);
+ mccch_v0->band_1 = cpu_to_le32(rfk_mcc->band[1]);
+ mccch_v0->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
+ mccch_v0->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
+ } else {
+ mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
+ mccch->ch_0_0 = cpu_to_le32(rfk_mcc->ch[0]);
+ mccch->ch_0_1 = cpu_to_le32(rfk_mcc->ch[0]);
+ mccch->ch_1_0 = cpu_to_le32(rfk_mcc->ch[1]);
+ mccch->ch_1_1 = cpu_to_le32(rfk_mcc->ch[1]);
+ mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
+ }
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
- sizeof(*mccch));
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -5917,6 +5985,65 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_h2c_rf_ps_info *h2c;
+ const struct rtw89_chan *chan;
+ u32 len = sizeof(*h2c);
+ unsigned int link_id;
+ struct sk_buff *skb;
+ int ret;
+ u8 path;
+ u32 val;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rf ps info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_ps_info *)skb->data;
+ h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx);
+ val = rtw89_chip_chan_to_rf18_val(rtwdev, chan);
+
+ if (path >= chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", path);
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ h2c->rf18[path] = cpu_to_le32(val);
+ h2c->pri_ch[path] = chan->primary_channel;
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
+ H2C_FUNC_OUTSRC_RF_PS_INFO, 0, 0,
+ sizeof(*h2c));
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_rf_ps_info);
+
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
@@ -6031,6 +6158,7 @@ fail:
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_h2c_rf_tssi *h2c;
u32 len = sizeof(*h2c);
@@ -6053,6 +6181,7 @@ int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
h2c->hwtx_en = true;
h2c->cv = hal->cv;
h2c->tssi_mode = tssi_mode;
+ h2c->rfe_type = efuse->rfe_type;
rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
@@ -6077,22 +6206,47 @@ fail:
int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan)
{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_iqk_v0 *h2c_v0;
struct rtw89_h2c_rf_iqk *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ u8 ver = U8_MAX;
int ret;
+ if (RTW89_CHK_FW_FEATURE(RFK_IQK_V0, &rtwdev->fw)) {
+ len = sizeof(*h2c_v0);
+ ver = 0;
+ }
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
return -ENOMEM;
}
skb_put(skb, len);
+
+ if (ver == 0) {
+ h2c_v0 = (struct rtw89_h2c_rf_iqk_v0 *)skb->data;
+
+ h2c_v0->phy_idx = cpu_to_le32(phy_idx);
+ h2c_v0->dbcc = cpu_to_le32(rtwdev->dbcc_en);
+
+ goto done;
+ }
+
h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
- h2c->phy_idx = cpu_to_le32(phy_idx);
- h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
+ h2c->len = sizeof(*h2c);
+ h2c->ktype = 0;
+ h2c->phy = phy_idx;
+ h2c->kpath = rtw89_phy_get_kpath(rtwdev, phy_idx);
+ h2c->band = chan->band_type;
+ h2c->bw = chan->band_width;
+ h2c->ch = chan->channel;
+ h2c->cv = hal->cv;
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
@@ -6845,6 +6999,7 @@ static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
+ const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
struct cfg80211_scan_request *req = rtwvif->scan_req;
@@ -6915,6 +7070,15 @@ static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
case RTW89_CHAN_ACTIVE:
ch_info->pause_data = true;
break;
+ case RTW89_CHAN_EXTRA_OP:
+ ch_info->central_ch = ext->chan.channel;
+ ch_info->pri_ch = ext->chan.primary_channel;
+ ch_info->ch_band = ext->chan.band_type;
+ ch_info->bw = ext->chan.band_width;
+ ch_info->tx_null = true;
+ ch_info->num_pkt = 0;
+ ch_info->macid_tx = true;
+ break;
default:
rtw89_err(rtwdev, "Channel type out of bound\n");
}
@@ -7073,10 +7237,45 @@ out:
return ret;
}
+static int rtw89_hw_scan_add_op_types_ax(struct rtw89_dev *rtwdev,
+ enum rtw89_chan_type type,
+ struct list_head *chan_list,
+ struct cfg80211_scan_request *req,
+ int *off_chan_time)
+{
+ struct rtw89_mac_chinfo_ax *tmp;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ switch (type) {
+ case RTW89_CHAN_OPERATE:
+ tmp->period = req->duration_mandatory ?
+ req->duration : RTW89_CHANNEL_TIME;
+ *off_chan_time = 0;
+ break;
+ case RTW89_CHAN_EXTRA_OP:
+ tmp->period = RTW89_CHANNEL_TIME_EXTRA_OP;
+ /* still accumulate @off_chan_time toward inserting the regular scan op */
+ *off_chan_time += tmp->period;
+ break;
+ default:
+ kfree(tmp);
+ return -EINVAL;
+ }
+
+ rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
+ list_add_tail(&tmp->list, chan_list);
+
+ return 0;
+}
+
int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo_ax *ch_info, *tmp;
@@ -7103,6 +7302,8 @@ int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
else if (channel->band == NL80211_BAND_6GHZ)
ch_info->period = RTW89_CHANNEL_TIME_6G +
RTW89_DWELL_TIME_6G;
+ else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ ch_info->period = RTW89_P2P_CHAN_TIME;
else
ch_info->period = RTW89_CHANNEL_TIME;
@@ -7119,22 +7320,28 @@ int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
type = RTW89_CHAN_ACTIVE;
rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info);
- if (scan_info->connected &&
- off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- kfree(ch_info);
- goto out;
- }
+ if (!(scan_info->connected &&
+ off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME))
+ goto next;
+
+ ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_OPERATE,
+ &chan_list, req, &off_chan_time);
+ if (ret) {
+ kfree(ch_info);
+ goto out;
+ }
- type = RTW89_CHAN_OPERATE;
- tmp->period = req->duration_mandatory ?
- req->duration : RTW89_CHANNEL_TIME;
- rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
- list_add_tail(&tmp->list, &chan_list);
- off_chan_time = 0;
+ if (!ext->set)
+ goto next;
+
+ ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_EXTRA_OP,
+ &chan_list, req, &off_chan_time);
+ if (ret) {
+ kfree(ch_info);
+ goto out;
}
+
+next:
list_add_tail(&ch_info->list, &chan_list);
off_chan_time += ch_info->period;
}
@@ -7273,6 +7480,8 @@ int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
ch_info->period = req->duration;
else if (channel->band == NL80211_BAND_6GHZ)
ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
+ else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ ch_info->period = RTW89_P2P_CHAN_TIME;
else
ch_info->period = RTW89_CHANNEL_TIME;
@@ -7436,6 +7645,47 @@ static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *scan_rtwvif,
+ const struct rtw89_chan *scan_op)
+{
+ struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
+ struct rtw89_vif *tmp;
+
+ ext->set = false;
+ if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_EXTRA_OP, &rtwdev->fw))
+ return;
+
+ list_for_each_entry(tmp, &mgnt->active_list, mgnt_entry) {
+ const struct rtw89_chan *tmp_chan;
+ struct rtw89_vif_link *tmp_link;
+
+ if (tmp == scan_rtwvif)
+ continue;
+
+ tmp_link = rtw89_vif_get_link_inst(tmp, 0);
+ if (unlikely(!tmp_link)) {
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "hw scan: no HW-0 link for extra op\n");
+ continue;
+ }
+
+ tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx);
+ *ext = (struct rtw89_hw_scan_extra_op){
+ .set = true,
+ .macid = tmp_link->mac_id,
+ .chan = *tmp_chan,
+ };
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "hw scan: extra op: center %d primary %d\n",
+ ext->chan.channel, ext->chan.primary_channel);
+ break;
+ }
+}
+
int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct ieee80211_scan_request *scan_req)
@@ -7458,6 +7708,12 @@ int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
/* clone op and keep it during scan */
rtwdev->scan_info.op_chan = *chan;
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "hw scan: op: center %d primary %d\n",
+ chan->channel, chan->primary_channel);
+
+ rtw89_hw_scan_set_extra_op_info(rtwdev, rtwvif, chan);
+
rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
rtwdev->scan_info.scanning_vif = rtwvif_link;
rtwdev->scan_info.abort = false;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 0fcc824e41be..24d2e8b0d079 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -237,6 +237,7 @@ enum rtw89_chan_type {
RTW89_CHAN_OPERATE = 0,
RTW89_CHAN_ACTIVE,
RTW89_CHAN_DFS,
+ RTW89_CHAN_EXTRA_OP,
};
enum rtw89_p2pps_action {
@@ -316,8 +317,10 @@ struct rtw89_fw_macid_pause_sleep_grp {
#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
#define RTW89_CHANNEL_TIME_6G 20
+#define RTW89_CHANNEL_TIME_EXTRA_OP 30
#define RTW89_DFS_CHAN_TIME 105
#define RTW89_OFF_CHAN_TIME 100
+#define RTW89_P2P_CHAN_TIME 105
#define RTW89_DWELL_TIME 20
#define RTW89_DWELL_TIME_6G 10
#define RTW89_SCAN_WIDTH 0
@@ -352,7 +355,8 @@ struct rtw89_mac_chinfo_ax {
u8 tx_null:1;
u8 rand_seq_num:1;
u8 cfg_tx_pwr:1;
- u8 rsvd0: 4;
+ u8 macid_tx: 1;
+ u8 rsvd0: 3;
u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
u16 tx_pwr_idx;
u8 rsvd1;
@@ -2247,6 +2251,11 @@ struct rtw89_h2c_cxrole_v8 {
struct rtw89_btc_wl_role_info_v8_u32 _u32;
} __packed;
+struct rtw89_h2c_cxosi {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_fbtc_outsrc_set_info osi;
+} __packed;
+
struct rtw89_h2c_cxinit {
struct rtw89_h2c_cxhdr hdr;
u8 ant_type;
@@ -2674,6 +2683,7 @@ struct rtw89_h2c_chinfo_elem {
#define RTW89_H2C_CHINFO_W1_TX_NULL BIT(25)
#define RTW89_H2C_CHINFO_W1_RANDOM BIT(26)
#define RTW89_H2C_CHINFO_W1_CFG_TX BIT(27)
+#define RTW89_H2C_CHINFO_W1_MACID_TX BIT(29)
#define RTW89_H2C_CHINFO_W2_PKT0 GENMASK(7, 0)
#define RTW89_H2C_CHINFO_W2_PKT1 GENMASK(15, 8)
#define RTW89_H2C_CHINFO_W2_PKT2 GENMASK(23, 16)
@@ -2773,6 +2783,7 @@ struct rtw89_h2c_scanofld {
#define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16)
#define RTW89_H2C_SCANOFLD_W3_TSF_HIGH GENMASK(31, 0)
#define RTW89_H2C_SCANOFLD_W4_TSF_LOW GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_W6_SECOND_MACID GENMASK(31, 24)
struct rtw89_h2c_scanofld_be_macc_role {
__le32 w0;
@@ -4337,6 +4348,7 @@ enum rtw89_mrc_h2c_func {
#define H2C_CL_OUTSRC_RF_REG_B 0x9
#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+#define H2C_FUNC_OUTSRC_RF_PS_INFO 0x10
#define H2C_CL_OUTSRC_RF_FW_RFK 0xb
enum rtw89_rfk_offload_h2c_func {
@@ -4350,6 +4362,14 @@ enum rtw89_rfk_offload_h2c_func {
};
struct rtw89_fw_h2c_rf_get_mccch {
+ __le32 ch_0_0;
+ __le32 ch_0_1;
+ __le32 ch_1_0;
+ __le32 ch_1_1;
+ __le32 current_channel;
+} __packed;
+
+struct rtw89_fw_h2c_rf_get_mccch_v0 {
__le32 ch_0;
__le32 ch_1;
__le32 band_0;
@@ -4361,6 +4381,12 @@ struct rtw89_fw_h2c_rf_get_mccch {
#define NUM_OF_RTW89_FW_RFK_PATH 2
#define NUM_OF_RTW89_FW_RFK_TBL 3
+struct rtw89_h2c_rf_ps_info {
+ __le32 rf18[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 mlo_mode;
+ u8 pri_ch[NUM_OF_RTW89_FW_RFK_PATH];
+} __packed;
+
struct rtw89_fw_h2c_rfk_pre_info_common {
struct {
__le32 ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
@@ -4435,13 +4461,25 @@ struct rtw89_h2c_rf_tssi {
u8 pg_thermal[2];
u8 ftable[2][128];
u8 tssi_mode;
+ u8 rfe_type;
} __packed;
-struct rtw89_h2c_rf_iqk {
+struct rtw89_h2c_rf_iqk_v0 {
__le32 phy_idx;
__le32 dbcc;
} __packed;
+struct rtw89_h2c_rf_iqk {
+ u8 len;
+ u8 ktype;
+ u8 phy;
+ u8 kpath;
+ u8 band;
+ u8 bw;
+ u8 ch;
+ u8 cv;
+} __packed;
+
struct rtw89_h2c_rf_dpk {
u8 len;
u8 phy;
@@ -4713,6 +4751,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type);
@@ -4732,6 +4771,7 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
@@ -4815,9 +4855,8 @@ int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
const struct rtw89_pkt_drop_params *params);
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
- struct ieee80211_bss_conf *bss_conf,
struct ieee80211_p2p_noa_desc *desc,
- u8 act, u8 noa_id);
+ u8 act, u8 noa_id, u8 ctwindow_oppps);
int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
bool en);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 9f0e30e75009..53628838a7c5 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -4388,7 +4388,33 @@ static void rtw89_mac_port_cfg_tx_sw_by_nettype(struct rtw89_dev *rtwdev,
rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif_link, en);
}
-void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en)
+static void rtw89_mac_enable_ap_bcn_by_chan(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ const struct rtw89_chan *to_match,
+ bool en)
+{
+ const struct rtw89_chan *chan;
+
+ if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
+ return;
+
+ if (!to_match)
+ goto doit;
+
+ /* @to_match may not come from the same domain as the pointer returned
+ * by rtw89_chan_get(), so their addresses cannot be compared directly.
+ */
+ chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ if (chan->channel != to_match->channel)
+ return;
+
+doit:
+ rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif_link, en);
+}
+
+static void rtw89_mac_enable_aps_bcn_by_chan(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *to_match,
+ bool en)
{
struct rtw89_vif_link *rtwvif_link;
struct rtw89_vif *rtwvif;
@@ -4396,8 +4422,13 @@ void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en)
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
- rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif_link, en);
+ rtw89_mac_enable_ap_bcn_by_chan(rtwdev, rtwvif_link,
+ to_match, en);
+}
+
+void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en)
+{
+ rtw89_mac_enable_aps_bcn_by_chan(rtwdev, NULL, en);
}
static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
@@ -4891,11 +4922,22 @@ rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
{
}
-static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
+static const struct rtw89_chan *
+rtw89_hw_scan_search_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
const struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
- return band == op->band_type && channel == op->primary_channel;
+ if (band == op->band_type && channel == op->primary_channel)
+ return op;
+
+ if (scan_info->extra_op.set) {
+ op = &scan_info->extra_op.chan;
+ if (band == op->band_type && channel == op->primary_channel)
+ return op;
+ }
+
+ return NULL;
}
static void
@@ -4905,6 +4947,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
const struct rtw89_c2h_scanofld *c2h =
(const struct rtw89_c2h_scanofld *)skb->data;
struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
+ const struct rtw89_chan *op_chan;
struct rtw89_vif *rtwvif;
struct rtw89_chan new;
u16 actual_period, expect_period;
@@ -4960,8 +5003,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
switch (reason) {
case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
- if (rtw89_is_op_chan(rtwdev, band, chan)) {
- rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false);
+ op_chan = rtw89_hw_scan_search_op_chan(rtwdev, band, chan);
+ if (op_chan) {
+ rtw89_mac_enable_aps_bcn_by_chan(rtwdev, op_chan, false);
ieee80211_stop_queues(rtwdev->hw);
}
return;
@@ -4982,10 +5026,10 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
break;
case RTW89_SCAN_ENTER_OP_NOTIFY:
case RTW89_SCAN_ENTER_CH_NOTIFY:
- if (rtw89_is_op_chan(rtwdev, band, chan)) {
- rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx,
- &rtwdev->scan_info.op_chan);
- rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
+ op_chan = rtw89_hw_scan_search_op_chan(rtwdev, band, chan);
+ if (op_chan) {
+ rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx, op_chan);
+ rtw89_mac_enable_aps_bcn_by_chan(rtwdev, op_chan, true);
ieee80211_wake_queues(rtwdev->hw);
} else {
rtw89_chan_create(&new, chan, chan, band,
@@ -5717,6 +5761,7 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler = rtw89_mac_c2h_ap_handler[func];
break;
case RTW89_MAC_C2H_CLASS_FWDBG:
+ case RTW89_MAC_C2H_CLASS_ROLE:
return;
default:
rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
@@ -6869,6 +6914,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.filter_model_addr = R_AX_FILTER_MODEL_ADDR,
.indir_access_addr = R_AX_INDIR_ACCESS_ENTRY,
.mem_base_addrs = rtw89_mac_mem_base_addrs_ax,
+ .mem_page_size = MAC_MEM_DUMP_PAGE_SIZE_AX,
.rx_fltr = R_AX_RX_FLTR_OPT,
.port_base = &rtw89_port_base_ax,
.agg_len_ht = R_AX_AGG_LEN_HT_0,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 8013c852d5be..b7fd4a0fdb84 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -8,7 +8,9 @@
#include "core.h"
#include "reg.h"
-#define MAC_MEM_DUMP_PAGE_SIZE 0x40000
+#define MAC_MEM_DUMP_PAGE_SIZE_AX 0x40000
+#define MAC_MEM_DUMP_PAGE_SIZE_BE 0x80000
+
#define ADDR_CAM_ENT_SIZE 0x40
#define ADDR_CAM_ENT_SHORT_SIZE 0x20
#define BSSID_CAM_ENT_SIZE 0x08
@@ -469,6 +471,7 @@ enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_MLO = 0xc,
RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_AP = 0x18,
+ RTW89_MAC_C2H_CLASS_ROLE = 0x1b,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -969,6 +972,7 @@ struct rtw89_mac_gen_def {
u32 filter_model_addr;
u32 indir_access_addr;
const u32 *mem_base_addrs;
+ u32 mem_page_size;
u32 rx_fltr;
const struct rtw89_port_reg *port_base;
u32 agg_len_ht;
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index a47971003bd4..a3ae1e654a98 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -72,7 +72,7 @@ static void rtw89_ops_stop(struct ieee80211_hw *hw, bool suspend)
rtw89_core_stop(rtwdev);
}
-static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
+static int rtw89_ops_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -112,6 +112,8 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
rtw89_vif_type_mapping(rtwvif_link, false);
wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
+ wiphy_delayed_work_init(&rtwvif_link->csa_beacon_work, rtw89_core_csa_beacon_work);
+
INIT_LIST_HEAD(&rtwvif_link->general_pkt_list);
rtw89_p2p_noa_once_init(rtwvif_link);
@@ -144,6 +146,7 @@ static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev,
lockdep_assert_wiphy(rtwdev->hw->wiphy);
wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->csa_beacon_work);
rtw89_p2p_noa_once_deinit(rtwvif_link);
@@ -1007,7 +1010,8 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
-static int rtw89_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int rtw89_ops_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -1119,7 +1123,7 @@ static int rtw89_ops_set_bitrate_mask(struct ieee80211_hw *hw,
}
static
-int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+int rtw89_ops_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant, u32 rx_ant)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
@@ -1142,7 +1146,8 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
}
static
-int rtw89_ops_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+int rtw89_ops_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
+ u32 *rx_ant)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
@@ -1354,6 +1359,73 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif_link, ctx);
}
+static
+int rtw89_ops_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ bool replace;
+ int ret;
+ int i;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ switch (mode) {
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ replace = false;
+ break;
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ replace = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < n_vifs; i++) {
+ struct ieee80211_vif_chanctx_switch *p = &vifs[i];
+ struct ieee80211_bss_conf *link_conf = p->link_conf;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(p->vif);
+ struct rtw89_vif_link *rtwvif_link;
+
+ rtwvif_link = rtwvif->links[link_conf->link_id];
+ if (unlikely(!rtwvif_link)) {
+ rtw89_err(rtwdev,
+ "%s: rtwvif link (link_id %u) is not active\n",
+ __func__, link_conf->link_id);
+ return -ENOLINK;
+ }
+
+ ret = rtw89_chanctx_ops_reassign_vif(rtwdev, rtwvif_link,
+ p->old_ctx, p->new_ctx,
+ replace);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rtw89_ops_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif_link *rtwvif_link;
+
+ BUILD_BUG_ON(RTW89_MLD_NON_STA_LINK_NUM != 1);
+
+ rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
+ if (unlikely(!rtwvif_link)) {
+ rtw89_err(rtwdev, "chsw bcn: find no link on HW-0\n");
+ return;
+ }
+
+ wiphy_delayed_work_queue(hw->wiphy, &rtwvif_link->csa_beacon_work, 0);
+}
+
static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
@@ -1698,13 +1770,13 @@ static int rtw89_ops_suspend(struct ieee80211_hw *hw,
lockdep_assert_wiphy(hw->wiphy);
- set_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
+ set_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags);
wiphy_delayed_work_cancel(hw->wiphy, &rtwdev->track_work);
ret = rtw89_wow_suspend(rtwdev, wowlan);
if (ret) {
rtw89_warn(rtwdev, "failed to suspend for wow %d\n", ret);
- clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
+ clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags);
return 1;
}
@@ -1722,7 +1794,7 @@ static int rtw89_ops_resume(struct ieee80211_hw *hw)
if (ret)
rtw89_warn(rtwdev, "failed to resume for wow %d\n", ret);
- clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
+ clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WORK, rtwdev->flags);
wiphy_delayed_work_queue(hw->wiphy, &rtwdev->track_work,
RTW89_TRACK_WORK_PERIOD);
@@ -1805,6 +1877,8 @@ const struct ieee80211_ops rtw89_ops = {
.change_chanctx = rtw89_ops_change_chanctx,
.assign_vif_chanctx = rtw89_ops_assign_vif_chanctx,
.unassign_vif_chanctx = rtw89_ops_unassign_vif_chanctx,
+ .switch_vif_chanctx = rtw89_ops_switch_vif_chanctx,
+ .channel_switch_beacon = rtw89_ops_channel_switch_beacon,
.remain_on_channel = rtw89_ops_remain_on_channel,
.cancel_remain_on_channel = rtw89_ops_cancel_remain_on_channel,
.set_sar_specs = rtw89_ops_set_sar_specs,
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 8c9d326dc907..0078080b3999 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -2567,6 +2567,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.filter_model_addr = R_BE_FILTER_MODEL_ADDR,
.indir_access_addr = R_BE_INDIR_ACCESS_ENTRY,
.mem_base_addrs = rtw89_mac_mem_base_addrs_be,
+ .mem_page_size = MAC_MEM_DUMP_PAGE_SIZE_BE,
.rx_fltr = R_BE_RX_FLTR_OPT,
.port_base = &rtw89_port_base_be,
.agg_len_ht = R_BE_AGG_LEN_HT_0,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 064f6a940107..204a3748d913 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -4353,6 +4353,43 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);
+static pci_ers_result_t rtw89_pci_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t rtw89_pci_io_slot_reset(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_ser_notify(rtwdev, MAC_AX_ERR_ASSERTION);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void rtw89_pci_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ /* ack any pending wake events, disable PME */
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ netif_device_attach(netdev);
+}
+
+const struct pci_error_handlers rtw89_pci_err_handler = {
+ .error_detected = rtw89_pci_io_error_detected,
+ .slot_reset = rtw89_pci_io_slot_reset,
+ .resume = rtw89_pci_io_resume,
+};
+EXPORT_SYMBOL(rtw89_pci_err_handler);
+
const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.isr_rdu = B_AX_RDU_INT,
.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 79fef5f90140..52f527069da6 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -1622,6 +1622,7 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
extern const struct dev_pm_ops rtw89_pm_ops;
extern const struct dev_pm_ops rtw89_pm_ops_be;
+extern const struct pci_error_handlers rtw89_pci_err_handler;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 76a2e26d4a10..f81bee4149bf 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -119,10 +119,12 @@ static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
return mask;
}
-static u64 get_eht_ra_mask(struct ieee80211_link_sta *link_sta)
+static u64 get_eht_ra_mask(struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_link_sta *link_sta)
{
- struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
+ struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;
@@ -136,8 +138,8 @@ static u64 get_eht_ra_mask(struct ieee80211_link_sta *link_sta)
/* MCS 9, 11, 13 */
return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
case IEEE80211_STA_RX_BW_20:
- if (!(he_phy_cap[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(he_phy_cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
/* MCS 7, 9, 11, 13 */
return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
@@ -332,7 +334,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
/* Set the ra mask from sta's capability */
if (link_sta->eht_cap.has_eht) {
mode |= RTW89_RA_MODE_EHT;
- ra_mask |= get_eht_ra_mask(link_sta);
+ ra_mask |= get_eht_ra_mask(rtwvif_link, link_sta);
if (rtwdev->hal.no_mcs_12_13)
high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
@@ -5828,14 +5830,20 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
__rtw89_phy_env_monitor_track(rtwdev, bb);
}
-static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
+static bool rtw89_physts_ie_page_valid(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_status_bitmap *ie_page)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
*ie_page == RTW89_RSVD_9)
return false;
- else if (*ie_page > RTW89_RSVD_9)
+ else if (*ie_page > RTW89_RSVD_9 && *ie_page < RTW89_EHT_PKT)
*ie_page -= 1;
+ if (*ie_page == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX)
+ return false;
+
return true;
}
@@ -5843,6 +5851,9 @@ static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
static const u8 ie_page_shift = 2;
+ if (ie_page == RTW89_EHT_PKT)
+ return R_PHY_STS_BITMAP_EHT;
+
return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}
@@ -5852,7 +5863,7 @@ static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
{
u32 addr;
- if (!rtw89_physts_ie_page_valid(&ie_page))
+ if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
return 0;
addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
@@ -5867,7 +5878,7 @@ static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 addr;
- if (!rtw89_physts_ie_page_valid(&ie_page))
+ if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
return;
if (chip->chip_id == RTL8852A)
@@ -5877,21 +5888,6 @@ static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
}
-static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
- enum rtw89_phy_status_bitmap bitmap,
- enum rtw89_phy_status_ie_type ie,
- bool enable, enum rtw89_phy_idx phy_idx)
-{
- u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap, phy_idx);
-
- if (enable)
- val |= BIT(ie);
- else
- val &= ~BIT(ie);
-
- rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val, phy_idx);
-}
-
static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
bool enable,
enum rtw89_phy_idx phy_idx)
@@ -5915,30 +5911,37 @@ static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 val;
u8 i;
rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);
for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
- if (i >= RTW89_CCK_PKT)
- rtw89_physts_enable_ie_bitmap(rtwdev, i,
- RTW89_PHYSTS_IE09_FTR_0,
- true, phy_idx);
- if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
- (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
+ if (i == RTW89_RSVD_9 ||
+ (i == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX))
continue;
- rtw89_physts_enable_ie_bitmap(rtwdev, i,
- RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
- true, phy_idx);
- }
- rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
- RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);
- rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
- RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);
- /* force IE01 for channel index, only channel field is valid */
- rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
- RTW89_PHYSTS_IE01_CMN_OFDM, true, phy_idx);
+ val = rtw89_physts_get_ie_bitmap(rtwdev, i, phy_idx);
+ if (i == RTW89_HE_MU || i == RTW89_VHT_MU) {
+ val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF);
+ } else if (i == RTW89_TRIG_BASE_PPDU) {
+ val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) |
+ BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
+ } else if (i >= RTW89_CCK_PKT) {
+ val |= BIT(RTW89_PHYSTS_IE09_FTR_0);
+
+ val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D,
+ RTW89_PHYSTS_IE04_CMN_EXT_PATH_A));
+
+ if (i == RTW89_CCK_PKT)
+ val |= BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
+ else if (i >= RTW89_HT_PKT)
+ val |= BIT(RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0);
+ }
+
+ rtw89_physts_set_ie_bitmap(rtwdev, i, val, phy_idx);
+ }
}
static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
@@ -7125,7 +7128,7 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
const struct rtw89_edcca_p_regs *edcca_p_regs;
bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
- u8 path, per20_bitmap;
+ u8 path, per20_bitmap = 0;
u8 pwdb[8];
u32 tmp;
@@ -7155,14 +7158,11 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
pwdb_fb = u32_get_bits(tmp, MASKBYTE3);
rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
- edcca_p_regs->rpt_sel_mask, 4);
+ edcca_p_regs->rpt_sel_mask, 5);
tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);
- per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
- MASKBYTE0);
-
if (rtwdev->chip->chip_id == RTL8922A) {
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 4);
@@ -7171,6 +7171,8 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
pwdb[3] = u32_get_bits(tmp, MASKBYTE0);
+ per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
+ MASKBYTE0);
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 5);
@@ -7187,7 +7189,7 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *b
pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
- edcca_p_regs->rpt_sel_mask, 1);
+ edcca_p_regs->rpt_sel_mask, 5);
tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
pwdb[3] = u32_get_bits(tmp, MASKBYTE2);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 5b451f1cfaac..63cc33c16c9a 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -252,6 +252,7 @@ enum rtw89_phy_status_bitmap {
RTW89_HT_PKT = 13,
RTW89_VHT_PKT = 14,
RTW89_HE_PKT = 15,
+ RTW89_EHT_PKT = 16,
RTW89_PHYSTS_BITMAP_NUM
};
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 8e4fe73e7d77..3411d642c84a 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -137,6 +137,8 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
can_ps_mode = false;
}
+ rtw89_fw_h2c_rf_ps_info(rtwdev, rtwvif);
+
if (RTW89_CHK_FW_FEATURE(LPS_CH_INFO, &rtwdev->fw))
rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
else
@@ -236,13 +238,23 @@ static void rtw89_tsf32_toggle(struct rtw89_dev *rtwdev,
rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif_link, false);
}
-static void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link,
- struct ieee80211_bss_conf *bss_conf)
+void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_bss_conf *bss_conf)
{
enum rtw89_p2pps_action act;
+ u8 oppps_ctwindow;
u8 noa_id;
+ rcu_read_lock();
+
+ if (!bss_conf)
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+
+ oppps_ctwindow = bss_conf->p2p_noa_attr.oppps_ctwindow;
+
+ rcu_read_unlock();
+
if (rtwvif_link->last_noa_nr == 0)
return;
@@ -252,8 +264,8 @@ static void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
else
act = RTW89_P2P_ACT_REMOVE;
rtw89_tsf32_toggle(rtwdev, rtwvif_link, act);
- rtw89_fw_h2c_p2p_act(rtwdev, rtwvif_link, bss_conf,
- NULL, act, noa_id);
+ rtw89_fw_h2c_p2p_act(rtwdev, rtwvif_link, NULL,
+ act, noa_id, oppps_ctwindow);
}
}
@@ -275,8 +287,8 @@ static void rtw89_p2p_update_noa(struct rtw89_dev *rtwdev,
else
act = RTW89_P2P_ACT_UPDATE;
rtw89_tsf32_toggle(rtwdev, rtwvif_link, act);
- rtw89_fw_h2c_p2p_act(rtwdev, rtwvif_link, bss_conf,
- desc, act, noa_id);
+ rtw89_fw_h2c_p2p_act(rtwdev, rtwvif_link, desc, act, noa_id,
+ bss_conf->p2p_noa_attr.oppps_ctwindow);
}
rtwvif_link->last_noa_nr = noa_id;
}
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index b2c43d44820d..729477153de6 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -25,6 +25,9 @@ u8 rtw89_p2p_noa_fetch(struct rtw89_vif_link *rtwvif_link, void **data);
void rtw89_p2p_noa_once_init(struct rtw89_vif_link *rtwvif_link);
void rtw89_p2p_noa_once_deinit(struct rtw89_vif_link *rtwvif_link);
void rtw89_p2p_noa_once_recalc(struct rtw89_vif_link *rtwvif_link);
+void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct ieee80211_bss_conf *bss_conf);
static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev)
{
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index f05c81ae5869..4a65b0c9c2d1 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -6070,6 +6070,7 @@
#define B_BE_MACID_ACQ_GRP0_CLR_P BIT(2)
#define B_BE_R_MACID_ACQ_CHK_EN BIT(0)
+#define R_BE_BTC_CFG 0x0E300
#define R_BE_BT_BREAK_TABLE 0x0E344
#define R_BE_GNT_SW_CTRL 0x0E348
@@ -8024,6 +8025,7 @@
#define R_PHY_STS_BITMAP_HT 0x076C
#define R_PHY_STS_BITMAP_VHT 0x0770
#define R_PHY_STS_BITMAP_HE 0x0774
+#define R_PHY_STS_BITMAP_EHT 0x0788
#define R_EDCCA_RPTREG_SEL_BE 0x078C
#define B_EDCCA_RPTREG_SEL_BE_MSK GENMASK(22, 20)
#define R_PMAC_GNT 0x0980
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index fafa200a9c8d..c55833f259de 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2402,6 +2402,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.set_txpwr_ctrl = rtw8851b_set_txpwr_ctrl,
.init_txpwr_unit = rtw8851b_init_txpwr_unit,
.get_thermal = rtw8851b_get_thermal,
+ .chan_to_rf18_val = NULL,
.ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx,
.query_ppdu = rtw8851b_query_ppdu,
.convert_rpl_to_rssi = NULL,
@@ -2528,7 +2529,6 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.phycap_size = 128,
.para_ver = 0,
.wlcx_desired = 0x06000000,
- .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index 5810af825242..598730831707 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -89,6 +89,7 @@ static struct pci_driver rtw89_8851be_driver = {
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
.driver.pm = &rtw89_pm_ops,
+ .err_handler = &rtw89_pci_err_handler,
};
module_pci_driver(rtw89_8851be_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index cd5987fc52d7..080636e8d0c3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -2128,6 +2128,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.set_txpwr_ctrl = rtw8852a_set_txpwr_ctrl,
.init_txpwr_unit = rtw8852a_init_txpwr_unit,
.get_thermal = rtw8852a_get_thermal,
+ .chan_to_rf18_val = NULL,
.ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx,
.query_ppdu = rtw8852a_query_ppdu,
.convert_rpl_to_rssi = NULL,
@@ -2246,7 +2247,6 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.phycap_size = 128,
.para_ver = 0x0,
.wlcx_desired = 0x06000000,
- .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 2037713e3952..90ffaf9f4f6a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -91,6 +91,7 @@ static struct pci_driver rtw89_8852ae_driver = {
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
.driver.pm = &rtw89_pm_ops,
+ .err_handler = &rtw89_pci_err_handler,
};
module_pci_driver(rtw89_8852ae_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index dacdb384de2c..c0bf80450acf 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -755,6 +755,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl,
.init_txpwr_unit = rtw8852bx_init_txpwr_unit,
.get_thermal = rtw8852bx_get_thermal,
+ .chan_to_rf18_val = NULL,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
@@ -882,7 +883,6 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.phycap_size = 128,
.para_ver = 0,
.wlcx_desired = 0x05050000,
- .btcx_desired = 0x5,
.scbd = 0x1,
.mailbox = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index abdeafc14b0b..b0726f590ca2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -93,6 +93,7 @@ static struct pci_driver rtw89_8852be_driver = {
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
.driver.pm = &rtw89_pm_ops,
+ .err_handler = &rtw89_pci_err_handler,
};
module_pci_driver(rtw89_8852be_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index 289dce688d72..95e088734423 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -689,6 +689,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl,
.init_txpwr_unit = rtw8852bx_init_txpwr_unit,
.get_thermal = rtw8852bx_get_thermal,
+ .chan_to_rf18_val = NULL,
.ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
.query_ppdu = rtw8852bx_query_ppdu,
.convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi,
@@ -815,7 +816,6 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.phycap_size = 128,
.para_ver = 0,
.wlcx_desired = 0x070e0000,
- .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
index b69fa17beb33..a584c75b801d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
@@ -95,6 +95,7 @@ static struct pci_driver rtw89_8852bte_driver = {
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
.driver.pm = &rtw89_pm_ops,
+ .err_handler = &rtw89_pci_err_handler,
};
module_pci_driver(rtw89_8852bte_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 2a6143a8d256..8f3d0c91a3f8 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -2948,6 +2948,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.set_txpwr_ctrl = rtw8852c_set_txpwr_ctrl,
.init_txpwr_unit = rtw8852c_init_txpwr_unit,
.get_thermal = rtw8852c_get_thermal,
+ .chan_to_rf18_val = NULL,
.ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx,
.query_ppdu = rtw8852c_query_ppdu,
.convert_rpl_to_rssi = NULL,
@@ -3069,7 +3070,6 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.phycap_size = 0x60,
.para_ver = 0x1,
.wlcx_desired = 0x06000000,
- .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 5d864fd5974e..db01d3966c27 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -118,6 +118,7 @@ static struct pci_driver rtw89_8852ce_driver = {
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
.driver.pm = &rtw89_pm_ops,
+ .err_handler = &rtw89_pci_err_handler,
};
module_pci_driver(rtw89_8852ce_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 1d0f6e7df497..603212ed4558 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -15,7 +15,7 @@
#include "sar.h"
#include "util.h"
-#define RTW8922A_FW_FORMAT_MAX 3
+#define RTW8922A_FW_FORMAT_MAX 4
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin"
@@ -2390,6 +2390,48 @@ static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
return clamp_t(int, th, 0, U8_MAX);
}
+static u32 rtw8922a_chan_to_rf18_val(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ u32 val = u32_encode_bits(chan->channel, RR_CFGCH_CH);
+
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ default:
+ break;
+ case RTW89_BAND_5G:
+ val |= u32_encode_bits(CFGCH_BAND1_5G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_5G, RR_CFGCH_BAND0);
+ break;
+ case RTW89_BAND_6G:
+ val |= u32_encode_bits(CFGCH_BAND1_6G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_6G, RR_CFGCH_BAND0);
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ val |= u32_encode_bits(CFGCH_BW_V2_40M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ val |= u32_encode_bits(CFGCH_BW_V2_80M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ val |= u32_encode_bits(CFGCH_BW_V2_160M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ val |= u32_encode_bits(CFGCH_BW_V2_320M, RR_CFGCH_BW_V2);
+ break;
+ }
+
+ return val;
+}
+
static void rtw8922a_btc_set_rfe(struct rtw89_dev *rtwdev)
{
union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
@@ -2761,6 +2803,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.set_txpwr_ctrl = rtw8922a_set_txpwr_ctrl,
.init_txpwr_unit = NULL,
.get_thermal = rtw8922a_get_thermal,
+ .chan_to_rf18_val = rtw8922a_chan_to_rf18_val,
.ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
.query_ppdu = rtw8922a_query_ppdu,
.convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi,
@@ -2880,7 +2923,6 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.phycap_size = 0x38,
.para_ver = 0xf,
.wlcx_desired = 0x07110000,
- .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index 1659ea64ade1..fce094c7ce93 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -36,8 +36,7 @@ void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
static
void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
- u8 central_ch, enum rtw89_band band,
- enum rtw89_bandwidth bw)
+ const struct rtw89_chan *chan)
{
const u32 rf_addr[2] = {RR_CFGCH, RR_CFGCH_V1};
struct rtw89_hal *hal = &rtwdev->hal;
@@ -73,49 +72,9 @@ void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
return;
}
- rf_reg[path][i] &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BW |
+ rf_reg[path][i] &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BW_V2 |
RR_CFGCH_BAND0 | RR_CFGCH_CH);
- rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
-
- switch (band) {
- case RTW89_BAND_2G:
- default:
- break;
- case RTW89_BAND_5G:
- rf_reg[path][i] |=
- u32_encode_bits(CFGCH_BAND1_5G, RR_CFGCH_BAND1) |
- u32_encode_bits(CFGCH_BAND0_5G, RR_CFGCH_BAND0);
- break;
- case RTW89_BAND_6G:
- rf_reg[path][i] |=
- u32_encode_bits(CFGCH_BAND1_6G, RR_CFGCH_BAND1) |
- u32_encode_bits(CFGCH_BAND0_6G, RR_CFGCH_BAND0);
- break;
- }
-
- switch (bw) {
- case RTW89_CHANNEL_WIDTH_5:
- case RTW89_CHANNEL_WIDTH_10:
- case RTW89_CHANNEL_WIDTH_20:
- default:
- break;
- case RTW89_CHANNEL_WIDTH_40:
- rf_reg[path][i] |=
- u32_encode_bits(CFGCH_BW_V2_40M, RR_CFGCH_BW_V2);
- break;
- case RTW89_CHANNEL_WIDTH_80:
- rf_reg[path][i] |=
- u32_encode_bits(CFGCH_BW_V2_80M, RR_CFGCH_BW_V2);
- break;
- case RTW89_CHANNEL_WIDTH_160:
- rf_reg[path][i] |=
- u32_encode_bits(CFGCH_BW_V2_160M, RR_CFGCH_BW_V2);
- break;
- case RTW89_CHANNEL_WIDTH_320:
- rf_reg[path][i] |=
- u32_encode_bits(CFGCH_BW_V2_320M, RR_CFGCH_BW_V2);
- break;
- }
+ rf_reg[path][i] |= rtw89_chip_chan_to_rf18_val(rtwdev, chan);
rtw89_write_rf(rtwdev, path, rf_addr[i],
RFREG_MASK, rf_reg[path][i]);
@@ -126,7 +85,7 @@ void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
if (hal->cv != CHIP_CAV)
return;
- if (band == RTW89_BAND_2G) {
+ if (chan->band_type == RTW89_BAND_2G) {
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c990);
@@ -145,8 +104,7 @@ void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- rtw8922a_ctl_band_ch_bw(rtwdev, phy_idx, chan->channel, chan->band_type,
- chan->band_width);
+ rtw8922a_ctl_band_ch_bw(rtwdev, phy_idx, chan);
}
enum _rf_syn_pow {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 0ea8d5281c10..b730d79edd10 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -106,6 +106,7 @@ static struct pci_driver rtw89_8922ae_driver = {
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
.driver.pm = &rtw89_pm_ops_be,
+ .err_handler = &rtw89_pci_err_handler,
};
module_pci_driver(rtw89_8922ae_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 517b66022f18..7f568ffb3766 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -199,7 +199,8 @@ struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
typeof(_dev) _d = (_dev); \
BUILD_BUG_ON(!rtw89_sar_handlers[_s].descr_sar_source); \
BUILD_BUG_ON(!rtw89_sar_handlers[_s].query_sar_config); \
- lockdep_assert_wiphy(_d->hw->wiphy); \
+ if (test_bit(RTW89_FLAG_PROBE_DONE, _d->flags)) \
+ lockdep_assert_wiphy(_d->hw->wiphy); \
_d->sar._cfg_name = *(_cfg_data); \
_d->sar.src = _s; \
} while (0)
@@ -499,8 +500,6 @@ static void rtw89_set_sar_from_acpi(struct rtw89_dev *rtwdev)
struct rtw89_sar_cfg_acpi *cfg;
int ret;
- lockdep_assert_wiphy(rtwdev->hw->wiphy);
-
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return;
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 811c91481441..d504518b8a57 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -566,21 +566,22 @@ static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 filter_model_addr = mac->filter_model_addr;
u32 indir_access_addr = mac->indir_access_addr;
+ u32 mem_page_size = mac->mem_page_size;
u32 *ptr = (u32 *)buf;
u32 base_addr, start_page, residue;
u32 cnt = 0;
u32 i;
- start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
- residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
+ start_page = start_addr / mem_page_size;
+ residue = start_addr % mem_page_size;
base_addr = mac->mem_base_addrs[sel];
- base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
+ base_addr += start_page * mem_page_size;
while (cnt < len) {
rtw89_write32(rtwdev, filter_model_addr, base_addr);
for (i = indir_access_addr + residue;
- i < indir_access_addr + MAC_MEM_DUMP_PAGE_SIZE;
+ i < indir_access_addr + mem_page_size;
i += 4, ptr++) {
*ptr = rtw89_read32(rtwdev, i);
cnt += 4;
@@ -589,7 +590,7 @@ static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
}
residue = 0;
- base_addr += MAC_MEM_DUMP_PAGE_SIZE;
+ base_addr += mem_page_size;
}
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 0e115b428f96..f3a853edfc11 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -656,11 +656,13 @@ static int rsi_config_power(struct ieee80211_hw *hw)
* requests. The stack calls this function to
* change hardware configuration, e.g., channel.
* @hw: Pointer to the ieee80211_hw structure.
+ * @radio_idx: Radio index.
* @changed: Changed flags set.
*
* Return: 0 on success, negative error code on failure.
*/
static int rsi_mac80211_config(struct ieee80211_hw *hw,
+ int radio_idx,
u32 changed)
{
struct rsi_hw *adapter = hw->priv;
@@ -1201,12 +1203,13 @@ unlock:
/**
* rsi_mac80211_set_rts_threshold() - This function sets rts threshold value.
* @hw: Pointer to the ieee80211_hw structure.
+ * @radio_idx: Radio index.
* @value: Rts threshold value.
*
* Return: 0 on success.
*/
static int rsi_mac80211_set_rts_threshold(struct ieee80211_hw *hw,
- u32 value)
+ int radio_idx, u32 value)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
@@ -1583,12 +1586,14 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
* rsi_mac80211_set_antenna() - This function is used to configure
* tx and rx antennas.
* @hw: Pointer to the ieee80211_hw structure.
+ * @radio_idx: Radio index
* @tx_ant: Bitmap for tx antenna
* @rx_ant: Bitmap for rx antenna
*
* Return: 0 on success, Negative error code on failure.
*/
static int rsi_mac80211_set_antenna(struct ieee80211_hw *hw,
+ int radio_idx,
u32 tx_ant, u32 rx_ant)
{
struct rsi_hw *adapter = hw->priv;
@@ -1634,12 +1639,14 @@ fail_set_antenna:
* tx and rx antennas.
*
* @hw: Pointer to the ieee80211_hw structure.
+ * @radio_idx: Radio index
* @tx_ant: Bitmap for tx antenna
* @rx_ant: Bitmap for rx antenna
*
* Return: 0 on success, negative error codes on failure.
*/
static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw,
+ int radio_idx,
u32 *tx_ant, u32 *rx_ant)
{
struct rsi_hw *adapter = hw->priv;
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index e95b9ded17d9..d12fcc755701 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -220,7 +220,7 @@ int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
-int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, u32 value)
{
struct wfx_dev *wdev = hw->priv;
struct wfx_vif *wvif = NULL;
@@ -706,7 +706,7 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif
wvif->channel = NULL;
}
-int wfx_config(struct ieee80211_hw *hw, u32 changed)
+int wfx_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
return 0;
}
diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
index 8702eed5267f..b4812b294f3c 100644
--- a/drivers/net/wireless/silabs/wfx/sta.h
+++ b/drivers/net/wireless/silabs/wfx/sta.h
@@ -21,8 +21,8 @@ struct wfx_sta_priv {
/* mac80211 interface */
int wfx_start(struct ieee80211_hw *hw);
void wfx_stop(struct ieee80211_hw *hw, bool suspend);
-int wfx_config(struct ieee80211_hw *hw, u32 changed);
-int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+int wfx_config(struct ieee80211_hw *hw, int radio_idx, u32 changed);
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, u32 value);
void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx);
void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 unused);
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 5dd7f6a38900..b1dd76e8aecb 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -321,7 +321,7 @@ int cw1200_change_interface(struct ieee80211_hw *dev,
return ret;
}
-int cw1200_config(struct ieee80211_hw *dev, u32 changed)
+int cw1200_config(struct ieee80211_hw *dev, int radio_idx, u32 changed)
{
int ret = 0;
struct cw1200_common *priv = dev->priv;
@@ -857,7 +857,8 @@ void cw1200_wep_key_work(struct work_struct *work)
wsm_unlock_tx(priv);
}
-int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
int ret = 0;
__le32 val32;
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index b955b92cfd73..b4f04371668d 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -22,7 +22,7 @@ int cw1200_change_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
enum nl80211_iftype new_type,
bool p2p);
-int cw1200_config(struct ieee80211_hw *dev, u32 changed);
+int cw1200_config(struct ieee80211_hw *dev, int radio_idx, u32 changed);
void cw1200_configure_filter(struct ieee80211_hw *dev,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -36,7 +36,8 @@ int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
-int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop);
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index f78fc3880423..cb8b3102fa6c 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -832,41 +832,6 @@ int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1251_acx_rate_policies(struct wl1251 *wl)
-{
- struct acx_rate_policy *acx;
- int ret = 0;
-
- wl1251_debug(DEBUG_ACX, "acx rate policies");
-
- acx = kzalloc(sizeof(*acx), GFP_KERNEL);
- if (!acx)
- return -ENOMEM;
-
- /* configure one default (one-size-fits-all) rate class */
- acx->rate_class_cnt = 2;
- acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
- acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
- acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
- acx->rate_class[0].aflags = 0;
-
- /* no-retry rate class */
- acx->rate_class[1].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
- acx->rate_class[1].short_retry_limit = 0;
- acx->rate_class[1].long_retry_limit = 0;
- acx->rate_class[1].aflags = 0;
-
- ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
- if (ret < 0) {
- wl1251_warning("Setting of rate policies failed: %d", ret);
- goto out;
- }
-
-out:
- kfree(acx);
- return ret;
-}
-
int wl1251_acx_mem_cfg(struct wl1251 *wl)
{
struct wl1251_acx_config_memory *mem_conf;
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index af5ec7f12231..efe1f61f89bc 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -1469,7 +1469,6 @@ int wl1251_acx_cts_protect(struct wl1251 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats);
int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
-int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index c3be81dc7970..c33ee0d4d323 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -59,47 +59,6 @@ out:
}
/**
- * wl1251_cmd_test - Send test command to firmware
- *
- * @wl: wl struct
- * @buf: buffer containing the command, with all headers, must work with dma
- * @buf_len: length of the buffer
- * @answer: is answer needed
- */
-int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer)
-{
- int ret;
-
- wl1251_debug(DEBUG_CMD, "cmd test");
-
- ret = wl1251_cmd_send(wl, CMD_TEST, buf, buf_len);
-
- if (ret < 0) {
- wl1251_warning("TEST command failed");
- return ret;
- }
-
- if (answer) {
- struct wl1251_command *cmd_answer;
-
- /*
- * The test command got in, we can read the answer.
- * The answer would be a wl1251_command, where the
- * parameter array contains the actual answer.
- */
- wl1251_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
-
- cmd_answer = buf;
-
- if (cmd_answer->header.status != CMD_STATUS_SUCCESS)
- wl1251_error("TEST command answer error: %d",
- cmd_answer->header.status);
- }
-
- return 0;
-}
-
-/**
* wl1251_cmd_interrogate - Read acx from firmware
*
* @wl: wl struct
@@ -339,44 +298,6 @@ out:
return ret;
}
-int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
- size_t len)
-{
- struct cmd_read_write_memory *cmd;
- int ret = 0;
-
- wl1251_debug(DEBUG_CMD, "cmd read memory");
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- WARN_ON(len > MAX_READ_SIZE);
- len = min_t(size_t, len, MAX_READ_SIZE);
-
- cmd->addr = addr;
- cmd->size = len;
-
- ret = wl1251_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd));
- if (ret < 0) {
- wl1251_error("read memory command failed: %d", ret);
- goto out;
- }
-
- /* the read command got in, we can now read the answer */
- wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
-
- if (cmd->header.status != CMD_STATUS_SUCCESS)
- wl1251_error("error in read command result: %d",
- cmd->header.status);
-
- memcpy(answer, cmd->value, len);
-
-out:
- kfree(cmd);
- return ret;
-}
-
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len)
{
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index 39159201b97e..3474b45af3b1 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -16,7 +16,6 @@
struct acx_header;
int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
-int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer);
int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len);
int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len);
int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
@@ -26,8 +25,6 @@ int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable);
int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
u16 beacon_interval, u8 dtim_interval);
int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode);
-int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
- size_t len);
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len);
int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index bb53d681c11b..69fc51f183ad 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -589,7 +589,7 @@ static bool wl1251_can_do_pm(struct ieee80211_conf *conf, struct wl1251 *wl)
return (conf->flags & IEEE80211_CONF_PS) && !wl->monitor_present;
}
-static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
+static int wl1251_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct wl1251 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -1051,7 +1051,8 @@ out:
return ret;
}
-static int wl1251_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int wl1251_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct wl1251 *wl = hw->priv;
int ret;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index cd8ad0fe59cc..fa3a3f71dd15 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1804,32 +1804,6 @@ out:
return ret;
}
-int wl12xx_cmd_start_fwlog(struct wl1271 *wl)
-{
- struct wl12xx_cmd_start_fwlog *cmd;
- int ret = 0;
-
- wl1271_debug(DEBUG_CMD, "cmd start firmware logger");
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0);
- if (ret < 0) {
- wl1271_error("failed to send start firmware logger command");
- goto out_free;
- }
-
-out_free:
- kfree(cmd);
-
-out:
- return ret;
-}
-
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl)
{
struct wl12xx_cmd_stop_fwlog *cmd;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 4c2f2608ef3b..d16afb35f9ee 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -81,7 +81,6 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 feature, u8 enable, u8 value);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
-int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index f93c95edd991..6116a8522d96 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3166,7 +3166,7 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return 0;
}
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+static int wl1271_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
@@ -3895,7 +3895,8 @@ out:
return 0;
}
-static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
struct wl1271 *wl = hw->priv;
int ret = 0;
@@ -3924,7 +3925,8 @@ out:
return ret;
}
-static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index f6add19d1da1..eefe8da3b14d 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -2381,7 +2381,8 @@ static const char * const hwsim_chanwidths[] = {
[NL80211_CHAN_WIDTH_320] = "eht320",
};
-static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
+static int mac80211_hwsim_config(struct ieee80211_hw *hw, int radio_idx,
+ u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
@@ -3338,7 +3339,8 @@ static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
-static int mac80211_hwsim_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int mac80211_hwsim_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 9653dbaac3c0..f7c56174424d 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1133,7 +1133,7 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
zd_mac_free_cur_beacon(mac);
}
-static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
+static int zd_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct ieee80211_conf *conf = &hw->conf;
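Note: the wireless-driver conversions above all follow the same pattern: mac80211 callbacks such as .config, .set_rts_threshold and .set_antenna/.get_antenna gain a radio_idx argument, and drivers for single-radio hardware simply accept and ignore it. A minimal, hypothetical sketch of that pattern (foo_priv and foo_write_rts are illustrative names, not part of this series):

static int foo_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
				    u32 value)
{
	struct foo_priv *priv = hw->priv;

	/* Single-radio hardware: radio_idx can be ignored. */
	return foo_write_rts(priv, value);
}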
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ef5d655a0052..86ca041bf74a 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2119,6 +2119,7 @@ static struct irq_chip hv_msi_irq_chip = {
static struct msi_domain_ops hv_msi_ops = {
.msi_prepare = hv_msi_prepare,
.msi_free = hv_msi_free,
+ .prepare_desc = pci_msix_prepare_desc,
};
/**
@@ -2140,7 +2141,7 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
hbus->msi_info.ops = &hv_msi_ops;
hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_MSIX);
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_PCI_MSIX_ALLOC_DYN);
hbus->msi_info.handler = FLOW_HANDLER;
hbus->msi_info.handler_name = FLOW_NAME;
hbus->msi_info.data = hbus;
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index c05152733993..765312c92d9b 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -222,13 +222,14 @@ static void pci_irq_unmask_msix(struct irq_data *data)
pci_msix_unmask(irq_data_get_msi_desc(data));
}
-static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
- struct msi_desc *desc)
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc)
{
/* Don't fiddle with preallocated MSI descriptors */
if (!desc->pci.mask_base)
msix_prepare_msi_desc(to_pci_dev(desc->dev), desc);
}
+EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
static const struct msi_domain_template pci_msix_template = {
.chip = {
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 36f57d7b4a66..1cc06b7cb17e 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -96,7 +96,7 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
if (ptp_clock_freerun(ptp)) {
- pr_err("ptp: physical clock is free running\n");
+ pr_err_ratelimited("ptp: physical clock is free running\n");
return -EBUSY;
}
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index f9426a586653..14ad57954a66 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -45,12 +45,14 @@ static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned int gpio)
return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
}
-static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned int gpio,
- int value)
+static int ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
+
+ return 0;
}
static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
@@ -223,7 +225,7 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
chip->request = ssb_gpio_chipco_request;
chip->free = ssb_gpio_chipco_free;
chip->get = ssb_gpio_chipco_get_value;
- chip->set = ssb_gpio_chipco_set_value;
+ chip->set_rv = ssb_gpio_chipco_set_value;
chip->direction_input = ssb_gpio_chipco_direction_input;
chip->direction_output = ssb_gpio_chipco_direction_output;
#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 7fcc46a0bb48..4e29652f8ee7 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1298,7 +1298,8 @@ exit:
return ret;
}
-static int cfg80211_rtw_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int cfg80211_rtw_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
return 0;
}
@@ -1795,7 +1796,7 @@ static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev,
+ struct wireless_dev *wdev, int radio_idx,
enum nl80211_tx_power_setting type, int mbm)
{
return 0;
@@ -1803,6 +1804,7 @@ static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ int radio_idx,
unsigned int link_id, int *dbm)
{
*dbm = (12);
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 4e9f98db9ff4..f8c772a7cb43 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -58,12 +58,10 @@ static inline void ksmbd_tcp_reuseaddr(struct socket *sock)
static inline void ksmbd_tcp_rcv_timeout(struct socket *sock, s64 secs)
{
- lock_sock(sock->sk);
if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
- sock->sk->sk_rcvtimeo = secs * HZ;
+ WRITE_ONCE(sock->sk->sk_rcvtimeo, secs * HZ);
else
- sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- release_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvtimeo, MAX_SCHEDULE_TIMEOUT);
}
static inline void ksmbd_tcp_snd_timeout(struct socket *sock, s64 secs)
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index cf0afa60e4a7..5be1881abbb6 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -132,8 +132,8 @@ enum virtchnl_ops {
VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
@@ -974,18 +974,19 @@ struct virtchnl_rss_lut {
VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
#define virtchnl_rss_lut_LEGACY_SIZEOF 6
-/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
+/* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
+ * VIRTCHNL_OP_SET_RSS_HASHCFG
+ * VF sends these messages to get and set the hash filter configuration for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
-struct virtchnl_rss_hena {
- u64 hena;
+struct virtchnl_rss_hashcfg {
+ /* Bits defined by enum libie_filter_pctype */
+ u64 hashcfg;
};
-VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);
/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
@@ -1779,10 +1780,10 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
valid_len = sizeof(struct virtchnl_rss_hfunc);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- valid_len = sizeof(struct virtchnl_rss_hena);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ valid_len = sizeof(struct virtchnl_rss_hashcfg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
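A rough sketch of how a VF driver could build the renamed message; the transport helper foo_send_msg() is an assumption, while the opcode, struct and PCTYPE values come from the headers in this diff:

#include <linux/bits.h>
#include <linux/avf/virtchnl.h>
#include <linux/net/intel/libie/pctype.h>

/* Hypothetical mailbox send; real drivers use their own admin-queue path. */
int foo_send_msg(u32 v_opcode, u8 *msg, u16 msglen);

static int foo_set_rss_hashcfg(void)
{
        struct virtchnl_rss_hashcfg cfg = {
                /* hash TCP and UDP over IPv4, per enum libie_filter_pctype */
                .hashcfg = BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) |
                           BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP),
        };

        return foo_send_msg(VIRTCHNL_OP_SET_RSS_HASHCFG,
                            (u8 *)&cfg, sizeof(cfg));
}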
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index 9b8a9c39614b..5dfdbb63b1d5 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -14,7 +14,7 @@
#define CAN_BITRATE_UNSET 0
#define CAN_BITRATE_UNKNOWN (-1U)
-#define CAN_CTRLMODE_TDC_MASK \
+#define CAN_CTRLMODE_FD_TDC_MASK \
(CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
/*
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 492d23bec7be..9a92cbe5b2cb 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -91,9 +91,9 @@ struct can_priv {
struct can_berr_counter *bec);
};
-static inline bool can_tdc_is_enabled(const struct can_priv *priv)
+static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv)
{
- return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
+ return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
}
/*
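Usage of the renamed helper is unchanged apart from the name; a minimal, hypothetical driver start path (foo_write_tdc_regs() is made up):

#include <linux/can/dev.h>

static void foo_write_tdc_regs(struct net_device *ndev);       /* hypothetical */

static void foo_chip_start(struct net_device *ndev)
{
        struct can_priv *priv = netdev_priv(ndev);

        /* true when CAN_CTRLMODE_TDC_AUTO or CAN_CTRLMODE_TDC_MANUAL is set */
        if (can_fd_tdc_is_enabled(priv))
                foo_write_tdc_regs(ndev);
}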
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index 5e4f9ab1cf75..6ad6c2968a28 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -30,6 +30,14 @@ struct dpll_device_ops {
void *dpll_priv,
unsigned long *qls,
struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_set)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_ops {
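A sketch of a driver wiring the two new phase-offset-monitor ops, keeping the requested state in private data; struct foo_dpll and the idea of caching the state instead of programming firmware are assumptions:

#include <linux/dpll.h>

struct foo_dpll {
        enum dpll_feature_state pom_state;      /* hypothetical private state */
};

static int foo_pom_set(const struct dpll_device *dpll, void *dpll_priv,
                       enum dpll_feature_state state,
                       struct netlink_ext_ack *extack)
{
        struct foo_dpll *fd = dpll_priv;

        fd->pom_state = state;          /* a real driver would program hw here */
        return 0;
}

static int foo_pom_get(const struct dpll_device *dpll, void *dpll_priv,
                       enum dpll_feature_state *state,
                       struct netlink_ext_ack *extack)
{
        struct foo_dpll *fd = dpll_priv;

        *state = fd->pom_state;
        return 0;
}

/* other mandatory dpll_device_ops members omitted for brevity */
static const struct dpll_device_ops foo_dpll_ops = {
        .phase_offset_monitor_set = foo_pom_set,
        .phase_offset_monitor_get = foo_pom_get,
};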
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 5e0dd333ad1f..59877fd2a1d3 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -536,7 +536,7 @@ struct ethtool_rmon_hist_range {
u16 high;
};
-#define ETHTOOL_RMON_HIST_MAX 10
+#define ETHTOOL_RMON_HIST_MAX 11
/**
* struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
@@ -826,6 +826,19 @@ struct ethtool_rxfh_param {
};
/**
+ * struct ethtool_rxfh_fields - Rx Flow Hashing (RXFH) header field config
+ * @data: which header fields are used for hashing, bitmask of RXH_* defines
+ * @flow_type: L2-L4 network traffic flow type
+ * @rss_context: RSS context, will only be used if rxfh_per_ctx_fields is
+ * set in struct ethtool_ops
+ */
+struct ethtool_rxfh_fields {
+ u32 data;
+ u32 flow_type;
+ u32 rss_context;
+};
+
+/**
* struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info
* @cmd: command number = %ETHTOOL_GET_TS_INFO
* @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
@@ -855,6 +868,8 @@ struct kernel_ethtool_ts_info {
* @cap_rss_ctx_supported: indicates if the driver supports RSS
* contexts via legacy API, drivers implementing @create_rxfh_context
* do not have to set this bit.
+ * @rxfh_per_ctx_fields: device supports selecting different header fields
+ * for Rx hash calculation and RSS for each additional context.
* @rxfh_per_ctx_key: device supports setting different RSS key for each
* additional context. Netlink API should report hfunc, key, and input_xfrm
* for every context, not just context 0.
@@ -968,6 +983,8 @@ struct kernel_ethtool_ts_info {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
+ * @get_rxfh_fields: Get header fields used for flow hashing.
+ * @set_rxfh_fields: Set header fields used for flow hashing.
* @create_rxfh_context: Create a new RSS context with the specified RX flow
* hash indirection table, hash key, and hash function.
* The &struct ethtool_rxfh_context for this context is passed in @ctx;
@@ -1084,6 +1101,7 @@ struct ethtool_ops {
u32 supported_input_xfrm:8;
u32 cap_link_lanes_supported:1;
u32 cap_rss_ctx_supported:1;
+ u32 rxfh_per_ctx_fields:1;
u32 rxfh_per_ctx_key:1;
u32 cap_rss_rxnfc_adds:1;
u32 rxfh_indir_space;
@@ -1153,6 +1171,11 @@ struct ethtool_ops {
int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
struct netlink_ext_ack *extack);
+ int (*get_rxfh_fields)(struct net_device *,
+ struct ethtool_rxfh_fields *);
+ int (*set_rxfh_fields)(struct net_device *,
+ const struct ethtool_rxfh_fields *,
+ struct netlink_ext_ack *extack);
int (*create_rxfh_context)(struct net_device *,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
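A sketch of the new per-flow-type hash field callbacks for a hypothetical driver that only caches IPv4 TCP/UDP settings; the foo_* names are assumptions, the RXH_* and TCP_V4_FLOW/UDP_V4_FLOW values come from the ethtool uAPI:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
        u32 tcp4_hash;          /* cached RXH_* bitmap for TCP_V4_FLOW */
        u32 udp4_hash;          /* cached RXH_* bitmap for UDP_V4_FLOW */
};

static int foo_get_rxfh_fields(struct net_device *dev,
                               struct ethtool_rxfh_fields *fields)
{
        struct foo_priv *fp = netdev_priv(dev);

        switch (fields->flow_type) {
        case TCP_V4_FLOW:
                fields->data = fp->tcp4_hash;
                return 0;
        case UDP_V4_FLOW:
                fields->data = fp->udp4_hash;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int foo_set_rxfh_fields(struct net_device *dev,
                               const struct ethtool_rxfh_fields *fields,
                               struct netlink_ext_ack *extack)
{
        struct foo_priv *fp = netdev_priv(dev);

        /* fields->data is e.g. RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 */
        switch (fields->flow_type) {
        case TCP_V4_FLOW:
                fp->tcp4_hash = fields->data;
                return 0;
        case UDP_V4_FLOW:
                fp->udp4_hash = fields->data;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}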
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index aba91335273a..39254b2726c0 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -43,6 +43,8 @@ void ethtool_aggregate_rmon_stats(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats);
bool ethtool_dev_mm_supported(struct net_device *dev);
+void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notif);
+
#else
static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
{
@@ -120,6 +122,11 @@ static inline bool ethtool_dev_mm_supported(struct net_device *dev)
return false;
}
+static inline void ethnl_pse_send_ntf(struct net_device *netdev,
+ unsigned long notif)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 22f39e5e2ff1..120de474a8bf 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -4007,6 +4007,16 @@ enum ieee80211_s1g_actioncode {
WLAN_S1G_TWT_INFORMATION = 11,
};
+/* Radio measurement action codes as defined in IEEE 802.11-2024 - Table 9-470 */
+enum ieee80211_radio_measurement_actioncode {
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REQUEST = 0,
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REPORT = 1,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST = 2,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT = 3,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_REQUEST = 4,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_RESPONSE = 5,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index cdc684e04a2f..ce97d891cf72 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -191,8 +191,6 @@ struct team {
const struct header_ops *header_ops_cache;
- struct mutex lock; /* used for overall locking, e.g. port lists write */
-
/*
* List of enabled ports and their count
*/
@@ -223,7 +221,6 @@ struct team {
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
- struct lock_class_key team_lock_key;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 38456b42cdb5..15e01935d3fa 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -79,11 +79,6 @@ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_802_1Q_VLAN;
-}
-
#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
@@ -136,7 +131,7 @@ struct vlan_pcpu_stats {
u32 tx_dropped;
};
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
@@ -200,6 +195,11 @@ struct vlan_dev_priv {
#endif
};
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
return netdev_priv(dev);
@@ -237,6 +237,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
extern bool vlan_uses_dev(const struct net_device *dev);
#else
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return false;
+}
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
@@ -254,19 +259,19 @@ vlan_for_each(struct net_device *dev,
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5aeeed22f35b..db0eb0d86b64 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -156,6 +156,7 @@ struct inet6_skb_parm {
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
#define IP6SKB_MULTIPATH 1024
+#define IP6SKB_MCROUTE 2048
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index e43ff9f980a4..c640ba44dd6e 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -95,7 +95,6 @@ void mdio_device_remove(struct mdio_device *mdiodev);
void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
-int mdio_device_bus_match(struct device *dev, const struct device_driver *drv);
static inline void mdio_device_get(struct mdio_device *mdiodev)
{
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 63ef5191cc57..fddafdc168f7 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -31,6 +31,7 @@ extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
+extern int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
extern void ip6_mr_cleanup(void);
int ip6mr_ioctl(struct sock *sk, int cmd, void *arg);
#else
@@ -58,6 +59,12 @@ static inline int ip6_mr_init(void)
return 0;
}
+static inline int
+ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return ip6_output(net, sk, skb);
+}
+
static inline void ip6_mr_cleanup(void)
{
return;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6863540f4b71..7f254bde5426 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -706,6 +706,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
diff --git a/include/linux/net/intel/libie/pctype.h b/include/linux/net/intel/libie/pctype.h
new file mode 100644
index 000000000000..d783417fbf36
--- /dev/null
+++ b/include/linux/net/intel/libie/pctype.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_PCTYPE_H
+#define __LIBIE_PCTYPE_H
+
+/* Packet Classifier Type indexes, used to set the xxQF_HENA registers. Also
+ * communicated over the virtchnl API as part of struct virtchnl_rss_hashcfg.
+ */
+enum libie_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+ * Value 29, 30, 32 are not supported on XL710 and X710.
+ */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+ * Value 39, 40, 42 are not supported on XL710 and X710.
+ */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ LIBIE_FILTER_PCTYPE_FCOE_OX = 48,
+ LIBIE_FILTER_PCTYPE_FCOE_RX = 49,
+ LIBIE_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ LIBIE_FILTER_PCTYPE_L2_PAYLOAD = 63
+};
+
+#endif /* __LIBIE_PCTYPE_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index adb14db25798..db5bfd4e7ec8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2388,7 +2388,7 @@ struct net_device {
struct dm_hw_stat_delta __rcu *dm_private;
#endif
struct device dev;
- const struct attribute_group *sysfs_groups[4];
+ const struct attribute_group *sysfs_groups[5];
const struct attribute_group *sysfs_rx_queue_group;
const struct rtnl_link_ops *rtnl_link_ops;
@@ -3016,6 +3016,16 @@ static inline void dev_dstats_rx_dropped(struct net_device *dev)
u64_stats_update_end(&dstats->syncp);
}
+static inline void dev_dstats_rx_dropped_add(struct net_device *dev,
+ unsigned int packets)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_add(&dstats->rx_drops, packets);
+ u64_stats_update_end(&dstats->syncp);
+}
+
static inline void dev_dstats_tx_add(struct net_device *dev,
unsigned int len)
{
@@ -5128,10 +5138,9 @@ void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
-void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
+void ethtool_notify(struct net_device *dev, unsigned int cmd);
#else
-static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
- const void *data)
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd)
{
}
#endif
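The new batched variant amortises the u64_stats critical section; a short hypothetical use, assuming the device registered per-CPU dstats (only dev_dstats_rx_dropped_add() itself comes from the header):

#include <linux/netdevice.h>

/* Hypothetical path that discards a whole batch of stale Rx buffers. */
static void foo_rx_ring_flush(struct net_device *dev, unsigned int n_dropped)
{
        /* one update_begin/update_end pair instead of n_dropped of them */
        dev_dstats_rx_dropped_add(dev, n_dropped);
}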
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 0477208ed9ff..735e65c3cc11 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -42,6 +42,13 @@ struct netpoll {
struct work_struct refill_wq;
};
+#define np_info(np, fmt, ...) \
+ pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_err(np, fmt, ...) \
+ pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_notice(np, fmt, ...) \
+ pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
+
struct netpoll_info {
refcount_t refcnt;
@@ -65,11 +72,8 @@ static inline void netpoll_poll_enable(struct net_device *dev) { return; }
#endif
int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
-void netpoll_print_options(struct netpoll *np);
-int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
-void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
void do_netpoll_cleanup(struct netpoll *np);
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 0589d70bbe04..20ae4d452c7b 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -5,8 +5,12 @@
#ifndef _LINUX_PACKING_H
#define _LINUX_PACKING_H
-#include <linux/types.h>
+#include <linux/array_size.h>
#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/minmax.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
#define GEN_PACKED_FIELD_STRUCT(__type) \
struct packed_field_ ## __type { \
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e194dad1623d..74c1bcf64b3c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -103,6 +103,9 @@ extern const int phy_basic_ports_array[3];
* @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
* @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
* @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
+ * @PHY_INTERFACE_MODE_50GBASER: 50GBase-R - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_LAUI: 50 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_100GBASEP: 100GBase-P - with Clause 134 FEC
* @PHY_INTERFACE_MODE_MAX: Book keeping
*
* Describes the interface between the MAC and PHY.
@@ -144,6 +147,9 @@ typedef enum {
PHY_INTERFACE_MODE_QUSGMII,
PHY_INTERFACE_MODE_1000BASEKX,
PHY_INTERFACE_MODE_10G_QXGMII,
+ PHY_INTERFACE_MODE_50GBASER,
+ PHY_INTERFACE_MODE_LAUI,
+ PHY_INTERFACE_MODE_100GBASEP,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -260,6 +266,12 @@ static inline const char *phy_modes(phy_interface_t interface)
return "qusgmii";
case PHY_INTERFACE_MODE_10G_QXGMII:
return "10g-qxgmii";
+ case PHY_INTERFACE_MODE_50GBASER:
+ return "50gbase-r";
+ case PHY_INTERFACE_MODE_LAUI:
+ return "laui";
+ case PHY_INTERFACE_MODE_100GBASEP:
+ return "100gbase-p";
default:
return "unknown";
}
@@ -269,8 +281,10 @@ static inline const char *phy_modes(phy_interface_t interface)
* rgmii_clock - map link speed to the clock rate
* @speed: link speed value
*
- * Description: maps RGMII supported link speeds
- * into the clock rates.
+ * Description: maps RGMII supported link speeds into the clock rates.
+ * This can also be used for MII, GMII, and RMII interface modes as the
+ * clock rates are identical, but the caller must be aware that errors
+ * for unsupported clock rates will not be signalled.
*
* Returns: clock rate or negative errno
*/
@@ -526,6 +540,7 @@ struct macsec_ops;
* @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
* @wol_enabled: Set to true if the PHY or the attached MAC have Wake-on-LAN
* enabled.
+ * @is_genphy_driven: PHY is driven by one of the generic PHY drivers
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
*
@@ -629,6 +644,7 @@ struct phy_device {
unsigned is_on_sfp_module:1;
unsigned mac_managed_pm:1;
unsigned wol_enabled:1;
+ unsigned is_genphy_driven:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -1292,6 +1308,17 @@ static inline bool phy_is_started(struct phy_device *phydev)
}
/**
+ * phy_driver_is_genphy - Convenience function to check whether PHY is driven
+ * by one of the generic PHY drivers
+ * @phydev: The phy_device struct
+ * Return: true if PHY is driven by one of the genphy drivers
+ */
+static inline bool phy_driver_is_genphy(struct phy_device *phydev)
+{
+ return phydev->is_genphy_driven;
+}
+
+/**
* phy_disable_eee_mode - Don't advertise an EEE mode.
* @phydev: The phy_device struct
* @link_mode: The EEE mode to be disabled
@@ -1941,9 +1968,6 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
struct ethtool_keee *data);
int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
-/* Generic C45 PHY driver */
-extern struct phy_driver genphy_c45_driver;
-
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
@@ -1997,8 +2021,8 @@ bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp);
void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
-s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
- const int *delay_values, int size, bool is_rx);
+s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values,
+ int size, bool is_rx);
int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
enum ethtool_link_mode_bit_indices linkmode,
@@ -2096,7 +2120,4 @@ module_exit(phy_module_exit)
#define module_phy_driver(__phy_drivers) \
phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
-bool phy_driver_is_genphy(struct phy_device *phydev);
-bool phy_driver_is_genphy_10g(struct phy_device *phydev);
-
#endif /* __PHY_H */
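With the flag-based helper now inline, a MAC-side check might look like the following sketch; foo_vendor_tune() is made up:

#include <linux/phy.h>

static void foo_vendor_tune(struct phy_device *phydev);        /* hypothetical */

static void foo_mac_adjust_phy(struct phy_device *phydev)
{
        /* skip vendor-specific tuning when only genphy/genphy_c45 is bound */
        if (phy_driver_is_genphy(phydev))
                return;

        foo_vendor_tune(phydev);
}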
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index c773eeb92d04..4e5696cfade7 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -6,13 +6,18 @@
#define _LINUX_PSE_CONTROLLER_H
#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/kfifo.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+#include <linux/regulator/driver.h>
/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */
#define MAX_PI_CURRENT 1920000
/* Maximum power in mW according to IEEE 802.3-2022 Table 145-16 */
#define MAX_PI_PW 99900
+struct net_device;
struct phy_device;
struct pse_controller_dev;
struct netlink_ext_ack;
@@ -38,6 +43,19 @@ struct ethtool_c33_pse_pw_limit_range {
};
/**
+ * struct pse_irq_desc - notification sender description for IRQ based events.
+ *
+ * @name: the visible name for the IRQ
+ * @map_event: driver callback to map IRQ status into PSE devices with events.
+ */
+struct pse_irq_desc {
+ const char *name;
+ int (*map_event)(int irq, struct pse_controller_dev *pcdev,
+ unsigned long *notifs,
+ unsigned long *notifs_mask);
+};
+
+/**
* struct pse_control_config - PSE control/channel configuration.
*
* @podl_admin_control: set PoDL PSE admin control as described in
@@ -98,6 +116,7 @@ struct pse_pw_limit_ranges {
/**
* struct ethtool_pse_control_status - PSE control/channel status.
*
+ * @pw_d_id: PSE power domain index.
* @podl_admin_state: operational state of the PoDL PSE
* functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
* @podl_pw_status: power detection status of the PoDL PSE.
@@ -117,8 +136,12 @@ struct pse_pw_limit_ranges {
* is in charge of the memory allocation
* @c33_pw_limit_nb_ranges: number of supported power limit configuration
* ranges
+ * @prio_max: max priority allowed for the c33_prio variable value.
+ * @prio: priority of the PSE. Managed by PSE core in case of static budget
+ * evaluation strategy.
*/
struct ethtool_pse_control_status {
+ u32 pw_d_id;
enum ethtool_podl_pse_admin_state podl_admin_state;
enum ethtool_podl_pse_pw_d_status podl_pw_status;
enum ethtool_c33_pse_admin_state c33_admin_state;
@@ -129,12 +152,20 @@ struct ethtool_pse_control_status {
u32 c33_avail_pw_limit;
struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
u32 c33_pw_limit_nb_ranges;
+ u32 prio_max;
+ u32 prio;
};
/**
* struct pse_controller_ops - PSE controller driver callbacks
*
- * @setup_pi_matrix: setup PI matrix of the PSE controller
+ * @setup_pi_matrix: Setup PI matrix of the PSE controller.
+ * The PSE PIs devicetree nodes have already been parsed by
+ * of_load_pse_pis() and the pcdev->pi[x]->pairset[y].np
+ * populated. This callback should establish the
+ * relationship between the PSE controller hardware ports
+ * and the PSE Power Interfaces, either through software
+ * mapping or hardware configuration.
* @pi_get_admin_state: Get the operational state of the PSE PI. This ops
* is mandatory.
* @pi_get_pw_status: Get the power detection status of the PSE PI. This
@@ -152,6 +183,11 @@ struct ethtool_pse_control_status {
* range. The driver is in charge of the memory
* allocation and should return the number of
* ranges.
+ * @pi_get_prio: Get the PSE PI priority.
+ * @pi_set_prio: Configure the PSE PI priority.
+ * @pi_get_pw_req: Get the power requested by a PD before enabling the PSE PI.
+ * This is only relevant when an interrupt is registered using
+ * the devm_pse_irq_helper() helper.
*/
struct pse_controller_ops {
int (*setup_pi_matrix)(struct pse_controller_dev *pcdev);
@@ -172,6 +208,10 @@ struct pse_controller_ops {
int id, int max_mW);
int (*pi_get_pw_limit_ranges)(struct pse_controller_dev *pcdev, int id,
struct pse_pw_limit_ranges *pw_limit_ranges);
+ int (*pi_get_prio)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_set_prio)(struct pse_controller_dev *pcdev, int id,
+ unsigned int prio);
+ int (*pi_get_pw_req)(struct pse_controller_dev *pcdev, int id);
};
struct module;
@@ -206,12 +246,35 @@ struct pse_pi_pairset {
* @np: device node pointer of the PSE PI node
* @rdev: regulator represented by the PSE PI
* @admin_state_enabled: PI enabled state
+ * @pw_d: Power domain of the PSE PI
+ * @prio: Priority of the PSE PI. Used in static budget evaluation strategy
+ * @isr_pd_detected: PSE PI detection status managed by the interrupt
+ * handler. This variable is relevant when power enablement is managed
+ * in software, as happens with the static budget evaluation
+ * strategy.
+ * @pw_allocated_mW: Power allocated to a PSE PI to manage power budget in
+ * static budget evaluation strategy.
*/
struct pse_pi {
struct pse_pi_pairset pairset[2];
struct device_node *np;
struct regulator_dev *rdev;
bool admin_state_enabled;
+ struct pse_power_domain *pw_d;
+ int prio;
+ bool isr_pd_detected;
+ int pw_allocated_mW;
+};
+
+/**
+ * struct pse_ntf - PSE notification element
+ *
+ * @id: ID of the PSE control
+ * @notifs: PSE notifications to be reported
+ */
+struct pse_ntf {
+ int id;
+ unsigned long notifs;
};
/**
@@ -228,6 +291,13 @@ struct pse_pi {
* @types: types of the PSE controller
* @pi: table of PSE PIs described in this controller device
* @no_of_pse_pi: flag set if the pse_pis devicetree node is not used
+ * @irq: PSE interrupt
+ * @pis_prio_max: Maximum value allowed for the PSE PIs priority
+ * @supp_budget_eval_strategies: budget evaluation strategies supported
+ * by the PSE
+ * @ntf_work: workqueue for PSE notification management
+ * @ntf_fifo: PSE notifications FIFO
+ * @ntf_fifo_lock: protect @ntf_fifo writer
*/
struct pse_controller_dev {
const struct pse_controller_ops *ops;
@@ -241,6 +311,30 @@ struct pse_controller_dev {
enum ethtool_pse_types types;
struct pse_pi *pi;
bool no_of_pse_pi;
+ int irq;
+ unsigned int pis_prio_max;
+ u32 supp_budget_eval_strategies;
+ struct work_struct ntf_work;
+ DECLARE_KFIFO_PTR(ntf_fifo, struct pse_ntf);
+ spinlock_t ntf_fifo_lock; /* Protect @ntf_fifo writer */
+};
+
+/**
+ * enum pse_budget_eval_strategies - PSE budget evaluation strategies.
+ * @PSE_BUDGET_EVAL_STRAT_DISABLED: Budget evaluation strategy disabled.
+ * @PSE_BUDGET_EVAL_STRAT_STATIC: PSE static budget evaluation strategy.
+ * Budget evaluation strategy based on the power requested during PD
+ * classification. This strategy is managed by the PSE core.
+ * @PSE_BUDGET_EVAL_STRAT_DYNAMIC: PSE dynamic budget evaluation
+ * strategy. Budget evaluation strategy based on the current consumption
+ * per ports compared to the total power budget. This mode is managed by
+ * the PSE controller.
+ */
+
+enum pse_budget_eval_strategies {
+ PSE_BUDGET_EVAL_STRAT_DISABLED = 1 << 0,
+ PSE_BUDGET_EVAL_STRAT_STATIC = 1 << 1,
+ PSE_BUDGET_EVAL_STRAT_DYNAMIC = 1 << 2,
};
#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
@@ -249,8 +343,11 @@ void pse_controller_unregister(struct pse_controller_dev *pcdev);
struct device;
int devm_pse_controller_register(struct device *dev,
struct pse_controller_dev *pcdev);
+int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq,
+ int irq_flags, const struct pse_irq_desc *d);
-struct pse_control *of_pse_control_get(struct device_node *node);
+struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev);
void pse_control_put(struct pse_control *psec);
int pse_ethtool_get_status(struct pse_control *psec,
@@ -262,13 +359,17 @@ int pse_ethtool_set_config(struct pse_control *psec,
int pse_ethtool_set_pw_limit(struct pse_control *psec,
struct netlink_ext_ack *extack,
const unsigned int pw_limit);
+int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio);
bool pse_has_podl(struct pse_control *psec);
bool pse_has_c33(struct pse_control *psec);
#else
-static inline struct pse_control *of_pse_control_get(struct device_node *node)
+static inline struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev)
{
return ERR_PTR(-ENOENT);
}
@@ -298,6 +399,13 @@ static inline int pse_ethtool_set_pw_limit(struct pse_control *psec,
return -EOPNOTSUPP;
}
+static inline int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio)
+{
+ return -EOPNOTSUPP;
+}
+
static inline bool pse_has_podl(struct pse_control *psec)
{
return false;
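A sketch of hooking a controller interrupt into the new helper; the status register layout and both foo_* helpers are assumptions, and the values placed in notifs[] are expected to be ethtool PSE event flags:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/pse-pd/pse.h>

/* Hypothetical: read the IRQ status register and translate its bits into
 * the ethtool PSE event flags reported through @notifs.
 */
static int foo_read_irq_status(struct pse_controller_dev *pcdev, u32 *status);
static unsigned long foo_to_pse_events(u32 status);

static int foo_map_event(int irq, struct pse_controller_dev *pcdev,
                         unsigned long *notifs, unsigned long *notifs_mask)
{
        u32 status;
        int ret;

        ret = foo_read_irq_status(pcdev, &status);
        if (ret)
                return ret;

        /* report the decoded events against PI 0 only, for brevity */
        notifs[0] = foo_to_pse_events(status);
        *notifs_mask = BIT(0);

        return 0;
}

static int foo_register_pse_irq(struct pse_controller_dev *pcdev, int irq)
{
        static const struct pse_irq_desc desc = {
                .name = "foo-pse",
                .map_event = foo_map_event,
        };

        return devm_pse_irq_helper(pcdev, irq, IRQF_ONESHOT, &desc);
}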
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
index 8eac4f3d5254..d10563afd91c 100644
--- a/include/linux/ref_tracker.h
+++ b/include/linux/ref_tracker.h
@@ -6,6 +6,8 @@
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
+#define __ostream_printf __printf(2, 3)
+
struct ref_tracker;
struct ref_tracker_dir {
@@ -17,15 +19,45 @@ struct ref_tracker_dir {
bool dead;
struct list_head list; /* List of active trackers */
struct list_head quarantine; /* List of dead trackers */
- char name[32];
+ const char *class; /* object classname */
#endif
};
#ifdef CONFIG_REF_TRACKER
+#ifdef CONFIG_DEBUG_FS
+
+void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir);
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ref_tracker_dir_init - initialize a ref_tracker dir
+ * @dir: ref_tracker_dir to be initialized
+ * @quarantine_count: max number of entries to be tracked
+ * @class: pointer to static string that describes object type
+ *
+ * Initialize a ref_tracker_dir. If debugfs is configured, then a file
+ * will also be created for it under the top-level ref_tracker debugfs
+ * directory.
+ *
+ * Note that @class must point to a static string.
+ */
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count,
- const char *name)
+ const char *class)
{
INIT_LIST_HEAD(&dir->list);
INIT_LIST_HEAD(&dir->quarantine);
@@ -34,7 +66,8 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
dir->dead = false;
refcount_set(&dir->untracked, 1);
refcount_set(&dir->no_tracker, 1);
- strscpy(dir->name, name, sizeof(dir->name));
+ dir->class = class;
+ ref_tracker_dir_debugfs(dir);
stack_depot_init();
}
@@ -58,7 +91,16 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count,
- const char *name)
+ const char *class)
+{
+}
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
{
}
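Callers now pass a static class string and can attach a per-instance name through debugfs instead of the removed name[32] field; a small sketch (struct foo_object and the naming scheme are assumptions):

#include <linux/ref_tracker.h>
#include <linux/string.h>

struct foo_object {
        struct ref_tracker_dir refs;
        char name[16];
};

static void foo_object_init(struct foo_object *obj, const char *name)
{
        /* @class must be a static string describing the object type */
        ref_tracker_dir_init(&obj->refs, 16, "foo_object");
        /* optional: give this instance a readable debugfs alias (assumed usage) */
        ref_tracker_dir_symlink(&obj->refs, "foo_object@%s", name);
        strscpy(obj->name, name, sizeof(obj->name));
}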
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5520524c93bf..4f6dcb37bae8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3665,7 +3665,13 @@ static inline void *skb_frag_address(const skb_frag_t *frag)
*/
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
- void *ptr = page_address(skb_frag_page(frag));
+ struct page *page = skb_frag_page(frag);
+ void *ptr;
+
+ if (!page)
+ return NULL;
+
+ ptr = page_address(page);
if (unlikely(!ptr))
return NULL;
@@ -3873,20 +3879,26 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
__must_check;
-static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
- const struct page *page, int off)
+static inline bool skb_can_coalesce_netmem(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off)
{
if (skb_zcopy(skb))
return false;
if (i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == skb_frag_page(frag) &&
+ return netmem == skb_frag_netmem(frag) &&
off == skb_frag_off(frag) + skb_frag_size(frag);
}
return false;
}
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+ const struct page *page, int off)
+{
+ return skb_can_coalesce_netmem(skb, i, page_to_netmem(page), off);
+}
+
static inline int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
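A sketch of the netmem-aware coalescing check when appending a received fragment; foo_add_rx_chunk() is made up, while skb_can_coalesce_netmem(), skb_frag_size_add() and skb_add_rx_frag_netmem() are existing helpers (truesize is simplified to len here):

#include <linux/skbuff.h>

static void foo_add_rx_chunk(struct sk_buff *skb, netmem_ref netmem,
                             int off, int len)
{
        int i = skb_shinfo(skb)->nr_frags;

        /* merge with the previous frag when the chunk is contiguous with it */
        if (skb_can_coalesce_netmem(skb, i, netmem, off))
                skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], len);
        else
                skb_add_rx_frag_netmem(skb, i, netmem, off, len, len);
}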
diff --git a/include/linux/soc/marvell/silicons.h b/include/linux/soc/marvell/silicons.h
new file mode 100644
index 000000000000..66bb9bfaf17d
--- /dev/null
+++ b/include/linux/soc/marvell/silicons.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef __SOC_SILICON_H
+#define __SOC_SILICON_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#if defined(CONFIG_ARM64)
+
+#define CN20K_CHIPID 0x20
+/*
+ * Silicon check for CN20K family
+ */
+static inline bool is_cn20k(struct pci_dev *pdev)
+{
+ return (pdev->subsystem_device & 0xFF) == CN20K_CHIPID;
+}
+#else
+#define is_cn20k(pdev) ((void)(pdev), 0)
+#endif
+
+#endif /* __SOC_SILICON_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 29f59d50dc73..1a5737b3753d 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -208,7 +208,6 @@ struct tcp_sock {
u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u16 gso_segs; /* Max number of segs per GSO packet */
/* from STCP, retrans queue hinting */
- struct sk_buff *lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
__cacheline_group_end(tcp_sock_read_tx);
@@ -419,8 +418,6 @@ struct tcp_sock {
struct tcp_sack_block recv_sack_cache[4];
- int lost_cnt_hint;
-
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 0b9f1e598e3a..208682f77179 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -58,7 +58,7 @@ struct usbnet {
unsigned interrupt_count;
struct mutex interrupt_mutex;
struct usb_anchor deferred;
- struct tasklet_struct bh;
+ struct work_struct bh_work;
struct work_struct kevent;
unsigned long flags;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index d1848dc8ec99..4a092da3a9de 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -560,7 +560,7 @@ struct ieee80211_sta_s1g_cap {
* @vht_cap: VHT capabilities in this band
* @s1g_cap: S1G capabilities in this band
* @edmg_cap: EDMG capabilities in this band
- * @s1g_cap: S1G capabilities in this band (S1B band only, of course)
+ * @s1g_cap: S1G capabilities in this band (S1G band only, of course)
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -1653,6 +1653,7 @@ struct sta_txpwr {
* @he_6ghz_capa: HE 6 GHz Band capabilities of station
* @eht_capa: EHT capabilities of station
* @eht_capa_len: the length of the EHT capabilities
+ * @s1g_capa: S1G capabilities of station
*/
struct link_station_parameters {
const u8 *mld_mac;
@@ -1671,6 +1672,7 @@ struct link_station_parameters {
const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
const struct ieee80211_eht_cap_elem *eht_capa;
u8 eht_capa_len;
+ const struct ieee80211_s1g_cap *s1g_capa;
};
/**
@@ -2018,6 +2020,99 @@ struct cfg80211_tid_stats {
#define IEEE80211_MAX_CHAINS 4
/**
+ * struct link_station_info - link station information
+ *
+ * Link station information filled by driver for get_station() and
+ * dump_station().
+ * @filled: bit flag of flags using the bits of &enum nl80211_sta_info to
+ * indicate the relevant values in this struct for them
+ * @connected_time: time(in secs) since a link of station is last connected
+ * @inactive_time: time since last activity for link station(tx/rx)
+ * in milliseconds
+ * @assoc_at: boottime (ns) of the last association of link of station
+ * @rx_bytes: bytes (size of MPDUs) received from this link of station
+ * @tx_bytes: bytes (size of MPDUs) transmitted to this link of station
+ * @signal: The signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: Average signal strength, type depends on the wiphy's
+ * signal_type. For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
+ * @chain_signal: per-chain signal strength of last received packet in dBm
+ * @chain_signal_avg: per-chain signal strength average in dBm
+ * @txrate: current unicast bitrate from this link of station
+ * @rxrate: current unicast bitrate to this link of station
+ * @rx_packets: packets (MSDUs & MMPDUs) received from this link of station
+ * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this link of station
+ * @tx_retries: cumulative retry counts (MPDUs) for this link of station
+ * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
+ * @rx_dropped_misc: Dropped for un-specified reason.
+ * @bss_param: current BSS parameters
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
+ * @expected_throughput: expected throughput in kbps (including 802.11 headers)
+ * towards this station.
+ * @rx_beacon: number of beacons received from this peer
+ * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
+ * from this peer
+ * @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
+ * @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer
+ * @airtime_weight: current airtime scheduling weight
+ * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
+ * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
+ * Note that this doesn't use the @filled bit, but is used if non-NULL.
+ * @ack_signal: signal strength (in dBm) of the last ACK frame.
+ * @avg_ack_signal: average RSSI value of ACK packets for the number of MSDUs
+ * that have been sent.
+ * @rx_mpdu_count: number of MPDUs received from this station
+ * @fcs_err_count: number of packets (MPDUs) received from this station with
+ * an FCS error. This counter should be incremented only when TA of the
+ * received packet with an FCS error matches the peer MAC address.
+ * @addr: For MLO STA connection, filled with address of the link of station.
+ */
+struct link_station_info {
+ u64 filled;
+ u32 connected_time;
+ u32 inactive_time;
+ u64 assoc_at;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ s8 signal;
+ s8 signal_avg;
+
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
+
+ struct rate_info txrate;
+ struct rate_info rxrate;
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 tx_retries;
+ u32 tx_failed;
+ u32 rx_dropped_misc;
+ struct sta_bss_parameters bss_param;
+
+ u32 beacon_loss_count;
+
+ u32 expected_throughput;
+
+ u64 tx_duration;
+ u64 rx_duration;
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
+
+ u16 airtime_weight;
+
+ s8 ack_signal;
+ s8 avg_ack_signal;
+ struct cfg80211_tid_stats *pertid;
+
+ u32 rx_mpdu_count;
+ u32 fcs_err_count;
+
+ u8 addr[ETH_ALEN] __aligned(2);
+};
+
+/**
* struct station_info - station information
*
* Station information filled by driver for get_station() and dump_station.
@@ -2101,6 +2196,11 @@ struct cfg80211_tid_stats {
* dump_station() callbacks. User space needs this information to determine
* the accepted and rejected affiliated links of the connected station.
* @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets.
+ * @valid_links: bitmap of valid links, or 0 for non-MLO. Drivers fill this
+ * information in cfg80211_new_sta(), cfg80211_del_sta_sinfo(),
+ * get_station() and dump_station() callbacks.
+ * @links: reference to Link sta entries for MLO STA, all link specific
+ * information is accessed through links[link_id].
*/
struct station_info {
u64 filled;
@@ -2165,6 +2265,9 @@ struct station_info {
u8 mld_addr[ETH_ALEN] __aligned(2);
const u8 *assoc_resp_ies;
size_t assoc_resp_ies_len;
+
+ u16 valid_links;
+ struct link_station_info *links[IEEE80211_MLD_MAX_NUM_LINKS];
};
/**
@@ -4752,12 +4855,14 @@ struct cfg80211_ops {
int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
int rate[NUM_NL80211_BANDS]);
- int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
+ int (*set_wiphy_params)(struct wiphy *wiphy, int radio_idx,
+ u32 changed);
int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int radio_idx,
enum nl80211_tx_power_setting type, int mbm);
int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id, int *dbm);
+ int radio_idx, unsigned int link_id, int *dbm);
void (*rfkill_poll)(struct wiphy *wiphy);
@@ -4819,8 +4924,10 @@ struct cfg80211_ops {
struct wireless_dev *wdev,
struct mgmt_frame_regs *upd);
- int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
- int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
+ int (*set_antenna)(struct wiphy *wiphy, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct wiphy *wiphy, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
int (*sched_scan_start)(struct wiphy *wiphy,
struct net_device *dev,
@@ -5443,6 +5550,18 @@ struct wiphy_iftype_akm_suites {
};
/**
+ * struct wiphy_radio_cfg - physical radio config of a wiphy
+ * This structure describes the configurations of a physical radio in a
+ * wiphy. It is used to denote per-radio attributes belonging to a wiphy.
+ *
+ * @rts_threshold: RTS threshold (dot11RTSThreshold);
+ * -1 (default) = RTS/CTS disabled
+ */
+struct wiphy_radio_cfg {
+ u32 rts_threshold;
+};
+
+/**
* struct wiphy_radio_freq_range - wiphy frequency range
* @start_freq: start range edge frequency (kHz)
* @end_freq: end range edge frequency (kHz)
@@ -5697,6 +5816,10 @@ struct wiphy_radio {
* supports enabling HW timestamping for all peers (i.e. no need to
* specify a mac address).
*
+ * @radio_cfg: configuration of radios belonging to a multi-radio wiphy. This
+ * struct contains a list of all radio specific attributes and should be
+ * used only for multi-radio wiphy.
+ *
* @radio: radios belonging to this wiphy
* @n_radio: number of radios
*/
@@ -5786,6 +5909,8 @@ struct wiphy {
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request);
+ struct wiphy_radio_cfg *radio_cfg;
+
/* fields below are read-only, assigned by cfg80211 */
const struct ieee80211_regdomain __rcu *regd;
@@ -8466,6 +8591,17 @@ void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie,
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
/**
+ * cfg80211_link_sinfo_alloc_tid_stats - allocate per-tid statistics.
+ *
+ * @link_sinfo: the link station information
+ * @gfp: allocation flags
+ *
+ * Return: 0 on success. Non-zero on error.
+ */
+int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo,
+ gfp_t gfp);
+
+/**
* cfg80211_sinfo_release_content - release contents of station info
* @sinfo: the station information
*
@@ -8476,6 +8612,13 @@ int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
static inline void cfg80211_sinfo_release_content(struct station_info *sinfo)
{
kfree(sinfo->pertid);
+
+ for (int link_id = 0; link_id < ARRAY_SIZE(sinfo->links); link_id++) {
+ if (sinfo->links[link_id]) {
+ kfree(sinfo->links[link_id]->pertid);
+ kfree(sinfo->links[link_id]);
+ }
+ }
}
/**
@@ -9372,6 +9515,17 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
+/**
+ * cfg80211_get_radio_idx_by_chan - get the radio index by the channel
+ *
+ * @wiphy: the wiphy
+ * @chan: channel for which the supported radio index is required
+ *
+ * Return: radio index on success or a negative error code
+ */
+int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
+ const struct ieee80211_channel *chan);
+
/**
* cfg80211_stop_iface - trigger interface disconnection
@@ -9736,6 +9890,11 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
* struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data
* @buf: MLO Reconfiguration Response frame (header + body)
* @len: length of the frame data
+ * @driver_initiated: Indicates whether the add links request was initiated by
+ * the driver. This is set to true when the driver initiates the link
+ * reconfiguration request because handling of AP link recommendation
+ * requests (Ex: BTM (BSS Transition Management) request) is offloaded
+ * to the driver.
* @added_links: BIT mask of links successfully added to the association
* @links: per-link information indexed by link ID
* @links.bss: the BSS that MLO reconfiguration was requested for, ownership of
@@ -9748,6 +9907,7 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
struct cfg80211_mlo_reconf_done_data {
const u8 *buf;
size_t len;
+ bool driver_initiated;
u16 added_links;
struct {
struct cfg80211_bss *bss;
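A sketch of how a driver's get_station()/dump_station() path might fill one per-link entry for an MLO peer; the counter values and the per-link kzalloc pattern are illustrative, with cfg80211_sinfo_release_content() above freeing both the entry and its pertid array:

#include <linux/bits.h>
#include <linux/slab.h>
#include <net/cfg80211.h>

static int foo_fill_link_sinfo(struct station_info *sinfo, unsigned int link_id)
{
        struct link_station_info *link;

        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        link->filled = BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
                       BIT_ULL(NL80211_STA_INFO_TX_BYTES);
        link->rx_bytes = 12345;         /* per-link counters, made-up values */
        link->tx_bytes = 67890;

        sinfo->links[link_id] = link;
        sinfo->valid_links |= BIT(link_id);
        return 0;
}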
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 0091f23a40f7..63517646a497 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -520,6 +520,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -578,6 +579,9 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME "event_eq_size"
#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+#define DEVLINK_PARAM_GENERIC_ENABLE_PHC_NAME "enable_phc"
+#define DEVLINK_PARAM_GENERIC_ENABLE_PHC_TYPE DEVLINK_PARAM_TYPE_BOOL
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index bcf9d7467e1a..b9e78290269e 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -121,6 +121,9 @@
FN(ARP_PVLAN_DISABLE) \
FN(MAC_IEEE_MAC_CONTROL) \
FN(BRIDGE_INGRESS_STP_STATE) \
+ FN(CAN_RX_INVALID_FRAME) \
+ FN(CANFD_RX_INVALID_FRAME) \
+ FN(CANXL_RX_INVALID_FRAME) \
FNe(MAX)
/**
@@ -574,6 +577,21 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE,
/**
+ * @SKB_DROP_REASON_CAN_RX_INVALID_FRAME: received
+ * non conform CAN frame (or device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CAN_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_CANFD_RX_INVALID_FRAME: received
+ * non conform CAN-FD frame (or device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CANFD_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_CANXL_RX_INVALID_FRAME: received
+ * non conform CAN-XL frame (or device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CANXL_RX_INVALID_FRAME,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
*/
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 55e2d97f247e..d73ea0880066 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -54,11 +54,13 @@ struct tc_action;
#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
#define DSA_TAG_PROTO_LAN937X_VALUE 27
#define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE 28
+#define DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE 29
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
+ DSA_TAG_PROTO_BRCM_LEGACY_FCS = DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE,
DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 4564b5d348b1..ae09e91398a5 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -202,12 +202,6 @@ static inline spinlock_t *inet_ehash_lockp(
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
-static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
-{
- kfree(h->lhash2);
- h->lhash2 = NULL;
-}
-
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
kvfree(hashinfo->ehash_locks);
diff --git a/include/net/ip.h b/include/net/ip.h
index 47ed6d23853d..375304bb99f6 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -59,6 +59,7 @@ struct inet_skb_parm {
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
#define IPSKB_MULTIPATH BIT(9)
+#define IPSKB_MCROUTE BIT(10)
u16 frag_max_size;
};
@@ -167,6 +168,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
+int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 399592405c72..dd163495f353 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -152,11 +152,12 @@ int ip6_tnl_get_iflink(const struct net_device *dev);
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev, u16 ip6cb_flags)
{
int pkt_len, err;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ IP6CB(skb)->flags = ip6cb_flags;
pkt_len = skb->len - skb_inner_network_offset(skb);
err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 0c3d571a04a1..8cf1380f3656 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -603,7 +603,7 @@ static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
- u8 tos, u8 ttl, __be16 df, bool xnet);
+ u8 tos, u8 ttl, __be16 df, bool xnet, u16 ipcb_flags);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
index ab05024be518..5d991404845e 100644
--- a/include/net/libeth/rx.h
+++ b/include/net/libeth/rx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
#ifndef __LIBETH_RX_H
#define __LIBETH_RX_H
@@ -13,8 +13,10 @@
/* Space reserved in front of each frame */
#define LIBETH_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define LIBETH_XDP_HEADROOM (ALIGN(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+ NET_IP_ALIGN)
/* Maximum headroom for worst-case calculations */
-#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
+#define LIBETH_MAX_HEADROOM LIBETH_XDP_HEADROOM
/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
/* Maximum supported L2-L4 header length */
@@ -31,7 +33,7 @@
/**
* struct libeth_fqe - structure representing an Rx buffer (fill queue element)
- * @page: page holding the buffer
+ * @netmem: network memory reference holding the buffer
* @offset: offset from the page start (to the headroom)
* @truesize: total space occupied by the buffer (w/ headroom and tailroom)
*
@@ -40,7 +42,7 @@
* former, @offset is always 0 and @truesize is always ```PAGE_SIZE```.
*/
struct libeth_fqe {
- struct page *page;
+ netmem_ref netmem;
u32 offset;
u32 truesize;
} __aligned_largest;
@@ -66,6 +68,7 @@ enum libeth_fqe_type {
* @count: number of descriptors/buffers the queue has
* @type: type of the buffers this queue has
* @hsplit: flag whether header split is enabled
+ * @xdp: flag indicating whether XDP is enabled
* @buf_len: HW-writeable length per each buffer
* @nid: ID of the closest NUMA node with memory
*/
@@ -81,6 +84,7 @@ struct libeth_fq {
/* Cold fields */
enum libeth_fqe_type type:2;
bool hsplit:1;
+ bool xdp:1;
u32 buf_len;
int nid;
@@ -102,15 +106,16 @@ static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)
struct libeth_fqe *buf = &fq->fqes[i];
buf->truesize = fq->truesize;
- buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize);
- if (unlikely(!buf->page))
+ buf->netmem = page_pool_dev_alloc_netmem(fq->pp, &buf->offset,
+ &buf->truesize);
+ if (unlikely(!buf->netmem))
return DMA_MAPPING_ERROR;
- return page_pool_get_dma_addr(buf->page) + buf->offset +
+ return page_pool_get_dma_addr_netmem(buf->netmem) + buf->offset +
fq->pp->p.offset;
}
-void libeth_rx_recycle_slow(struct page *page);
+void libeth_rx_recycle_slow(netmem_ref netmem);
/**
* libeth_rx_sync_for_cpu - synchronize or recycle buffer post DMA
@@ -126,18 +131,19 @@ void libeth_rx_recycle_slow(struct page *page);
static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
u32 len)
{
- struct page *page = fqe->page;
+ netmem_ref netmem = fqe->netmem;
/* Very rare, but possible case. The most common reason:
* the last fragment contained FCS only, which was then
* stripped by the HW.
*/
if (unlikely(!len)) {
- libeth_rx_recycle_slow(page);
+ libeth_rx_recycle_slow(netmem);
return false;
}
- page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len);
+ page_pool_dma_sync_netmem_for_cpu(netmem_get_pp(netmem), netmem,
+ fqe->offset, len);
return true;
}
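A sketch of an Rx refill loop on top of the netmem-backed fill queue; the ring/descriptor handling (foo_write_rx_desc()) is an assumption, libeth_rx_alloc() and DMA_MAPPING_ERROR come from the header:

#include <net/libeth/rx.h>

/* Hypothetical descriptor write; each driver programs its own Rx ring. */
static void foo_write_rx_desc(void *ring, u32 i, dma_addr_t dma);

static u32 foo_rx_refill(const struct libeth_fq_fp *fq, void *ring, u32 count)
{
        u32 i;

        for (i = 0; i < count; i++) {
                dma_addr_t dma = libeth_rx_alloc(fq, i);

                if (dma == DMA_MAPPING_ERROR)
                        break;          /* page_pool exhausted, retry later */

                foo_write_rx_desc(ring, i, dma);
        }

        return i;
}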
diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
index 35614f9523f6..c3db5c6f1641 100644
--- a/include/net/libeth/tx.h
+++ b/include/net/libeth/tx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
#ifndef __LIBETH_TX_H
#define __LIBETH_TX_H
@@ -12,11 +12,17 @@
/**
* enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
- * @LIBETH_SQE_EMPTY: unused/empty, no action required
+ * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action required
* @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
* @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
* @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
* @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
+ * @__LIBETH_SQE_XDP_START: separator between skb and XDP types
+ * @LIBETH_SQE_XDP_TX: &skb_shared_info, libeth_xdp_return_buff_bulk(), stats
+ * @LIBETH_SQE_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame_bulk(), stats
+ * @LIBETH_SQE_XDP_XMIT_FRAG: &xdp_frame frag, only unmap DMA
+ * @LIBETH_SQE_XSK_TX: &libeth_xdp_buff on XSk queue, xsk_buff_free(), stats
+ * @LIBETH_SQE_XSK_TX_FRAG: &libeth_xdp_buff frag on XSk queue, xsk_buff_free()
*/
enum libeth_sqe_type {
LIBETH_SQE_EMPTY = 0U,
@@ -24,6 +30,13 @@ enum libeth_sqe_type {
LIBETH_SQE_SLAB,
LIBETH_SQE_FRAG,
LIBETH_SQE_SKB,
+
+ __LIBETH_SQE_XDP_START,
+ LIBETH_SQE_XDP_TX = __LIBETH_SQE_XDP_START,
+ LIBETH_SQE_XDP_XMIT,
+ LIBETH_SQE_XDP_XMIT_FRAG,
+ LIBETH_SQE_XSK_TX,
+ LIBETH_SQE_XSK_TX_FRAG,
};
/**
@@ -32,6 +45,9 @@ enum libeth_sqe_type {
* @rs_idx: index of the last buffer from the batch this one was sent in
* @raw: slab buffer to free via kfree()
* @skb: &sk_buff to consume
+ * @sinfo: skb shared info of an XDP_TX frame
+ * @xdpf: XDP frame from ::ndo_xdp_xmit()
+ * @xsk: XSk Rx frame from XDP_TX action
* @dma: DMA address to unmap
* @len: length of the mapped region to unmap
* @nr_frags: number of frags in the frame this buffer belongs to
@@ -46,6 +62,9 @@ struct libeth_sqe {
union {
void *raw;
struct sk_buff *skb;
+ struct skb_shared_info *sinfo;
+ struct xdp_frame *xdpf;
+ struct libeth_xdp_buff *xsk;
};
DEFINE_DMA_UNMAP_ADDR(dma);
@@ -71,7 +90,10 @@ struct libeth_sqe {
/**
* struct libeth_cq_pp - completion queue poll params
* @dev: &device to perform DMA unmapping
+ * @bq: XDP frame bulk to combine return operations
* @ss: onstack NAPI stats to fill
+ * @xss: onstack XDPSQ NAPI stats to fill
+ * @xdp_tx: number of XDP-not-XSk frames processed
* @napi: whether it's called from the NAPI context
*
* libeth uses this structure to access objects needed for performing full
@@ -80,7 +102,13 @@ struct libeth_sqe {
*/
struct libeth_cq_pp {
struct device *dev;
- struct libeth_sq_napi_stats *ss;
+ struct xdp_frame_bulk *bq;
+
+ union {
+ struct libeth_sq_napi_stats *ss;
+ struct libeth_xdpsq_napi_stats *xss;
+ };
+ u32 xdp_tx;
bool napi;
};
@@ -126,4 +154,6 @@ static inline void libeth_tx_complete(struct libeth_sqe *sqe,
sqe->type = LIBETH_SQE_EMPTY;
}
+void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp);
+
#endif /* __LIBETH_TX_H */
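Since &libeth_cq_pp now carries an &xdp_frame_bulk and XDPSQ stats, a completion loop handling both skb and XDP SQEs could look roughly like the sketch below; my_sq, my_sq_desc_done() and their fields are hypothetical.

/* Hedged sketch, not part of the patch: all my_* names are hypothetical. */
static void my_sq_clean(struct my_sq *sq)
{
	struct libeth_xdpsq_napi_stats xss = { };
	struct xdp_frame_bulk bq;
	struct libeth_cq_pp cp = {
		.dev	= sq->dev,
		.bq	= &bq,
		.xss	= &xss,
		.napi	= true,
	};

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();	/* bulk-freeing XDP frames is done under RCU */

	while (my_sq_desc_done(sq)) {
		libeth_tx_complete_any(&sq->sqes[sq->next_to_clean], &cp);

		if (unlikely(++sq->next_to_clean == sq->count))
			sq->next_to_clean = 0;
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();
}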
diff --git a/include/net/libeth/types.h b/include/net/libeth/types.h
index 603825e45133..cf1d78a9dc38 100644
--- a/include/net/libeth/types.h
+++ b/include/net/libeth/types.h
@@ -1,10 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
#ifndef __LIBETH_TYPES_H
#define __LIBETH_TYPES_H
-#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* Stats */
+
+/**
+ * struct libeth_rq_napi_stats - "hot" counters to update in Rx polling loop
+ * @packets: received frames counter
+ * @bytes: sum of bytes of received frames above
+ * @fragments: sum of fragments of received S/G frames
+ * @hsplit: number of frames the device performed the header split for
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_rq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ u32 fragments;
+ u32 hsplit;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
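These onstack counters are typically folded into persistent per-queue stats once per poll; a minimal hedged sketch follows, where my_rxq and its u64_stats fields are hypothetical.

/* Hedged sketch, not part of the patch: my_rxq is hypothetical. */
static void my_rxq_update_stats(struct my_rxq *rxq,
				const struct libeth_rq_napi_stats *rs)
{
	u64_stats_update_begin(&rxq->syncp);
	u64_stats_add(&rxq->packets, rs->packets);
	u64_stats_add(&rxq->bytes, rs->bytes);
	u64_stats_add(&rxq->hsplit, rs->hsplit);
	u64_stats_update_end(&rxq->syncp);
}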
/**
* struct libeth_sq_napi_stats - "hot" counters to update in Tx completion loop
@@ -22,4 +44,84 @@ struct libeth_sq_napi_stats {
};
};
+/**
+ * struct libeth_xdpsq_napi_stats - "hot" counters to update in XDP Tx
+ * completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @fragments: sum of fragments of completed S/G frames
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_xdpsq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ u32 fragments;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+/* XDP */
+
+/*
+ * The following structures should be embedded into driver's queue structure
+ * and passed to the libeth_xdp helpers, never used directly.
+ */
+
+/* XDPSQ sharing */
+
+/**
+ * struct libeth_xdpsq_lock - locking primitive for sharing XDPSQs
+ * @lock: spinlock for locking the queue
+ * @share: whether this particular queue is shared
+ */
+struct libeth_xdpsq_lock {
+ spinlock_t lock;
+ bool share;
+};
+
+/* XDPSQ clean-up timers */
+
+/**
+ * struct libeth_xdpsq_timer - timer for cleaning up XDPSQs w/o interrupts
+ * @xdpsq: queue this timer belongs to
+ * @lock: lock for the queue
+ * @dwork: work performing cleanups
+ *
+ * XDPSQs not using interrupts but lazy cleaning, i.e. cleaning only when
+ * there's no space left for sending the currently queued frame/bulk, must
+ * fire up timers to make sure no stale buffers are left unfreed.
+ */
+struct libeth_xdpsq_timer {
+ void *xdpsq;
+ struct libeth_xdpsq_lock *lock;
+
+ struct delayed_work dwork;
+};
+
+/* Rx polling path */
+
+/**
+ * struct libeth_xdp_buff_stash - struct for stashing &xdp_buff onto a queue
+ * @data: pointer to the start of the frame, xdp_buff.data
+ * @headroom: frame headroom, xdp_buff.data - xdp_buff.data_hard_start
+ * @len: frame linear space length, xdp_buff.data_end - xdp_buff.data
+ * @frame_sz: truesize occupied by the frame, xdp_buff.frame_sz
+ * @flags: xdp_buff.flags
+ *
+ * &xdp_buff is 56 bytes long on x64, &libeth_xdp_buff is 64 bytes. This
+ * structure carries only necessary fields to save/restore a partially built
+ * frame on the queue structure to finish it during the next NAPI poll.
+ */
+struct libeth_xdp_buff_stash {
+ void *data;
+ u16 headroom;
+ u16 len;
+
+ u32 frame_sz:24;
+ u32 flags:8;
+} __aligned_largest;
+
#endif /* __LIBETH_TYPES_H */
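As the comment in this header notes, these types are meant to be embedded into the driver's own queue structures; a rough hedged sketch of such embedding (all my_* names hypothetical) might be:

/* Hedged sketch, not part of the patch: all my_* names are hypothetical. */
struct my_xdpsq {
	struct libeth_sqe		*sqes;
	void				*descs;

	u32				next_to_use;
	u32				count;
	u32				pending;
	u32				xdp_tx;

	struct libeth_xdpsq_lock	lock;
	struct libeth_xdpsq_timer	*timer;
};

struct my_rxq {
	struct napi_struct		napi;
	struct xdp_rxq_info		xdp_rxq;
	struct libeth_xdp_buff_stash	xdp_stash;
	/* ... HW ring pointers, FQEs etc. ... */
};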
diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h
new file mode 100644
index 000000000000..6ce6aec6884c
--- /dev/null
+++ b/include/net/libeth/xdp.h
@@ -0,0 +1,1879 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_XDP_H
+#define __LIBETH_XDP_H
+
+#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
+
+#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
+#include <net/xsk_buff_pool.h>
+
+/*
+ * Defined as bits to be able to use them as a mask on Rx.
+ * Also used as internal return values on Tx.
+ */
+enum {
+ LIBETH_XDP_PASS = 0U,
+ LIBETH_XDP_DROP = BIT(0),
+ LIBETH_XDP_ABORTED = BIT(1),
+ LIBETH_XDP_TX = BIT(2),
+ LIBETH_XDP_REDIRECT = BIT(3),
+};
+
+/*
+ * &xdp_buff_xsk is the largest structure &libeth_xdp_buff gets cast to,
+ * pick maximum pointer-compatible alignment.
+ */
+#define __LIBETH_XDP_BUFF_ALIGN \
+ (IS_ALIGNED(sizeof(struct xdp_buff_xsk), 16) ? 16 : \
+ IS_ALIGNED(sizeof(struct xdp_buff_xsk), 8) ? 8 : \
+ sizeof(long))
+
+/**
+ * struct libeth_xdp_buff - libeth extension over &xdp_buff
+ * @base: main &xdp_buff
+ * @data: shortcut for @base.data
+ * @desc: RQ descriptor containing metadata for this buffer
+ * @priv: driver-private scratchspace
+ *
+ * The main reason for this is to have a pointer to the descriptor to be able
+ * to quickly get frame metadata from xdpmo and driver buff-to-xdp callbacks
+ * (as well as bigger alignment).
+ * Pointer/layout-compatible with &xdp_buff and &xdp_buff_xsk.
+ */
+struct libeth_xdp_buff {
+ union {
+ struct xdp_buff base;
+ void *data;
+ };
+
+ const void *desc;
+ unsigned long priv[]
+ __aligned(__LIBETH_XDP_BUFF_ALIGN);
+} __aligned(__LIBETH_XDP_BUFF_ALIGN);
+static_assert(offsetof(struct libeth_xdp_buff, data) ==
+ offsetof(struct xdp_buff_xsk, xdp.data));
+static_assert(offsetof(struct libeth_xdp_buff, desc) ==
+ offsetof(struct xdp_buff_xsk, cb));
+static_assert(IS_ALIGNED(sizeof(struct xdp_buff_xsk),
+ __alignof(struct libeth_xdp_buff)));
+
+/**
+ * __LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
+ * @name: name of the variable to declare
+ * @...: sizeof() of the driver-private data
+ */
+#define __LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ ___LIBETH_XDP_ONSTACK_BUFF(name, ##__VA_ARGS__)
+/**
+ * LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
+ * @name: name of the variable to declare
+ * @...: type or variable name of the driver-private data
+ */
+#define LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ __LIBETH_XDP_ONSTACK_BUFF(name, __libeth_xdp_priv_sz(__VA_ARGS__))
+
+#define ___LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ __DEFINE_FLEX(struct libeth_xdp_buff, name, priv, \
+ LIBETH_XDP_PRIV_SZ(__VA_ARGS__ + 0), \
+ __uninitialized); \
+ LIBETH_XDP_ASSERT_PRIV_SZ(__VA_ARGS__ + 0)
+
+#define __libeth_xdp_priv_sz(...) \
+ CONCATENATE(__libeth_xdp_psz, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define __libeth_xdp_psz0(...)
+#define __libeth_xdp_psz1(...) sizeof(__VA_ARGS__)
+
+#define LIBETH_XDP_PRIV_SZ(sz) \
+ (ALIGN(sz, __alignof(struct libeth_xdp_buff)) / sizeof(long))
+
+/* Performs XSK_CHECK_PRIV_TYPE() */
+#define LIBETH_XDP_ASSERT_PRIV_SZ(sz) \
+ static_assert(offsetofend(struct xdp_buff_xsk, cb) >= \
+ struct_size_t(struct libeth_xdp_buff, priv, \
+ LIBETH_XDP_PRIV_SZ(sz)))
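For illustration, declaring the onstack buffer with driver-private scratchspace might look as follows; struct my_xdp_priv and my_poll() are hypothetical. The macro both reserves the space and statically checks that it fits.

/* Hedged sketch, not part of the patch: my_* names are hypothetical. */
struct my_xdp_priv {
	u32	rsc_cnt;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	LIBETH_XDP_ONSTACK_BUFF(xdp, struct my_xdp_priv);

	/* ... Rx processing using @xdp, private data lives in xdp->priv ... */

	return 0;
}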
+
+/* XDPSQ sharing */
+
+DECLARE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+
+/**
+ * libeth_xdpsq_num - calculate optimal number of XDPSQs for this device + sys
+ * @rxq: current number of active Rx queues
+ * @txq: current number of active Tx queues
+ * @max: maximum number of Tx queues
+ *
+ * Each RQ must have its own XDPSQ for XSk pairs, and each CPU must have its
+ * own XDPSQ for lockless sending (``XDP_TX``, .ndo_xdp_xmit()). Cap the
+ * maximum of these two with the number of SQs the device can have (minus the
+ * ones already used).
+ *
+ * Return: number of XDP Tx queues the device needs to use.
+ */
+static inline u32 libeth_xdpsq_num(u32 rxq, u32 txq, u32 max)
+{
+ return min(max(nr_cpu_ids, rxq), max - txq);
+}
+
+/**
+ * libeth_xdpsq_shared - whether XDPSQs can be shared between several CPUs
+ * @num: number of active XDPSQs
+ *
+ * Return: true if there's no 1:1 XDPSQ/CPU association, false otherwise.
+ */
+static inline bool libeth_xdpsq_shared(u32 num)
+{
+ return num < nr_cpu_ids;
+}
+
+/**
+ * libeth_xdpsq_id - get XDPSQ index corresponding to this CPU
+ * @num: number of active XDPSQs
+ *
+ * Helper for libeth_xdp routines, do not use in drivers directly.
+ *
+ * Return: XDPSQ index to be used on this CPU.
+ */
+static inline u32 libeth_xdpsq_id(u32 num)
+{
+ u32 ret = raw_smp_processor_id();
+
+ if (static_branch_unlikely(&libeth_xdpsq_share) &&
+ libeth_xdpsq_shared(num))
+ ret %= num;
+
+ return ret;
+}
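Taken together, a driver's queue-sizing logic might use these helpers roughly as below; my_priv and its fields are hypothetical.

/* Hedged sketch, not part of the patch: my_priv is hypothetical. */
static void my_set_xdpsq_num(struct my_priv *priv)
{
	u32 num = libeth_xdpsq_num(priv->num_rxq, priv->num_txq,
				   priv->max_txq);

	priv->num_xdpsq = num;
	priv->xdpsq_shared = libeth_xdpsq_shared(num);
}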
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev);
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev);
+
+/**
+ * libeth_xdpsq_get - initialize &libeth_xdpsq_lock
+ * @lock: lock to initialize
+ * @dev: netdev which this lock belongs to
+ * @share: whether XDPSQs can be shared
+ *
+ * Tracks the current XDPSQ association and enables the static lock
+ * if needed.
+ */
+static inline void libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev,
+ bool share)
+{
+ if (unlikely(share))
+ __libeth_xdpsq_get(lock, dev);
+}
+
+/**
+ * libeth_xdpsq_put - deinitialize &libeth_xdpsq_lock
+ * @lock: lock to deinitialize
+ * @dev: netdev which this lock belongs to
+ *
+ * Tracks the current XDPSQ association and disables the static lock
+ * if needed.
+ */
+static inline void libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_put(lock, dev);
+}
+
+void __libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock);
+void __libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock);
+
+/**
+ * libeth_xdpsq_lock - grab &libeth_xdpsq_lock if needed
+ * @lock: lock to take
+ *
+ * Touches the underlying spinlock only if the static key is enabled
+ * and the queue itself is marked as shareable.
+ */
+static inline void libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_lock(lock);
+}
+
+/**
+ * libeth_xdpsq_unlock - free &libeth_xdpsq_lock if needed
+ * @lock: lock to free
+ *
+ * Touches the underlying spinlock only if the static key is enabled
+ * and the queue itself is marked as shareable.
+ */
+static inline void libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_unlock(lock);
+}
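In practice, the get/put pair brackets the XDPSQ lifetime while lock/unlock are used on the hot path; a hedged sketch of the setup/teardown side, with my_xdpsq being hypothetical:

/* Hedged sketch, not part of the patch: my_xdpsq is hypothetical. */
static void my_xdpsq_setup(struct my_xdpsq *xq, struct net_device *dev,
			   u32 num_xdpsq)
{
	libeth_xdpsq_get(&xq->lock, dev, libeth_xdpsq_shared(num_xdpsq));
}

static void my_xdpsq_teardown(struct my_xdpsq *xq, struct net_device *dev)
{
	libeth_xdpsq_put(&xq->lock, dev);
}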
+
+/* XDPSQ clean-up timers */
+
+void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
+ struct libeth_xdpsq_lock *lock,
+ void (*poll)(struct work_struct *work));
+
+/**
+ * libeth_xdpsq_deinit_timer - deinitialize &libeth_xdpsq_timer
+ * @timer: timer to deinitialize
+ *
+ * Flush and disable the underlying workqueue.
+ */
+static inline void libeth_xdpsq_deinit_timer(struct libeth_xdpsq_timer *timer)
+{
+ cancel_delayed_work_sync(&timer->dwork);
+}
+
+/**
+ * libeth_xdpsq_queue_timer - run &libeth_xdpsq_timer
+ * @timer: timer to queue
+ *
+ * Should be called after the queue has been filled and the transmission has
+ * been kicked off, so that the pending buffers get completed even if no
+ * further sending happens within a second (i.e. lazy cleaning never
+ * triggers).
+ * If the timer is already pending, it is simply rearmed with a one-second
+ * timeout.
+ */
+static inline void libeth_xdpsq_queue_timer(struct libeth_xdpsq_timer *timer)
+{
+ mod_delayed_work_on(raw_smp_processor_id(), system_bh_highpri_wq,
+ &timer->dwork, HZ);
+}
+
+/**
+ * libeth_xdpsq_run_timer - wrapper to run a queue clean-up on a timer event
+ * @work: workqueue belonging to the corresponding timer
+ * @poll: driver-specific completion queue poll function
+ *
+ * Run the polling function on the locked queue and requeue the timer if
+ * there's more work to do.
+ * Designed to be used via LIBETH_XDP_DEFINE_TIMER() below.
+ */
+static __always_inline void
+libeth_xdpsq_run_timer(struct work_struct *work,
+ u32 (*poll)(void *xdpsq, u32 budget))
+{
+ struct libeth_xdpsq_timer *timer = container_of(work, typeof(*timer),
+ dwork.work);
+
+ libeth_xdpsq_lock(timer->lock);
+
+ if (poll(timer->xdpsq, U32_MAX))
+ libeth_xdpsq_queue_timer(timer);
+
+ libeth_xdpsq_unlock(timer->lock);
+}
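A driver wires this up by initializing the timer during queue setup and pointing the work callback at the wrapper; a hedged sketch with hypothetical my_* names (the callback can equally be generated via LIBETH_XDP_DEFINE_TIMER() defined later in this file):

/* Hedged sketch, not part of the patch: my_* names are hypothetical. */
static u32 my_xdpsq_poll(void *xdpsq, u32 budget);

static void my_xdpsq_timer_cb(struct work_struct *work)
{
	libeth_xdpsq_run_timer(work, my_xdpsq_poll);
}

static void my_xdpsq_setup_timer(struct my_xdpsq *xq)
{
	libeth_xdpsq_init_timer(xq->timer, xq, &xq->lock, my_xdpsq_timer_cb);
}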
+
+/* Common Tx bits */
+
+/**
+ * enum - libeth_xdp internal Tx flags
+ * @LIBETH_XDP_TX_BULK: bulk size at which the queued frames get flushed
+ * @LIBETH_XDP_TX_BATCH: batch size for which the queue fill loop is unrolled
+ * @LIBETH_XDP_TX_DROP: indicates the send function must drop frames not sent
+ * @LIBETH_XDP_TX_NDO: whether the send function is called from .ndo_xdp_xmit()
+ * @LIBETH_XDP_TX_XSK: whether the function is called for ``XDP_TX`` for XSk
+ */
+enum {
+ LIBETH_XDP_TX_BULK = DEV_MAP_BULK_SIZE,
+ LIBETH_XDP_TX_BATCH = 8,
+
+ LIBETH_XDP_TX_DROP = BIT(0),
+ LIBETH_XDP_TX_NDO = BIT(1),
+ LIBETH_XDP_TX_XSK = BIT(2),
+};
+
+/**
+ * enum - &libeth_xdp_tx_frame and &libeth_xdp_tx_desc flags
+ * @LIBETH_XDP_TX_LEN: only for ``XDP_TX``, [15:0] of ::len_fl is actual length
+ * @LIBETH_XDP_TX_CSUM: for XSk xmit, enable checksum offload
+ * @LIBETH_XDP_TX_XSKMD: for XSk xmit, mask of the metadata bits
+ * @LIBETH_XDP_TX_FIRST: indicates the frag is the first one of the frame
+ * @LIBETH_XDP_TX_LAST: whether the frag is the last one of the frame
+ * @LIBETH_XDP_TX_MULTI: whether the frame contains several frags
+ * @LIBETH_XDP_TX_FLAGS: only for ``XDP_TX``, [31:16] of ::len_fl is flags
+ */
+enum {
+ LIBETH_XDP_TX_LEN = GENMASK(15, 0),
+
+ LIBETH_XDP_TX_CSUM = XDP_TXMD_FLAGS_CHECKSUM,
+ LIBETH_XDP_TX_XSKMD = LIBETH_XDP_TX_LEN,
+
+ LIBETH_XDP_TX_FIRST = BIT(16),
+ LIBETH_XDP_TX_LAST = BIT(17),
+ LIBETH_XDP_TX_MULTI = BIT(18),
+
+ LIBETH_XDP_TX_FLAGS = GENMASK(31, 16),
+};
+
+/**
+ * struct libeth_xdp_tx_frame - represents one XDP Tx element
+ * @data: frame start pointer for ``XDP_TX``
+ * @len_fl: ``XDP_TX``, combined flags [31:16] and len [15:0] field for speed
+ * @soff: ``XDP_TX``, offset from @data to the start of &skb_shared_info
+ * @frag: one (non-head) frag for ``XDP_TX``
+ * @xdpf: &xdp_frame for the head frag for .ndo_xdp_xmit()
+ * @dma: DMA address of the non-head frag for .ndo_xdp_xmit()
+ * @xsk: ``XDP_TX`` for XSk, XDP buffer for any frag
+ * @len: frag length for XSk ``XDP_TX`` and .ndo_xdp_xmit()
+ * @flags: Tx flags for the above
+ * @opts: combined @len + @flags for the above for speed
+ * @desc: XSk xmit descriptor for direct casting
+ */
+struct libeth_xdp_tx_frame {
+ union {
+ /* ``XDP_TX`` */
+ struct {
+ void *data;
+ u32 len_fl;
+ u32 soff;
+ };
+
+ /* ``XDP_TX`` frag */
+ skb_frag_t frag;
+
+ /* .ndo_xdp_xmit(), XSk ``XDP_TX`` */
+ struct {
+ union {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma;
+
+ struct libeth_xdp_buff *xsk;
+ };
+ union {
+ struct {
+ u32 len;
+ u32 flags;
+ };
+ aligned_u64 opts;
+ };
+ };
+
+ /* XSk xmit */
+ struct xdp_desc desc;
+ };
+} __aligned(sizeof(struct xdp_desc));
+static_assert(offsetof(struct libeth_xdp_tx_frame, frag.len) ==
+ offsetof(struct libeth_xdp_tx_frame, len_fl));
+static_assert(sizeof(struct libeth_xdp_tx_frame) == sizeof(struct xdp_desc));
+
+/**
+ * struct libeth_xdp_tx_bulk - XDP Tx frame bulk for bulk sending
+ * @prog: corresponding active XDP program, %NULL for .ndo_xdp_xmit()
+ * @dev: &net_device which the frames are transmitted on
+ * @xdpsq: shortcut to the corresponding driver-specific XDPSQ structure
+ * @act_mask: Rx only, mask of all the XDP prog verdicts for that NAPI session
+ * @count: current number of frames in @bulk
+ * @bulk: array of queued frames for bulk Tx
+ *
+ * All XDP Tx operations except XSk xmit queue each frame to the bulk first
+ * and flush it when @count reaches the array end. Bulk is always placed on
+ * the stack for performance. One bulk element contains all the data necessary
+ * for sending a frame and then freeing it on completion.
+ * For XSk xmit, the Tx descriptor array from &xsk_buff_pool is cast directly
+ * to &libeth_xdp_tx_frame as they are compatible and the bulk structure is
+ * not used.
+ */
+struct libeth_xdp_tx_bulk {
+ const struct bpf_prog *prog;
+ struct net_device *dev;
+ void *xdpsq;
+
+ u32 act_mask;
+ u32 count;
+ struct libeth_xdp_tx_frame bulk[LIBETH_XDP_TX_BULK];
+} __aligned(sizeof(struct libeth_xdp_tx_frame));
+
+/**
+ * LIBETH_XDP_ONSTACK_BULK - declare &libeth_xdp_tx_bulk on the stack
+ * @bq: name of the variable to declare
+ *
+ * Helper to declare a bulk on the stack with a compiler hint that it should
+ * not be initialized automatically (with `CONFIG_INIT_STACK_ALL_*`) for
+ * performance reasons.
+ */
+#define LIBETH_XDP_ONSTACK_BULK(bq) \
+ struct libeth_xdp_tx_bulk bq __uninitialized
+
+/**
+ * struct libeth_xdpsq - abstraction for an XDPSQ
+ * @pool: XSk buffer pool for XSk ``XDP_TX`` and xmit
+ * @sqes: array of Tx buffers from the actual queue struct
+ * @descs: opaque pointer to the HW descriptor array
+ * @ntu: pointer to the next free descriptor index
+ * @count: number of descriptors on that queue
+ * @pending: pointer to the number of sent-not-completed descs on that queue
+ * @xdp_tx: pointer to the above, but only for non-XSk-xmit frames
+ * @lock: corresponding XDPSQ lock
+ *
+ * Abstraction for driver-independent implementation of Tx. Placed on the stack
+ * and filled by the driver before the transmission, so that the generic
+ * functions can access and modify driver-specific resources.
+ */
+struct libeth_xdpsq {
+ struct xsk_buff_pool *pool;
+ struct libeth_sqe *sqes;
+ void *descs;
+
+ u32 *ntu;
+ u32 count;
+
+ u32 *pending;
+ u32 *xdp_tx;
+ struct libeth_xdpsq_lock *lock;
+};
+
+/**
+ * struct libeth_xdp_tx_desc - abstraction for an XDP Tx descriptor
+ * @addr: DMA address of the frame
+ * @len: length of the frame
+ * @flags: XDP Tx flags
+ * @opts: combined @len + @flags for speed
+ *
+ * Filled by the generic functions and then passed to driver-specific functions
+ * to fill a HW Tx descriptor, always placed on the [function] stack.
+ */
+struct libeth_xdp_tx_desc {
+ dma_addr_t addr;
+ union {
+ struct {
+ u32 len;
+ u32 flags;
+ };
+ aligned_u64 opts;
+ };
+} __aligned_largest;
+
+/**
+ * libeth_xdp_ptr_to_priv - convert pointer to a libeth_xdp u64 priv
+ * @ptr: pointer to convert
+ *
+ * The main sending function passes private data as the largest scalar, u64.
+ * Use this helper when you want to pass a pointer there.
+ */
+#define libeth_xdp_ptr_to_priv(ptr) ({ \
+ typecheck_pointer(ptr); \
+ ((u64)(uintptr_t)(ptr)); \
+})
+/**
+ * libeth_xdp_priv_to_ptr - convert libeth_xdp u64 priv to a pointer
+ * @priv: private data to convert
+ *
+ * The main sending function passes private data as the largest scalar, u64.
+ * Use this helper when your callback takes this u64 and you want to convert
+ * it back to a pointer.
+ */
+#define libeth_xdp_priv_to_ptr(priv) ({ \
+ static_assert(__same_type(priv, u64)); \
+ ((const void *)(uintptr_t)(priv)); \
+})
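For example, a driver that needs its ring context inside an @xmit callback could pass it through the u64 @priv argument; my_ctx and my_write_desc() below are hypothetical.

/* Hedged sketch, not part of the patch: my_* names are hypothetical. */
static void my_xmit(struct libeth_xdp_tx_desc desc, u32 i,
		    const struct libeth_xdpsq *sq, u64 priv)
{
	const struct my_ctx *ctx = libeth_xdp_priv_to_ptr(priv);

	my_write_desc(ctx, i, desc.addr, desc.len, desc.flags);
}

/* The call site would pass libeth_xdp_ptr_to_priv(ctx) as @priv. */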
+
+/*
+ * On 64-bit systems, assigning one u64 is faster than two u32s. When ::len
+ * occupies the lowest 32 bits (LE), the whole ::opts can be assigned instead.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __LIBETH_WORD_ACCESS 1
+#endif
+#ifdef __LIBETH_WORD_ACCESS
+#define __libeth_xdp_tx_len(flen, ...) \
+ .opts = ((flen) | FIELD_PREP(GENMASK_ULL(63, 32), (__VA_ARGS__ + 0)))
+#else
+#define __libeth_xdp_tx_len(flen, ...) \
+ .len = (flen), .flags = (__VA_ARGS__ + 0)
+#endif
+
+/**
+ * libeth_xdp_tx_xmit_bulk - main XDP Tx function
+ * @bulk: array of frames to send
+ * @xdpsq: pointer to the driver-specific XDPSQ struct
+ * @n: number of frames to send
+ * @unroll: whether to unroll the queue filling loop for speed
+ * @priv: driver-specific private data
+ * @prep: callback for cleaning the queue and filling abstract &libeth_xdpsq
+ * @fill: internal callback for filling &libeth_sqe and &libeth_xdp_tx_desc
+ * @xmit: callback for filling a HW descriptor with the frame info
+ *
+ * Internal abstraction for placing @n XDP Tx frames on the HW XDPSQ. Used for
+ * all types of frames: ``XDP_TX``, .ndo_xdp_xmit(), XSk ``XDP_TX``, and XSk
+ * xmit.
+ * @prep must lock the queue as this function releases it at the end. @unroll
+ * greatly increases the object code size, but also greatly increases XSk xmit
+ * performance; for other types of frames, it's not enabled.
+ * The compiler inlines all these onstack abstractions into direct data
+ * accesses.
+ *
+ * Return: number of frames actually placed on the queue, <= @n. The function
+ * can't fail, but can send fewer frames than requested if there are not
+ * enough free descriptors available. The actual free space is returned by
+ * @prep from the driver.
+ */
+static __always_inline u32
+libeth_xdp_tx_xmit_bulk(const struct libeth_xdp_tx_frame *bulk, void *xdpsq,
+ u32 n, bool unroll, u64 priv,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ struct libeth_xdp_tx_desc
+ (*fill)(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv))
+{
+ struct libeth_xdpsq sq __uninitialized;
+ u32 this, batched, off = 0;
+ u32 ntu, i = 0;
+
+ n = min(n, prep(xdpsq, &sq));
+ if (unlikely(!n))
+ goto unlock;
+
+ ntu = *sq.ntu;
+
+ this = sq.count - ntu;
+ if (likely(this > n))
+ this = n;
+
+again:
+ if (!unroll)
+ goto linear;
+
+ batched = ALIGN_DOWN(this, LIBETH_XDP_TX_BATCH);
+
+ for ( ; i < off + batched; i += LIBETH_XDP_TX_BATCH) {
+ u32 base = ntu + i - off;
+
+ unrolled_count(LIBETH_XDP_TX_BATCH)
+ for (u32 j = 0; j < LIBETH_XDP_TX_BATCH; j++)
+ xmit(fill(bulk[i + j], base + j, &sq, priv),
+ base + j, &sq, priv);
+ }
+
+ if (batched < this) {
+linear:
+ for ( ; i < off + this; i++)
+ xmit(fill(bulk[i], ntu + i - off, &sq, priv),
+ ntu + i - off, &sq, priv);
+ }
+
+ ntu += this;
+ if (likely(ntu < sq.count))
+ goto out;
+
+ ntu = 0;
+
+ if (i < n) {
+ this = n - i;
+ off = i;
+
+ goto again;
+ }
+
+out:
+ *sq.ntu = ntu;
+ *sq.pending += n;
+ if (sq.xdp_tx)
+ *sq.xdp_tx += n;
+
+unlock:
+ libeth_xdpsq_unlock(sq.lock);
+
+ return n;
+}
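The driver-facing part of this abstraction is the @prep callback, which must lock the queue and fill a &libeth_xdpsq from the driver's own ring before returning the free space; a hedged sketch assuming the hypothetical my_xdpsq layout:

/* Hedged sketch, not part of the patch: my_xdpsq and my_xdpsq_clean() are
 * hypothetical.
 */
static u32 my_xdp_tx_prep(void *xdpsq, struct libeth_xdpsq *sq)
{
	struct my_xdpsq *xq = xdpsq;

	libeth_xdpsq_lock(&xq->lock);

	*sq = (struct libeth_xdpsq){
		.sqes		= xq->sqes,
		.descs		= xq->descs,
		.ntu		= &xq->next_to_use,
		.count		= xq->count,
		.pending	= &xq->pending,
		.xdp_tx		= &xq->xdp_tx,
		.lock		= &xq->lock,
	};

	/* Driver-specific: lazily complete sent descs, return free space */
	return my_xdpsq_clean(xq);
}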
+
+/* ``XDP_TX`` bulking */
+
+void libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp);
+
+/**
+ * libeth_xdp_tx_queue_head - internal helper for queueing one ``XDP_TX`` head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdp: XDP buffer with the head to queue
+ *
+ * Return: false if it's the only frag of the frame, true if it's an S/G frame.
+ */
+static inline bool libeth_xdp_tx_queue_head(struct libeth_xdp_tx_bulk *bq,
+ const struct libeth_xdp_buff *xdp)
+{
+ const struct xdp_buff *base = &xdp->base;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .data = xdp->data,
+ .len_fl = (base->data_end - xdp->data) | LIBETH_XDP_TX_FIRST,
+ .soff = xdp_data_hard_end(base) - xdp->data,
+ };
+
+ if (!xdp_buff_has_frags(base))
+ return false;
+
+ bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_MULTI;
+
+ return true;
+}
+
+/**
+ * libeth_xdp_tx_queue_frag - internal helper for queueing one ``XDP_TX`` frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: frag to queue
+ */
+static inline void libeth_xdp_tx_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ const skb_frag_t *frag)
+{
+ bq->bulk[bq->count++].frag = *frag;
+}
+
+/**
+ * libeth_xdp_tx_queue_bulk - internal helper for queueing one ``XDP_TX`` frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdp: XDP buffer to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: true on success, false on flush error.
+ */
+static __always_inline bool
+libeth_xdp_tx_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ const struct skb_shared_info *sinfo;
+ bool ret = true;
+ u32 nr_frags;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, 0))) {
+ libeth_xdp_return_buff_slow(xdp);
+ return false;
+ }
+
+ if (!libeth_xdp_tx_queue_head(bq, xdp))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp->base);
+ nr_frags = sinfo->nr_frags;
+
+ for (u32 i = 0; i < nr_frags; i++) {
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, 0))) {
+ ret = false;
+ break;
+ }
+
+ libeth_xdp_tx_queue_frag(bq, &sinfo->frags[i]);
+ }
+
+out:
+ bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_LAST;
+ xdp->data = NULL;
+
+ return ret;
+}
+
+/**
+ * libeth_xdp_tx_fill_stats - fill &libeth_sqe with ``XDP_TX`` frame stats
+ * @sqe: SQ element to fill
+ * @desc: libeth_xdp Tx descriptor
+ * @sinfo: &skb_shared_info for this frame
+ *
+ * Internal helper for filling an SQE with the frame stats, do not use in
+ * drivers. Fills the number of frags and bytes for this frame.
+ */
+#define libeth_xdp_tx_fill_stats(sqe, desc, sinfo) \
+ __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, __UNIQUE_ID(sqe_), \
+ __UNIQUE_ID(desc_), __UNIQUE_ID(sinfo_))
+
+#define __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, ue, ud, us) do { \
+ const struct libeth_xdp_tx_desc *ud = (desc); \
+ const struct skb_shared_info *us; \
+ struct libeth_sqe *ue = (sqe); \
+ \
+ ue->nr_frags = 1; \
+ ue->bytes = ud->len; \
+ \
+ if (ud->flags & LIBETH_XDP_TX_MULTI) { \
+ us = (sinfo); \
+ ue->nr_frags += us->nr_frags; \
+ ue->bytes += us->xdp_frags_size; \
+ } \
+} while (0)
+
+/**
+ * libeth_xdp_tx_fill_buf - internal helper to fill one ``XDP_TX`` &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the synced DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xdp_tx_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+ struct skb_shared_info *sinfo;
+ skb_frag_t *frag = &frm.frag;
+ struct libeth_sqe *sqe;
+ netmem_ref netmem;
+
+ if (frm.len_fl & LIBETH_XDP_TX_FIRST) {
+ sinfo = frm.data + frm.soff;
+ skb_frag_fill_netmem_desc(frag, virt_to_netmem(frm.data),
+ offset_in_page(frm.data),
+ frm.len_fl);
+ } else {
+ sinfo = NULL;
+ }
+
+ netmem = skb_frag_netmem(frag);
+ desc = (typeof(desc)){
+ .addr = page_pool_get_dma_addr_netmem(netmem) +
+ skb_frag_off(frag),
+ .len = skb_frag_size(frag) & LIBETH_XDP_TX_LEN,
+ .flags = skb_frag_size(frag) & LIBETH_XDP_TX_FLAGS,
+ };
+
+ dma_sync_single_for_device(__netmem_get_pp(netmem)->p.dev, desc.addr,
+ desc.len, DMA_BIDIRECTIONAL);
+
+ if (!sinfo)
+ return desc;
+
+ sqe = &sq->sqes[i];
+ sqe->type = LIBETH_SQE_XDP_TX;
+ sqe->sinfo = sinfo;
+ libeth_xdp_tx_fill_stats(sqe, &desc, sinfo);
+
+ return desc;
+}
+
+void libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
+ u32 flags);
+
+/**
+ * __libeth_xdp_tx_flush_bulk - internal helper to flush one XDP Tx bulk
+ * @bq: bulk to flush
+ * @flags: XDP TX flags (.ndo_xdp_xmit(), XSk etc.)
+ * @prep: driver-specific callback to prepare the queue for sending
+ * @fill: libeth_xdp callback to fill &libeth_sqe and &libeth_xdp_tx_desc
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Internal abstraction to create bulk flush functions for drivers. Used for
+ * everything except XSk xmit.
+ *
+ * Return: true if anything was sent, false otherwise.
+ */
+static __always_inline bool
+__libeth_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ struct libeth_xdp_tx_desc
+ (*fill)(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq,
+ u64 priv))
+{
+ u32 sent, drops;
+ int err = 0;
+
+ sent = libeth_xdp_tx_xmit_bulk(bq->bulk, bq->xdpsq,
+ min(bq->count, LIBETH_XDP_TX_BULK),
+ false, 0, prep, fill, xmit);
+ drops = bq->count - sent;
+
+ if (unlikely(drops)) {
+ libeth_xdp_tx_exception(bq, sent, flags);
+ err = -ENXIO;
+ } else {
+ bq->count = 0;
+ }
+
+ trace_xdp_bulk_tx(bq->dev, sent, drops, err);
+
+ return likely(sent);
+}
+
+/**
+ * libeth_xdp_tx_flush_bulk - wrapper to define flush of one ``XDP_TX`` bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see above
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XDP_DEFINE_FLUSH_TX() to define an ``XDP_TX`` driver
+ * callback.
+ */
+#define libeth_xdp_tx_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, flags, prep, libeth_xdp_tx_fill_buf, \
+ xmit)
+
+/* .ndo_xdp_xmit() implementation */
+
+/**
+ * libeth_xdp_xmit_init_bulk - internal helper to initialize bulk for XDP xmit
+ * @bq: bulk to initialize
+ * @dev: target &net_device
+ * @xdpsqs: array of driver-specific XDPSQ structs
+ * @num: number of active XDPSQs (the above array length)
+ */
+#define libeth_xdp_xmit_init_bulk(bq, dev, xdpsqs, num) \
+ __libeth_xdp_xmit_init_bulk(bq, dev, (xdpsqs)[libeth_xdpsq_id(num)])
+
+static inline void __libeth_xdp_xmit_init_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct net_device *dev,
+ void *xdpsq)
+{
+ bq->dev = dev;
+ bq->xdpsq = xdpsq;
+ bq->count = 0;
+}
+
+/**
+ * libeth_xdp_xmit_frame_dma - internal helper to access DMA of an &xdp_frame
+ * @xf: pointer to the XDP frame
+ *
+ * There's no place in &libeth_xdp_tx_frame to store the DMA address of an
+ * &xdp_frame head. The headroom is used instead: the address is placed right
+ * after the frame struct, naturally aligned.
+ *
+ * Return: pointer to the DMA address to use.
+ */
+#define libeth_xdp_xmit_frame_dma(xf) \
+ _Generic((xf), \
+ const struct xdp_frame *: \
+ (const dma_addr_t *)__libeth_xdp_xmit_frame_dma(xf), \
+ struct xdp_frame *: \
+ (dma_addr_t *)__libeth_xdp_xmit_frame_dma(xf) \
+ )
+
+static inline void *__libeth_xdp_xmit_frame_dma(const struct xdp_frame *xdpf)
+{
+ void *addr = (void *)(xdpf + 1);
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __alignof(*xdpf) < sizeof(dma_addr_t))
+ addr = PTR_ALIGN(addr, sizeof(dma_addr_t));
+
+ return addr;
+}
+
+/**
+ * libeth_xdp_xmit_queue_head - internal helper for queueing one XDP xmit head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdpf: XDP frame with the head to queue
+ * @dev: device to perform DMA mapping
+ *
+ * Return: ``LIBETH_XDP_DROP`` on DMA mapping error,
+ * ``LIBETH_XDP_PASS`` if it's the only frag in the frame,
+ * ``LIBETH_XDP_TX`` if it's an S/G frame.
+ */
+static inline u32 libeth_xdp_xmit_queue_head(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame *xdpf,
+ struct device *dev)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ return LIBETH_XDP_DROP;
+
+ *libeth_xdp_xmit_frame_dma(xdpf) = dma;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xdpf = xdpf,
+ __libeth_xdp_tx_len(xdpf->len, LIBETH_XDP_TX_FIRST),
+ };
+
+ if (!xdp_frame_has_frags(xdpf))
+ return LIBETH_XDP_PASS;
+
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI;
+
+ return LIBETH_XDP_TX;
+}
+
+/**
+ * libeth_xdp_xmit_queue_frag - internal helper for queueing one XDP xmit frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: frag to queue
+ * @dev: device to perform DMA mapping
+ *
+ * Return: true on success, false on DMA mapping error.
+ */
+static inline bool libeth_xdp_xmit_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ const skb_frag_t *frag,
+ struct device *dev)
+{
+ dma_addr_t dma;
+
+ dma = skb_frag_dma_map(dev, frag);
+ if (dma_mapping_error(dev, dma))
+ return false;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .dma = dma,
+ __libeth_xdp_tx_len(skb_frag_size(frag)),
+ };
+
+ return true;
+}
+
+/**
+ * libeth_xdp_xmit_queue_bulk - internal helper for queueing one XDP xmit frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdpf: XDP frame to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: ``LIBETH_XDP_TX`` on success,
+ * ``LIBETH_XDP_DROP`` if the frame should be dropped by the stack,
+ * ``LIBETH_XDP_ABORTED`` if the frame will be dropped by libeth_xdp.
+ */
+static __always_inline u32
+libeth_xdp_xmit_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame *xdpf,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ u32 head, nr_frags, i, ret = LIBETH_XDP_TX;
+ struct device *dev = bq->dev->dev.parent;
+ const struct skb_shared_info *sinfo;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_NDO)))
+ return LIBETH_XDP_DROP;
+
+ head = libeth_xdp_xmit_queue_head(bq, xdpf, dev);
+ if (head == LIBETH_XDP_PASS)
+ goto out;
+ else if (head == LIBETH_XDP_DROP)
+ return LIBETH_XDP_DROP;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ nr_frags = sinfo->nr_frags;
+
+ for (i = 0; i < nr_frags; i++) {
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_NDO)))
+ break;
+
+ if (!libeth_xdp_xmit_queue_frag(bq, &sinfo->frags[i], dev))
+ break;
+ }
+
+ if (unlikely(i < nr_frags))
+ ret = LIBETH_XDP_ABORTED;
+
+out:
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST;
+
+ return ret;
+}
+
+/**
+ * libeth_xdp_xmit_fill_buf - internal helper to fill one XDP xmit &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the mapped DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xdp_xmit_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+ struct libeth_sqe *sqe;
+ struct xdp_frame *xdpf;
+
+ if (frm.flags & LIBETH_XDP_TX_FIRST) {
+ xdpf = frm.xdpf;
+ desc.addr = *libeth_xdp_xmit_frame_dma(xdpf);
+ } else {
+ xdpf = NULL;
+ desc.addr = frm.dma;
+ }
+ desc.opts = frm.opts;
+
+ sqe = &sq->sqes[i];
+ dma_unmap_addr_set(sqe, dma, desc.addr);
+ dma_unmap_len_set(sqe, len, desc.len);
+
+ if (!xdpf) {
+ sqe->type = LIBETH_SQE_XDP_XMIT_FRAG;
+ return desc;
+ }
+
+ sqe->type = LIBETH_SQE_XDP_XMIT;
+ sqe->xdpf = xdpf;
+ libeth_xdp_tx_fill_stats(sqe, &desc,
+ xdp_get_shared_info_from_frame(xdpf));
+
+ return desc;
+}
+
+/**
+ * libeth_xdp_xmit_flush_bulk - wrapper to define flush of one XDP xmit bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see __libeth_xdp_tx_flush_bulk()
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XDP_DEFINE_FLUSH_XMIT() to define an XDP xmit driver
+ * callback.
+ */
+#define libeth_xdp_xmit_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, (flags) | LIBETH_XDP_TX_NDO, prep, \
+ libeth_xdp_xmit_fill_buf, xmit)
+
+u32 libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count, const struct net_device *dev);
+
+/**
+ * __libeth_xdp_xmit_do_bulk - internal function to implement .ndo_xdp_xmit()
+ * @bq: XDP Tx bulk to queue frames to
+ * @frames: XDP frames passed by the stack
+ * @n: number of frames
+ * @flags: flags passed by the stack
+ * @flush_bulk: driver callback to flush an XDP xmit bulk
+ * @finalize: driver callback to finalize sending XDP Tx frames on the queue
+ *
+ * Perform common checks, map the frags and queue them to the bulk, then flush
+ * the bulk to the XDPSQ. If requested by the stack, finalize the queue.
+ *
+ * Return: number of frames sent or -errno on error.
+ */
+static __always_inline int
+__libeth_xdp_xmit_do_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame **frames, u32 n, u32 flags,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ u32 nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (u32 i = 0; likely(i < n); i++) {
+ u32 ret;
+
+ ret = libeth_xdp_xmit_queue_bulk(bq, frames[i], flush_bulk);
+ if (unlikely(ret != LIBETH_XDP_TX)) {
+ nxmit += ret == LIBETH_XDP_ABORTED;
+ break;
+ }
+
+ nxmit++;
+ }
+
+ if (bq->count) {
+ flush_bulk(bq, LIBETH_XDP_TX_NDO);
+ if (unlikely(bq->count))
+ nxmit -= libeth_xdp_xmit_return_bulk(bq->bulk,
+ bq->count,
+ bq->dev);
+ }
+
+ finalize(bq->xdpsq, nxmit, flags & XDP_XMIT_FLUSH);
+
+ return nxmit;
+}
+
+/**
+ * libeth_xdp_xmit_do_bulk - implement full .ndo_xdp_xmit() in driver
+ * @dev: target &net_device
+ * @n: number of frames to send
+ * @fr: XDP frames to send
+ * @f: flags passed by the stack
+ * @xqs: array of XDPSQs driver structs
+ * @nqs: number of active XDPSQs, the above array length
+ * @fl: driver callback to flush an XDP xmit bulk
+ * @fin: driver callback to finalize the queue
+ *
+ * If the driver has active XDPSQs, perform common checks and send the frames.
+ * Finalize the queue, if requested.
+ *
+ * Return: number of frames sent or -errno on error.
+ */
+#define libeth_xdp_xmit_do_bulk(dev, n, fr, f, xqs, nqs, fl, fin) \
+ _libeth_xdp_xmit_do_bulk(dev, n, fr, f, xqs, nqs, fl, fin, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(ret_), \
+ __UNIQUE_ID(nqs_))
+
+#define _libeth_xdp_xmit_do_bulk(d, n, fr, f, xqs, nqs, fl, fin, ub, ur, un) \
+({ \
+ u32 un = (nqs); \
+ int ur; \
+ \
+ if (likely(un)) { \
+ LIBETH_XDP_ONSTACK_BULK(ub); \
+ \
+ libeth_xdp_xmit_init_bulk(&ub, d, xqs, un); \
+ ur = __libeth_xdp_xmit_do_bulk(&ub, fr, n, f, fl, fin); \
+ } else { \
+ ur = -ENXIO; \
+ } \
+ \
+ ur; \
+})
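A full .ndo_xdp_xmit() built on this helper then reduces to a one-liner; my_netdev_priv, my_xdp_flush_xmit() (generated via LIBETH_XDP_DEFINE_FLUSH_XMIT()) and my_xdp_finalize_sq() are hypothetical.

/* Hedged sketch, not part of the patch: my_* names are hypothetical. */
static int my_xdp_xmit(struct net_device *dev, int n,
		       struct xdp_frame **frames, u32 flags)
{
	struct my_netdev_priv *priv = netdev_priv(dev);

	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags, priv->xdpsqs,
				       priv->num_xdpsq, my_xdp_flush_xmit,
				       my_xdp_finalize_sq);
}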
+
+/* Rx polling path */
+
+/**
+ * libeth_xdp_tx_init_bulk - initialize an XDP Tx bulk for Rx NAPI poll
+ * @bq: bulk to initialize
+ * @prog: RCU pointer to the XDP program (can be %NULL)
+ * @dev: target &net_device
+ * @xdpsqs: array of driver XDPSQ structs
+ * @num: number of active XDPSQs, the above array length
+ *
+ * Should be called on an onstack XDP Tx bulk before the NAPI polling loop.
+ * Initializes all the needed fields to run libeth_xdp functions. If @num == 0,
+ * assumes XDP is not enabled.
+ * Do not use for XSk, it has its own optimized helper.
+ */
+#define libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num) \
+ __libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, false, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(nqs_))
+
+#define __libeth_xdp_tx_init_bulk(bq, pr, d, xdpsqs, num, xsk, ub, un) do { \
+ typeof(bq) ub = (bq); \
+ u32 un = (num); \
+ \
+ rcu_read_lock(); \
+ \
+ if (un || (xsk)) { \
+ ub->prog = rcu_dereference(pr); \
+ ub->dev = (d); \
+ ub->xdpsq = (xdpsqs)[libeth_xdpsq_id(un)]; \
+ } else { \
+ ub->prog = NULL; \
+ } \
+ \
+ ub->act_mask = 0; \
+ ub->count = 0; \
+} while (0)
+
+void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src);
+void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src);
+void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash);
+
+/**
+ * libeth_xdp_init_buff - initialize a &libeth_xdp_buff for Rx NAPI poll
+ * @dst: onstack buffer to initialize
+ * @src: XDP buffer stash placed on the queue
+ * @rxq: registered &xdp_rxq_info corresponding to this queue
+ *
+ * Should be called before the main NAPI polling loop. Loads the content of
+ * the previously saved stash or initializes the buffer from scratch.
+ * Do not use for XSk.
+ */
+static inline void
+libeth_xdp_init_buff(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src,
+ struct xdp_rxq_info *rxq)
+{
+ if (likely(!src->data))
+ dst->data = NULL;
+ else
+ libeth_xdp_load_stash(dst, src);
+
+ dst->base.rxq = rxq;
+}
+
+/**
+ * libeth_xdp_save_buff - save a partially built buffer on a queue
+ * @dst: XDP buffer stash placed on the queue
+ * @src: onstack buffer to save
+ *
+ * Should be called after the main NAPI polling loop. If the loop exited before
+ * the buffer was finished, saves its content on the queue, so that it can be
+ * completed during the next poll. Otherwise, clears the stash.
+ */
+static inline void libeth_xdp_save_buff(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src)
+{
+ if (likely(!src->data))
+ dst->data = NULL;
+ else
+ libeth_xdp_save_stash(dst, src);
+}
+
+/**
+ * libeth_xdp_return_stash - free an XDP buffer stash from a queue
+ * @stash: stash to free
+ *
+ * If the queue is about to be destroyed, but it still has an incomplete
+ * buffer stash, this helper should be called to free it.
+ */
+static inline void libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
+{
+ if (stash->data)
+ __libeth_xdp_return_stash(stash);
+}
+
+static inline void libeth_xdp_return_va(const void *data, bool napi)
+{
+ netmem_ref netmem = virt_to_netmem(data);
+
+ page_pool_put_full_netmem(__netmem_get_pp(netmem), netmem, napi);
+}
+
+static inline void libeth_xdp_return_frags(const struct skb_shared_info *sinfo,
+ bool napi)
+{
+ for (u32 i = 0; i < sinfo->nr_frags; i++) {
+ netmem_ref netmem = skb_frag_netmem(&sinfo->frags[i]);
+
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, napi);
+ }
+}
+
+/**
+ * libeth_xdp_return_buff - free/recycle &libeth_xdp_buff
+ * @xdp: buffer to free
+ *
+ * Hotpath helper to free &libeth_xdp_buff. Compared to xdp_return_buff(),
+ * it's faster as it gets inlined and always assumes order-0 pages and safe
+ * direct recycling. Zeroes @xdp->data to avoid UAFs.
+ */
+#define libeth_xdp_return_buff(xdp) __libeth_xdp_return_buff(xdp, true)
+
+static inline void __libeth_xdp_return_buff(struct libeth_xdp_buff *xdp,
+ bool napi)
+{
+ if (!xdp_buff_has_frags(&xdp->base))
+ goto out;
+
+ libeth_xdp_return_frags(xdp_get_shared_info_from_buff(&xdp->base),
+ napi);
+
+out:
+ libeth_xdp_return_va(xdp->data, napi);
+ xdp->data = NULL;
+}
+
+bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len);
+
+/**
+ * libeth_xdp_prepare_buff - fill &libeth_xdp_buff with head FQE data
+ * @xdp: XDP buffer to attach the head to
+ * @fqe: FQE containing the head buffer
+ * @len: buffer len passed from HW
+ *
+ * Internal, use libeth_xdp_process_buff() instead. Initializes XDP buffer
+ * head with the Rx buffer data: data pointer, length, headroom, and
+ * truesize/tailroom. Zeroes the flags.
+ * Uses faster single u64 write instead of per-field access.
+ */
+static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ const struct page *page = __netmem_to_page(fqe->netmem);
+
+#ifdef __LIBETH_WORD_ACCESS
+ static_assert(offsetofend(typeof(xdp->base), flags) -
+ offsetof(typeof(xdp->base), frame_sz) ==
+ sizeof(u64));
+
+ *(u64 *)&xdp->base.frame_sz = fqe->truesize;
+#else
+ xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
+#endif
+ xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
+ page->pp->p.offset, len, true);
+}
+
+/**
+ * libeth_xdp_process_buff - attach Rx buffer to &libeth_xdp_buff
+ * @xdp: XDP buffer to attach the Rx buffer to
+ * @fqe: Rx buffer to process
+ * @len: received data length from the descriptor
+ *
+ * If the XDP buffer is empty, attaches the Rx buffer as head and initializes
+ * the required fields. Otherwise, attaches the buffer as a frag.
+ * Already performs DMA sync-for-CPU and frame start prefetch
+ * (for head buffers only).
+ *
+ * Return: true on success, false if the descriptor must be skipped (empty or
+ * no space for a new frag).
+ */
+static inline bool libeth_xdp_process_buff(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ if (!libeth_rx_sync_for_cpu(fqe, len))
+ return false;
+
+ if (xdp->data)
+ return libeth_xdp_buff_add_frag(xdp, fqe, len);
+
+ libeth_xdp_prepare_buff(xdp, fqe, len);
+
+ prefetch(xdp->data);
+
+ return true;
+}
+
+/**
+ * libeth_xdp_buff_stats_frags - update onstack RQ stats with XDP frags info
+ * @ss: onstack stats to update
+ * @xdp: buffer to account
+ *
+ * Internal helper used by __libeth_xdp_run_pass(), do not call directly.
+ * Adds buffer's frags count and total len to the onstack stats.
+ */
+static inline void
+libeth_xdp_buff_stats_frags(struct libeth_rq_napi_stats *ss,
+ const struct libeth_xdp_buff *xdp)
+{
+ const struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp->base);
+ ss->bytes += sinfo->xdp_frags_size;
+ ss->fragments += sinfo->nr_frags + 1;
+}
+
+u32 libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret);
+
+/**
+ * __libeth_xdp_run_prog - run XDP program on an XDP buffer
+ * @xdp: XDP buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ *
+ * Internal inline abstraction to run XDP program. Handles ``XDP_DROP``
+ * and ``XDP_REDIRECT`` only; the rest is processed levels up.
+ * Reports an XDP prog exception on errors.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xdp_run_prog(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq)
+{
+ enum xdp_action act;
+
+ act = bpf_prog_run_xdp(bq->prog, &xdp->base);
+ if (unlikely(act < XDP_DROP || act > XDP_REDIRECT))
+ goto out;
+
+ switch (act) {
+ case XDP_PASS:
+ return LIBETH_XDP_PASS;
+ case XDP_DROP:
+ libeth_xdp_return_buff(xdp);
+
+ return LIBETH_XDP_DROP;
+ case XDP_TX:
+ return LIBETH_XDP_TX;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(bq->dev, &xdp->base, bq->prog)))
+ break;
+
+ xdp->data = NULL;
+
+ return LIBETH_XDP_REDIRECT;
+ default:
+ break;
+ }
+
+out:
+ return libeth_xdp_prog_exception(bq, xdp, act, 0);
+}
+
+/**
+ * __libeth_xdp_run_flush - run XDP program and handle ``XDP_TX`` verdict
+ * @xdp: XDP buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ * @run: internal callback for running XDP program
+ * @queue: internal callback for queuing ``XDP_TX`` frame
+ * @flush_bulk: driver callback for flushing a bulk
+ *
+ * Internal inline abstraction to run XDP program and additionally handle
+ * ``XDP_TX`` verdict. Used by both XDP and XSk, hence @run and @queue.
+ * Do not use directly.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xdp_run_flush(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq,
+ u32 (*run)(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq),
+ bool (*queue)(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)
+ (struct libeth_xdp_tx_bulk *bq,
+ u32 flags)),
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ u32 act;
+
+ act = run(xdp, bq);
+ if (act == LIBETH_XDP_TX && unlikely(!queue(bq, xdp, flush_bulk)))
+ act = LIBETH_XDP_DROP;
+
+ bq->act_mask |= act;
+
+ return act;
+}
+
+/**
+ * libeth_xdp_run_prog - run XDP program (non-XSk path) and handle all verdicts
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
+ * @fl: driver ``XDP_TX`` bulk flush callback
+ *
+ * Run the attached XDP program and handle all possible verdicts. XSk has its
+ * own version.
+ * Prefer using it via LIBETH_XDP_DEFINE_RUN{,_PASS,_PROG}().
+ *
+ * Return: true if the buffer should be passed up the stack, false if the poll
+ * should go to the next buffer.
+ */
+#define libeth_xdp_run_prog(xdp, bq, fl) \
+ (__libeth_xdp_run_flush(xdp, bq, __libeth_xdp_run_prog, \
+ libeth_xdp_tx_queue_bulk, \
+ fl) == LIBETH_XDP_PASS)
+
+/**
+ * __libeth_xdp_run_pass - helper to run XDP program and handle the result
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @md: metadata that should be filled to the XDP buffer
+ * @prep: callback for filling the metadata
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Inline abstraction that does the following (non-XSk path):
+ * 1) adds frame size and frag number (if needed) to the onstack stats;
+ * 2) fills the descriptor metadata into the onstack &libeth_xdp_buff;
+ * 3) runs XDP program if present;
+ * 4) handles all possible verdicts;
+ * 5) on ``XDP_PASS``, builds an skb from the buffer;
+ * 6) populates it with the descriptor metadata;
+ * 7) passes it up the stack.
+ *
+ * In most cases, step 2 means just writing the pointer to the HW descriptor
+ * to the XDP buffer. If so, please use LIBETH_XDP_DEFINE_RUN{,_PASS}()
+ * wrappers to build a driver function.
+ */
+static __always_inline void
+__libeth_xdp_run_pass(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq, struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs, const void *md,
+ void (*prep)(struct libeth_xdp_buff *xdp,
+ const void *md),
+ bool (*run)(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq),
+ bool (*populate)(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs))
+{
+ struct sk_buff *skb;
+
+ rs->bytes += xdp->base.data_end - xdp->data;
+ rs->packets++;
+
+ if (xdp_buff_has_frags(&xdp->base))
+ libeth_xdp_buff_stats_frags(rs, xdp);
+
+ if (prep && (!__builtin_constant_p(!!md) || md))
+ prep(xdp, md);
+
+ if (!bq || !run || !bq->prog)
+ goto build;
+
+ if (!run(xdp, bq))
+ return;
+
+build:
+ skb = xdp_build_skb_from_buff(&xdp->base);
+ if (unlikely(!skb)) {
+ libeth_xdp_return_buff_slow(xdp);
+ return;
+ }
+
+ xdp->data = NULL;
+
+ if (unlikely(!populate(skb, xdp, rs))) {
+ napi_consume_skb(skb, true);
+ return;
+ }
+
+ napi_gro_receive(napi, skb);
+}
+
+static inline void libeth_xdp_prep_desc(struct libeth_xdp_buff *xdp,
+ const void *desc)
+{
+ xdp->desc = desc;
+}
+
+/**
+ * libeth_xdp_run_pass - helper to run XDP program and handle the result
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @ss: onstack libeth RQ stats
+ * @desc: pointer to the HW descriptor for that frame
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Wrapper around the underscored version when "fill the descriptor metadata"
+ * means just writing the pointer to the HW descriptor as @xdp->desc.
+ */
+#define libeth_xdp_run_pass(xdp, bq, napi, ss, desc, run, populate) \
+ __libeth_xdp_run_pass(xdp, bq, napi, ss, desc, libeth_xdp_prep_desc, \
+ run, populate)
+
+/**
+ * libeth_xdp_finalize_rx - finalize XDPSQ after a NAPI polling loop (non-XSk)
+ * @bq: ``XDP_TX`` frame bulk
+ * @flush: driver callback to flush the bulk
+ * @finalize: driver callback to start sending the frames and run the timer
+ *
+ * Flush the bulk if there are frames left to send, kick the queue and flush
+ * the XDP maps.
+ */
+#define libeth_xdp_finalize_rx(bq, flush, finalize) \
+ __libeth_xdp_finalize_rx(bq, 0, flush, finalize)
+
+static __always_inline void
+__libeth_xdp_finalize_rx(struct libeth_xdp_tx_bulk *bq, u32 flags,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ if (bq->act_mask & LIBETH_XDP_TX) {
+ if (bq->count)
+ flush_bulk(bq, flags | LIBETH_XDP_TX_DROP);
+ finalize(bq->xdpsq, true, true);
+ }
+ if (bq->act_mask & LIBETH_XDP_REDIRECT)
+ xdp_do_flush();
+
+ rcu_read_unlock();
+}
+
+/*
+ * Helpers to reduce boilerplate code in drivers.
+ *
+ * Typical driver Rx flow would be (excl. bulk and buff init, frag attach):
+ *
+ * LIBETH_XDP_DEFINE_START();
+ * LIBETH_XDP_DEFINE_FLUSH_TX(static driver_xdp_flush_tx, driver_xdp_tx_prep,
+ * driver_xdp_xmit);
+ * LIBETH_XDP_DEFINE_RUN(static driver_xdp_run, driver_xdp_run_prog,
+ * driver_xdp_flush_tx, driver_populate_skb);
+ * LIBETH_XDP_DEFINE_FINALIZE(static driver_xdp_finalize_rx,
+ * driver_xdp_flush_tx, driver_xdp_finalize_sq);
+ * LIBETH_XDP_DEFINE_END();
+ *
+ * This will build a set of 4 static functions. The compiler is free to decide
+ * whether to inline them.
+ * Then, in the NAPI polling function:
+ *
+ * while (packets < budget) {
+ * // ...
+ * driver_xdp_run(xdp, &bq, napi, &rs, desc);
+ * }
+ * driver_xdp_finalize_rx(&bq);
+ */
+
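Expanding on the flow above, a hedged sketch of a full NAPI poll including the bulk/buff init the comment leaves out; all my_* names and descriptor accessors are hypothetical, driver_xdp_run() and driver_xdp_finalize_rx() are the functions generated by the macros above.

/* Hedged sketch, not part of the patch: all my_* names are hypothetical. */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_rxq *rxq = container_of(napi, struct my_rxq, napi);
	struct libeth_rq_napi_stats rs = { };
	LIBETH_XDP_ONSTACK_BULK(bq);
	LIBETH_XDP_ONSTACK_BUFF(xdp);

	libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->netdev, rxq->xdpsqs,
				rxq->num_xdpsq);
	libeth_xdp_init_buff(xdp, &rxq->xdp_stash, &rxq->xdp_rxq);

	while (rs.packets < budget) {
		const void *desc = my_rxq_next_desc(rxq);

		if (!desc)
			break;

		if (!libeth_xdp_process_buff(xdp, my_rxq_cur_fqe(rxq),
					     my_rxq_desc_len(desc)))
			continue;

		if (!my_rxq_desc_eop(desc))	/* S/G frame not finished */
			continue;

		driver_xdp_run(xdp, &bq, napi, &rs, desc);
	}

	driver_xdp_finalize_rx(&bq);
	libeth_xdp_save_buff(&rxq->xdp_stash, xdp);

	return rs.packets;
}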
+#define LIBETH_XDP_DEFINE_START() \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wold-style-declaration", \
+ "Allow specifying \'static\' after the return type")
+
+/**
+ * LIBETH_XDP_DEFINE_TIMER - define a driver XDPSQ cleanup timer callback
+ * @name: name of the function to define
+ * @poll: Tx polling/completion function
+ */
+#define LIBETH_XDP_DEFINE_TIMER(name, poll) \
+void name(struct work_struct *work) \
+{ \
+ libeth_xdpsq_run_timer(work, poll); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_FLUSH_TX - define a driver ``XDP_TX`` bulk flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit) \
+ __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, xdp)
+
+#define __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, pfx) \
+bool name(struct libeth_xdp_tx_bulk *bq, u32 flags) \
+{ \
+ return libeth_##pfx##_tx_flush_bulk(bq, flags, prep, xmit); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_FLUSH_XMIT - define a driver XDP xmit bulk flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XDP_DEFINE_FLUSH_XMIT(name, prep, xmit) \
+bool name(struct libeth_xdp_tx_bulk *bq, u32 flags) \
+{ \
+ return libeth_xdp_xmit_flush_bulk(bq, flags, prep, xmit); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN_PROG - define a driver XDP program run function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ */
+#define LIBETH_XDP_DEFINE_RUN_PROG(name, flush) \
+ bool __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, xdp)
+
+#define __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, pfx) \
+name(struct libeth_xdp_buff *xdp, struct libeth_xdp_tx_bulk *bq) \
+{ \
+ return libeth_##pfx##_run_prog(xdp, bq, flush); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN_PASS - define a driver buffer process + pass function
+ * @name: name of the function to define
+ * @run: driver callback to run XDP program (above)
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate) \
+ void __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, xdp)
+
+#define __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, pfx) \
+name(struct libeth_xdp_buff *xdp, struct libeth_xdp_tx_bulk *bq, \
+ struct napi_struct *napi, struct libeth_rq_napi_stats *ss, \
+ const void *desc) \
+{ \
+ return libeth_##pfx##_run_pass(xdp, bq, napi, ss, desc, run, \
+ populate); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN - define a driver buffer process, run + pass function
+ * @name: name of the function to define
+ * @run: name of the XDP prog run function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XDP_DEFINE_RUN(name, run, flush, populate) \
+ __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, XDP)
+
+#define __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, pfx) \
+ LIBETH_##pfx##_DEFINE_RUN_PROG(static run, flush); \
+ LIBETH_##pfx##_DEFINE_RUN_PASS(name, run, populate)
+
+/**
+ * LIBETH_XDP_DEFINE_FINALIZE - define a driver Rx NAPI poll finalize function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ * @finalize: driver callback to finalize an XDPSQ and run the timer
+ */
+#define LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize) \
+ __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, xdp)
+
+#define __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, pfx) \
+void name(struct libeth_xdp_tx_bulk *bq) \
+{ \
+ libeth_##pfx##_finalize_rx(bq, flush, finalize); \
+}
+
+#define LIBETH_XDP_DEFINE_END() __diag_pop()
+
+/* XMO */
+
+/**
+ * libeth_xdp_buff_to_rq - get RQ pointer from an XDP buffer pointer
+ * @xdp: &libeth_xdp_buff corresponding to the queue
+ * @type: typeof() of the driver Rx queue structure
+ * @member: name of &xdp_rxq_info inside @type
+ *
+ * Oftentimes, a pointer to the RQ is needed when reading/filling metadata
+ * from
+ * HW descriptors. The helper can be used to quickly jump from an XDP buffer
+ * to the queue corresponding to its &xdp_rxq_info without introducing
+ * additional fields (&libeth_xdp_buff is precisely 1 cacheline long on x64).
+ */
+#define libeth_xdp_buff_to_rq(xdp, type, member) \
+ container_of_const((xdp)->base.rxq, type, member)
+
+/**
+ * libeth_xdpmo_rx_hash - convert &libeth_rx_pt to an XDP RSS hash metadata
+ * @hash: pointer to the variable to write the hash to
+ * @rss_type: pointer to the variable to write the hash type to
+ * @val: hash value from the HW descriptor
+ * @pt: libeth parsed packet type
+ *
+ * Handle zeroed/non-available hash and convert libeth parsed packet type to
+ * the corresponding XDP RSS hash type. To be called at the end of
+ * the driver's xdp_metadata_ops ::xmo_rx_hash() implementation.
+ * Note that if the driver doesn't use a constant packet type lookup table but
+ * generates it at runtime, it must call libeth_rx_pt_gen_hash_type(pt) to
+ * generate XDP RSS hash type for each packet type.
+ *
+ * Return: 0 on success, -ENODATA when the hash is not available.
+ */
+static inline int libeth_xdpmo_rx_hash(u32 *hash,
+ enum xdp_rss_hash_type *rss_type,
+ u32 val, struct libeth_rx_pt pt)
+{
+ if (unlikely(!val))
+ return -ENODATA;
+
+ *hash = val;
+ *rss_type = pt.hash_type;
+
+ return 0;
+}
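+
+/*
+ * A sketch of a driver ::xmo_rx_hash() callback built on top of it; the
+ * descriptor layout (drv_rx_desc), the packet type lookup table
+ * (drv_rx_pt_lut) and the ctx-to-buffer cast are hypothetical driver-side
+ * assumptions:
+ *
+ *	static int drv_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ *				     enum xdp_rss_hash_type *rss_type)
+ *	{
+ *		const struct libeth_xdp_buff *xdp;
+ *		const struct drv_rx_desc *rxd;
+ *
+ *		xdp = (const struct libeth_xdp_buff *)ctx;
+ *		rxd = xdp->desc;
+ *
+ *		return libeth_xdpmo_rx_hash(hash, rss_type,
+ *					    le32_to_cpu(rxd->rss_hash),
+ *					    drv_rx_pt_lut[rxd->ptype]);
+ *	}
+ */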
+
+/* Tx buffer completion */
+
+void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags);
+void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp);
+
+/**
+ * __libeth_xdp_complete_tx - complete sent XDPSQE
+ * @sqe: SQ element / Tx buffer to complete
+ * @cp: Tx polling/completion params
+ * @bulk: internal callback to bulk-free ``XDP_TX`` buffers
+ * @xsk: internal callback to free XSk ``XDP_TX`` buffers
+ *
+ * Use the non-underscored version in drivers instead. This one is shared
+ * internally with libeth_tx_complete_any().
+ * Complete an XDPSQE of any type of XDP frame. This includes DMA unmapping
+ * when needed, buffer freeing, stats update, and SQE invalidation.
+ */
+static __always_inline void
+__libeth_xdp_complete_tx(struct libeth_sqe *sqe, struct libeth_cq_pp *cp,
+ typeof(libeth_xdp_return_buff_bulk) bulk,
+ typeof(libeth_xsk_buff_free_slow) xsk)
+{
+ enum libeth_sqe_type type = sqe->type;
+
+ switch (type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_XDP_XMIT:
+ case LIBETH_SQE_XDP_XMIT_FRAG:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (type) {
+ case LIBETH_SQE_XDP_TX:
+ bulk(sqe->sinfo, cp->bq, sqe->nr_frags != 1);
+ break;
+ case LIBETH_SQE_XDP_XMIT:
+ xdp_return_frame_bulk(sqe->xdpf, cp->bq);
+ break;
+ case LIBETH_SQE_XSK_TX:
+ case LIBETH_SQE_XSK_TX_FRAG:
+ xsk(sqe->xsk);
+ break;
+ default:
+ break;
+ }
+
+ switch (type) {
+ case LIBETH_SQE_XDP_TX:
+ case LIBETH_SQE_XDP_XMIT:
+ case LIBETH_SQE_XSK_TX:
+ cp->xdp_tx -= sqe->nr_frags;
+
+ cp->xss->packets++;
+ cp->xss->bytes += sqe->bytes;
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
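+/**
+ * libeth_xdp_complete_tx - complete a sent XDPSQE
+ * @sqe: SQ element / Tx buffer to complete
+ * @cp: Tx polling/completion params
+ *
+ * Non-underscored wrapper over __libeth_xdp_complete_tx() to be used in
+ * drivers.
+ */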
+static inline void libeth_xdp_complete_tx(struct libeth_sqe *sqe,
+ struct libeth_cq_pp *cp)
+{
+ __libeth_xdp_complete_tx(sqe, cp, libeth_xdp_return_buff_bulk,
+ libeth_xsk_buff_free_slow);
+}
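+
+/*
+ * A sketch of a driver completion pass feeding it, assuming NAPI (RCU)
+ * context and a hypothetical struct drv_xdpsq holding the &libeth_sqe array,
+ * ring indices and XDP stats; only the &libeth_cq_pp fields used by the XDP
+ * completion path are shown:
+ *
+ *	static void drv_clean_xdpsq(struct drv_xdpsq *xq, u32 done)
+ *	{
+ *		struct xdp_frame_bulk frame_bq;
+ *		struct libeth_cq_pp cp = {
+ *			.dev	= xq->dev,
+ *			.bq	= &frame_bq,
+ *			.xss	= &xq->xdp_stats,
+ *		};
+ *
+ *		xdp_frame_bulk_init(&frame_bq);
+ *
+ *		for (u32 i = 0; i < done; i++) {
+ *			libeth_xdp_complete_tx(&xq->sqes[xq->next_to_clean],
+ *					       &cp);
+ *
+ *			if (unlikely(++xq->next_to_clean == xq->count))
+ *				xq->next_to_clean = 0;
+ *		}
+ *
+ *		xdp_flush_frame_bulk(&frame_bq);
+ *	}
+ */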
+
+/* Misc */
+
+u32 libeth_xdp_queue_threshold(u32 count);
+
+void __libeth_xdp_set_features(struct net_device *dev,
+ const struct xdp_metadata_ops *xmo,
+ u32 zc_segs,
+ const struct xsk_tx_metadata_ops *tmo);
+void libeth_xdp_set_redirect(struct net_device *dev, bool enable);
+
+/**
+ * libeth_xdp_set_features - set XDP features for netdev
+ * @dev: &net_device to configure
+ * @...: optional params, see __libeth_xdp_set_features()
+ *
+ * Set all the features libeth_xdp supports, including .ndo_xdp_xmit(). As
+ * such, it should be used only when XDPSQs are always available regardless
+ * of whether an XDP prog is attached to @dev.
+ */
+#define libeth_xdp_set_features(dev, ...) \
+ CONCATENATE(__libeth_xdp_feat, \
+ COUNT_ARGS(__VA_ARGS__))(dev, ##__VA_ARGS__)
+
+#define __libeth_xdp_feat0(dev) \
+ __libeth_xdp_set_features(dev, NULL, 0, NULL)
+#define __libeth_xdp_feat1(dev, xmo) \
+ __libeth_xdp_set_features(dev, xmo, 0, NULL)
+#define __libeth_xdp_feat2(dev, xmo, zc_segs) \
+ __libeth_xdp_set_features(dev, xmo, zc_segs, NULL)
+#define __libeth_xdp_feat3(dev, xmo, zc_segs, tmo) \
+ __libeth_xdp_set_features(dev, xmo, zc_segs, tmo)
+
+/**
+ * libeth_xdp_set_features_noredir - enable all libeth_xdp features w/o redir
+ * @dev: target &net_device
+ * @...: optional params, see __libeth_xdp_set_features()
+ *
+ * Enable everything except the .ndo_xdp_xmit() feature; use it when XDPSQs
+ * are
+ * not available right after netdev registration.
+ */
+#define libeth_xdp_set_features_noredir(dev, ...) \
+ __libeth_xdp_set_features_noredir(dev, __UNIQUE_ID(dev_), \
+ ##__VA_ARGS__)
+
+#define __libeth_xdp_set_features_noredir(dev, ud, ...) do { \
+ struct net_device *ud = (dev); \
+ \
+ libeth_xdp_set_features(ud, ##__VA_ARGS__); \
+ libeth_xdp_set_redirect(ud, false); \
+} while (0)
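+
+/*
+ * For example, a driver whose XDPSQs exist only while an XDP prog is
+ * attached might do the following at probe time (drv_xdpmo and the ZC
+ * segment count are hypothetical, @libeth_xsktmo is defined just below):
+ *
+ *	libeth_xdp_set_features_noredir(dev, &drv_xdpmo, 16, libeth_xsktmo);
+ *
+ * and call libeth_xdp_set_redirect(dev, true) once the XDPSQs are actually
+ * created.
+ */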
+
+#define libeth_xsktmo ((const void *)GOLDEN_RATIO_PRIME)
+
+#endif /* __LIBETH_XDP_H */
diff --git a/include/net/libeth/xsk.h b/include/net/libeth/xsk.h
new file mode 100644
index 000000000000..481a7b28e6f2
--- /dev/null
+++ b/include/net/libeth/xsk.h
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_XSK_H
+#define __LIBETH_XSK_H
+
+#include <net/libeth/xdp.h>
+#include <net/xdp_sock_drv.h>
+
+/* ``XDP_TXMD_FLAGS_VALID`` is defined only under ``CONFIG_XDP_SOCKETS`` */
+#ifdef XDP_TXMD_FLAGS_VALID
+static_assert(XDP_TXMD_FLAGS_VALID <= LIBETH_XDP_TX_XSKMD);
+#endif
+
+/* ``XDP_TX`` bulking */
+
+/**
+ * libeth_xsk_tx_queue_head - internal helper for queueing XSk ``XDP_TX`` head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdp: XSk buffer with the head to queue
+ *
+ * Return: false if it's the only frag of the frame, true if it's an S/G frame.
+ */
+static inline bool libeth_xsk_tx_queue_head(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp)
+{
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xsk = xdp,
+ __libeth_xdp_tx_len(xdp->base.data_end - xdp->data,
+ LIBETH_XDP_TX_FIRST),
+ };
+
+ if (likely(!xdp_buff_has_frags(&xdp->base)))
+ return false;
+
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI;
+
+ return true;
+}
+
+/**
+ * libeth_xsk_tx_queue_frag - internal helper for queueing XSk ``XDP_TX`` frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: XSk frag to queue
+ */
+static inline void libeth_xsk_tx_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *frag)
+{
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xsk = frag,
+ __libeth_xdp_tx_len(frag->base.data_end - frag->data),
+ };
+}
+
+/**
+ * libeth_xsk_tx_queue_bulk - internal helper for queueing XSk ``XDP_TX`` frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdp: XSk buffer to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: true on success, false on flush error.
+ */
+static __always_inline bool
+libeth_xsk_tx_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ bool ret = true;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
+ libeth_xsk_buff_free_slow(xdp);
+ return false;
+ }
+
+ if (!libeth_xsk_tx_queue_head(bq, xdp))
+ goto out;
+
+ for (const struct libeth_xdp_buff *head = xdp; ; ) {
+ xdp = container_of(xsk_buff_get_frag(&head->base),
+ typeof(*xdp), base);
+ if (!xdp)
+ break;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
+ ret = false;
+ break;
+ }
+
+ libeth_xsk_tx_queue_frag(bq, xdp);
+ }
+
+out:
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST;
+
+ return ret;
+}
+
+/**
+ * libeth_xsk_tx_fill_buf - internal helper to fill XSk ``XDP_TX`` &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the synced DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xsk_tx_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_buff *xdp = frm.xsk;
+ struct libeth_xdp_tx_desc desc = {
+ .addr = xsk_buff_xdp_get_dma(&xdp->base),
+ .opts = frm.opts,
+ };
+ struct libeth_sqe *sqe;
+
+ xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);
+
+ sqe = &sq->sqes[i];
+ sqe->xsk = xdp;
+
+ if (!(desc.flags & LIBETH_XDP_TX_FIRST)) {
+ sqe->type = LIBETH_SQE_XSK_TX_FRAG;
+ return desc;
+ }
+
+ sqe->type = LIBETH_SQE_XSK_TX;
+ libeth_xdp_tx_fill_stats(sqe, &desc,
+ xdp_get_shared_info_from_buff(&xdp->base));
+
+ return desc;
+}
+
+/**
+ * libeth_xsk_tx_flush_bulk - wrapper to define flush of XSk ``XDP_TX`` bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see __libeth_xdp_tx_flush_bulk()
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XSK_DEFINE_FLUSH_TX() to define an XSk ``XDP_TX`` driver
+ * callback.
+ */
+#define libeth_xsk_tx_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, (flags) | LIBETH_XDP_TX_XSK, prep, \
+ libeth_xsk_tx_fill_buf, xmit)
+
+/* XSk TMO */
+
+/**
+ * libeth_xsktmo_req_csum - XSk Tx metadata op to request checksum offload
+ * @csum_start: unused
+ * @csum_offset: unused
+ * @priv: &libeth_xdp_tx_desc from the filling helper
+ *
+ * Generic implementation of ::tmo_request_checksum. Works only when HW doesn't
+ * require filling checksum offsets and other parameters besides the checksum
+ * request bit.
+ * Consider using within @libeth_xsktmo unless the driver requires HW-specific
+ * callbacks.
+ */
+static inline void libeth_xsktmo_req_csum(u16 csum_start, u16 csum_offset,
+ void *priv)
+{
+ ((struct libeth_xdp_tx_desc *)priv)->flags |= LIBETH_XDP_TX_CSUM;
+}
+
+/* Only to inline the callbacks below, use @libeth_xsktmo in drivers instead */
+static const struct xsk_tx_metadata_ops __libeth_xsktmo = {
+ .tmo_request_checksum = libeth_xsktmo_req_csum,
+};
+
+/**
+ * __libeth_xsk_xmit_fill_buf_md - internal helper to prepare XSk xmit w/meta
+ * @xdesc: &xdp_desc from the XSk buffer pool
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: XSk Tx metadata ops
+ *
+ * Same as __libeth_xsk_xmit_fill_buf(), but requests metadata pointer and
+ * fills additional fields in &libeth_xdp_tx_desc to ask for metadata offload.
+ *
+ * Return: XDP Tx descriptor with the DMA, metadata request bits, and other
+ * info to pass to the driver callback.
+ */
+static __always_inline struct libeth_xdp_tx_desc
+__libeth_xsk_xmit_fill_buf_md(const struct xdp_desc *xdesc,
+ const struct libeth_xdpsq *sq,
+ u64 priv)
+{
+ const struct xsk_tx_metadata_ops *tmo = libeth_xdp_priv_to_ptr(priv);
+ struct libeth_xdp_tx_desc desc;
+ struct xdp_desc_ctx ctx;
+
+ ctx = xsk_buff_raw_get_ctx(sq->pool, xdesc->addr);
+ desc = (typeof(desc)){
+ .addr = ctx.dma,
+ __libeth_xdp_tx_len(xdesc->len),
+ };
+
+ BUILD_BUG_ON(!__builtin_constant_p(tmo == libeth_xsktmo));
+ tmo = tmo == libeth_xsktmo ? &__libeth_xsktmo : tmo;
+
+ xsk_tx_metadata_request(ctx.meta, tmo, &desc);
+
+ return desc;
+}
+
+/* XSk xmit implementation */
+
+/**
+ * __libeth_xsk_xmit_fill_buf - internal helper to prepare XSk xmit w/o meta
+ * @xdesc: &xdp_desc from the XSk buffer pool
+ * @sq: XDPSQ abstraction for the queue
+ *
+ * Return: XDP Tx descriptor with the DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+__libeth_xsk_xmit_fill_buf(const struct xdp_desc *xdesc,
+ const struct libeth_xdpsq *sq)
+{
+ return (struct libeth_xdp_tx_desc){
+ .addr = xsk_buff_raw_get_dma(sq->pool, xdesc->addr),
+ __libeth_xdp_tx_len(xdesc->len),
+ };
+}
+
+/**
+ * libeth_xsk_xmit_fill_buf - internal helper to prepare an XSk xmit
+ * @frm: &xdp_desc from the XSk buffer pool
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: XSk Tx metadata ops
+ *
+ * Depending on the metadata ops presence (determined at compile time), calls
+ * the quickest helper to build a libeth XDP Tx descriptor.
+ *
+ * Return: XDP Tx descriptor with the synced DMA, metadata request bits,
+ * and other info to pass to the driver callback.
+ */
+static __always_inline struct libeth_xdp_tx_desc
+libeth_xsk_xmit_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+
+ if (priv)
+ desc = __libeth_xsk_xmit_fill_buf_md(&frm.desc, sq, priv);
+ else
+ desc = __libeth_xsk_xmit_fill_buf(&frm.desc, sq);
+
+ desc.flags |= xsk_is_eop_desc(&frm.desc) ? LIBETH_XDP_TX_LAST : 0;
+
+ xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);
+
+ return desc;
+}
+
+/**
+ * libeth_xsk_xmit_do_bulk - send XSk xmit frames
+ * @pool: XSk buffer pool containing the frames to send
+ * @xdpsq: opaque pointer to driver's XDPSQ struct
+ * @budget: maximum number of frames that can be sent
+ * @tmo: optional XSk Tx metadata ops
+ * @prep: driver callback to build a &libeth_xdpsq
+ * @xmit: driver callback to put frames to a HW queue
+ * @finalize: driver callback to start a transmission
+ *
+ * Implements generic XSk xmit. Always turns on XSk Tx wakeup as it's assumed
+ * lazy cleaning is used and interrupts are disabled for the queue.
+ * HW descriptor filling is unrolled by ``LIBETH_XDP_TX_BATCH`` to optimize
+ * writes.
+ * Note that unlike other XDP Tx ops, the queue must be locked and cleaned
+ * prior to calling this function, so that the available @budget is already
+ * known. @prep must only build a &libeth_xdpsq and return ``U32_MAX``.
+ *
+ * Return: false if @budget was exhausted, true otherwise.
+ */
+static __always_inline bool
+libeth_xsk_xmit_do_bulk(struct xsk_buff_pool *pool, void *xdpsq, u32 budget,
+ const struct xsk_tx_metadata_ops *tmo,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ const struct libeth_xdp_tx_frame *bulk;
+ bool wake;
+ u32 n;
+
+ wake = xsk_uses_need_wakeup(pool);
+ if (wake)
+ xsk_clear_tx_need_wakeup(pool);
+
+ n = xsk_tx_peek_release_desc_batch(pool, budget);
+ bulk = container_of(&pool->tx_descs[0], typeof(*bulk), desc);
+
+ libeth_xdp_tx_xmit_bulk(bulk, xdpsq, n, true,
+ libeth_xdp_ptr_to_priv(tmo), prep,
+ libeth_xsk_xmit_fill_buf, xmit);
+ finalize(xdpsq, n, true);
+
+ if (wake)
+ xsk_set_tx_need_wakeup(pool);
+
+ return n < budget;
+}
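+
+/*
+ * A sketch of the driver glue, assuming hypothetical drv_xsk_xmit_prep(),
+ * drv_xmit_desc() and drv_finalize_sq() callbacks with the signatures
+ * documented above, and an XDPSQ that has already been locked and cleaned:
+ *
+ *	static bool drv_xsk_xmit(struct drv_xdpsq *xq, u32 budget)
+ *	{
+ *		return libeth_xsk_xmit_do_bulk(xq->pool, xq, budget,
+ *					       libeth_xsktmo,
+ *					       drv_xsk_xmit_prep,
+ *					       drv_xmit_desc,
+ *					       drv_finalize_sq);
+ *	}
+ */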
+
+/* Rx polling path */
+
+/**
+ * libeth_xsk_tx_init_bulk - initialize XDP Tx bulk for an XSk Rx NAPI poll
+ * @bq: bulk to initialize
+ * @prog: RCU pointer to the XDP program (never %NULL)
+ * @dev: target &net_device
+ * @xdpsqs: array of driver XDPSQ structs
+ * @num: number of active XDPSQs, the above array length
+ *
+ * Should be called on an onstack XDP Tx bulk before the XSk NAPI polling loop.
+ * Initializes all the needed fields to run libeth_xdp functions.
+ * Never checks if @prog is %NULL or @num == 0 as XDP must always be enabled
+ * when hitting this path.
+ */
+#define libeth_xsk_tx_init_bulk(bq, prog, dev, xdpsqs, num) \
+ __libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, true, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(nqs_))
+
+struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp);
+
+/**
+ * libeth_xsk_process_buff - attach XSk Rx buffer to &libeth_xdp_buff
+ * @head: head XSk buffer to attach the XSk buffer to (or %NULL)
+ * @xdp: XSk buffer to process
+ * @len: received data length from the descriptor
+ *
+ * If @head == %NULL, treats the XSk buffer as head and initializes
+ * the required fields. Otherwise, attaches the buffer as a frag.
+ * Already performs DMA sync-for-CPU and frame start prefetch
+ * (for head buffers only).
+ *
+ * Return: head XSk buffer on success or if the descriptor must be skipped
+ * (empty), %NULL if there is no space for a new frag.
+ */
+static inline struct libeth_xdp_buff *
+libeth_xsk_process_buff(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp, u32 len)
+{
+ if (unlikely(!len)) {
+ libeth_xsk_buff_free_slow(xdp);
+ return head;
+ }
+
+ xsk_buff_set_size(&xdp->base, len);
+ xsk_buff_dma_sync_for_cpu(&xdp->base);
+
+ if (head)
+ return libeth_xsk_buff_add_frag(head, xdp);
+
+ prefetch(xdp->data);
+
+ return xdp;
+}
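+
+/*
+ * A sketch of one per-descriptor step of an XSk Rx loop, assuming a
+ * hypothetical drv_rx_queue layout and a drv_xsk_run() built with
+ * LIBETH_XSK_DEFINE_RUN() as shown further below:
+ *
+ *	static bool drv_xsk_process_one(struct drv_rx_queue *rxq,
+ *					struct libeth_xdp_tx_bulk *bq,
+ *					struct libeth_rq_napi_stats *rs,
+ *					const void *desc, u32 len)
+ *	{
+ *		struct libeth_xdp_buff *buf = rxq->fqes[rxq->next_to_clean];
+ *		struct libeth_xdp_buff *xdp;
+ *
+ *		xdp = libeth_xsk_process_buff(rxq->xdp_head, buf, len);
+ *		rxq->xdp_head = xdp;
+ *		if (!xdp)
+ *			return false;
+ *
+ *		if (!drv_desc_is_eop(desc))
+ *			return true;
+ *
+ *		rxq->xdp_head = NULL;
+ *
+ *		return drv_xsk_run(xdp, bq, rxq->napi, rs, desc);
+ *	}
+ */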
+
+void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
+ const struct libeth_xdp_buff *xdp);
+
+u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq,
+ enum xdp_action act, int ret);
+
+/**
+ * __libeth_xsk_run_prog - run XDP program on XSk buffer
+ * @xdp: XSk buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ *
+ * Internal inline abstraction to run XDP program on XSk Rx path. Handles
+ * only the most common ``XDP_REDIRECT`` inline, the rest is processed
+ * externally.
+ * Reports an XDP prog exception on errors.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xsk_run_prog(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq)
+{
+ enum xdp_action act;
+ int ret = 0;
+
+ act = bpf_prog_run_xdp(bq->prog, &xdp->base);
+ if (unlikely(act != XDP_REDIRECT))
+rest:
+ return __libeth_xsk_run_prog_slow(xdp, bq, act, ret);
+
+ ret = xdp_do_redirect(bq->dev, &xdp->base, bq->prog);
+ if (unlikely(ret))
+ goto rest;
+
+ return LIBETH_XDP_REDIRECT;
+}
+
+/**
+ * libeth_xsk_run_prog - run XDP program on XSk path and handle all verdicts
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
+ * @fl: driver ``XDP_TX`` bulk flush callback
+ *
+ * Run the attached XDP program and handle all possible verdicts.
+ * Prefer using it via LIBETH_XSK_DEFINE_RUN{,_PASS,_PROG}().
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+#define libeth_xsk_run_prog(xdp, bq, fl) \
+ __libeth_xdp_run_flush(xdp, bq, __libeth_xsk_run_prog, \
+ libeth_xsk_tx_queue_bulk, fl)
+
+/**
+ * __libeth_xsk_run_pass - helper to run XDP program and handle the result
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @md: metadata that should be filled to the XSk buffer
+ * @prep: callback for filling the metadata
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Inline abstraction, XSk's counterpart of __libeth_xdp_run_pass(), see its
+ * doc for details.
+ *
+ * Return: false if the polling loop must be exited due to lack of free
+ * buffers, true otherwise.
+ */
+static __always_inline bool
+__libeth_xsk_run_pass(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq, struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs, const void *md,
+ void (*prep)(struct libeth_xdp_buff *xdp,
+ const void *md),
+ u32 (*run)(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq),
+ bool (*populate)(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs))
+{
+ struct sk_buff *skb;
+ u32 act;
+
+ rs->bytes += xdp->base.data_end - xdp->data;
+ rs->packets++;
+
+ if (unlikely(xdp_buff_has_frags(&xdp->base)))
+ libeth_xsk_buff_stats_frags(rs, xdp);
+
+ if (prep && (!__builtin_constant_p(!!md) || md))
+ prep(xdp, md);
+
+ act = run(xdp, bq);
+ if (likely(act == LIBETH_XDP_REDIRECT))
+ return true;
+
+ if (act != LIBETH_XDP_PASS)
+ return act != LIBETH_XDP_ABORTED;
+
+ skb = xdp_build_skb_from_zc(&xdp->base);
+ if (unlikely(!skb)) {
+ libeth_xsk_buff_free_slow(xdp);
+ return true;
+ }
+
+ if (unlikely(!populate(skb, xdp, rs))) {
+ napi_consume_skb(skb, true);
+ return true;
+ }
+
+ napi_gro_receive(napi, skb);
+
+ return true;
+}
+
+/**
+ * libeth_xsk_run_pass - helper to run XDP program and handle the result
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @desc: pointer to the HW descriptor for that frame
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Wrapper around the underscored version when "fill the descriptor metadata"
+ * means just writing the pointer to the HW descriptor as @xdp->desc.
+ */
+#define libeth_xsk_run_pass(xdp, bq, napi, rs, desc, run, populate) \
+ __libeth_xsk_run_pass(xdp, bq, napi, rs, desc, libeth_xdp_prep_desc, \
+ run, populate)
+
+/**
+ * libeth_xsk_finalize_rx - finalize XDPSQ after an XSk NAPI polling loop
+ * @bq: ``XDP_TX`` frame bulk
+ * @flush: driver callback to flush the bulk
+ * @finalize: driver callback to start sending the frames and run the timer
+ *
+ * Flush the bulk if there are frames left to send, kick the queue and flush
+ * the XDP maps.
+ */
+#define libeth_xsk_finalize_rx(bq, flush, finalize) \
+ __libeth_xdp_finalize_rx(bq, LIBETH_XDP_TX_XSK, flush, finalize)
+
+/*
+ * Helpers to reduce boilerplate code in drivers.
+ *
+ * Typical driver XSk Rx flow would be (excl. bulk and buff init, frag attach):
+ *
+ * LIBETH_XDP_DEFINE_START();
+ * LIBETH_XSK_DEFINE_FLUSH_TX(static driver_xsk_flush_tx, driver_xsk_tx_prep,
+ * driver_xdp_xmit);
+ * LIBETH_XSK_DEFINE_RUN(static driver_xsk_run, driver_xsk_run_prog,
+ * driver_xsk_flush_tx, driver_populate_skb);
+ * LIBETH_XSK_DEFINE_FINALIZE(static driver_xsk_finalize_rx,
+ * driver_xsk_flush_tx, driver_xdp_finalize_sq);
+ * LIBETH_XDP_DEFINE_END();
+ *
+ * This will build a set of 4 static functions. The compiler is free to decide
+ * whether to inline them.
+ * Then, in the NAPI polling function:
+ *
+ * while (packets < budget) {
+ * // ...
+ * if (!driver_xsk_run(xdp, &bq, napi, &rs, desc))
+ * break;
+ * }
+ * driver_xsk_finalize_rx(&bq);
+ */
+
+/**
+ * LIBETH_XSK_DEFINE_FLUSH_TX - define a driver XSk ``XDP_TX`` flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XSK_DEFINE_FLUSH_TX(name, prep, xmit) \
+ __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN_PROG - define a driver XDP program run function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ */
+#define LIBETH_XSK_DEFINE_RUN_PROG(name, flush) \
+ u32 __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN_PASS - define a driver buffer process + pass function
+ * @name: name of the function to define
+ * @run: driver callback to run XDP program (above)
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XSK_DEFINE_RUN_PASS(name, run, populate) \
+ bool __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN - define a driver buffer process, run + pass function
+ * @name: name of the function to define
+ * @run: name of the XDP prog run function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XSK_DEFINE_RUN(name, run, flush, populate) \
+ __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, XSK)
+
+/**
+ * LIBETH_XSK_DEFINE_FINALIZE - define a driver XSk NAPI poll finalize function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ * @finalize: driver callback to finalize an XDPSQ and run the timer
+ */
+#define LIBETH_XSK_DEFINE_FINALIZE(name, flush, finalize) \
+ __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, xsk)
+
+/* Refilling */
+
+/**
+ * struct libeth_xskfq - structure representing an XSk buffer (fill) queue
+ * @fp: hotpath part of the structure
+ * @pool: &xsk_buff_pool for buffer management
+ * @fqes: array of XSk buffer pointers
+ * @descs: opaque pointer to the HW descriptor array
+ * @ntu: index of the next FQE to be refilled
+ * @count: number of descriptors/buffers the queue has
+ * @pending: current number of XSkFQEs to refill
+ * @thresh: threshold below which the queue is refilled
+ * @buf_len: HW-writeable length of each buffer
+ * @nid: ID of the closest NUMA node with memory
+ */
+struct libeth_xskfq {
+ struct_group_tagged(libeth_xskfq_fp, fp,
+ struct xsk_buff_pool *pool;
+ struct libeth_xdp_buff **fqes;
+ void *descs;
+
+ u32 ntu;
+ u32 count;
+ );
+
+ /* Cold fields */
+ u32 pending;
+ u32 thresh;
+
+ u32 buf_len;
+ int nid;
+};
+
+int libeth_xskfq_create(struct libeth_xskfq *fq);
+void libeth_xskfq_destroy(struct libeth_xskfq *fq);
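+
+/*
+ * A sketch of XSkFQ setup at queue-enable time; the drv_rx_queue fields are
+ * hypothetical, and exactly which &libeth_xskfq fields the caller must
+ * pre-fill vs. which ones libeth_xskfq_create() fills back is an assumption
+ * of this example:
+ *
+ *	static int drv_xskfq_init(struct drv_rx_queue *rxq,
+ *				  struct xsk_buff_pool *pool)
+ *	{
+ *		struct libeth_xskfq *fq = &rxq->xskfq;
+ *		int err;
+ *
+ *		fq->pool  = pool;
+ *		fq->count = rxq->desc_count;
+ *		fq->nid   = rxq->numa_node;
+ *
+ *		err = libeth_xskfq_create(fq);
+ *		if (err)
+ *			return err;
+ *
+ *		rxq->fqes = fq->fqes;
+ *
+ *		return 0;
+ *	}
+ */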
+
+/**
+ * libeth_xsk_buff_xdp_get_dma - get DMA address of XSk &libeth_xdp_buff
+ * @xdp: buffer to get the DMA addr for
+ */
+#define libeth_xsk_buff_xdp_get_dma(xdp) \
+ xsk_buff_xdp_get_dma(&(xdp)->base)
+
+/**
+ * libeth_xskfqe_alloc - allocate @n XSk Rx buffers
+ * @fq: hotpath part of the XSkFQ, usually onstack
+ * @n: number of buffers to allocate
+ * @fill: driver callback to write DMA addresses to HW descriptors
+ *
+ * Note that @fq->ntu gets updated, but ::pending must be recalculated
+ * by the caller.
+ *
+ * Return: number of buffers refilled.
+ */
+static __always_inline u32
+libeth_xskfqe_alloc(struct libeth_xskfq_fp *fq, u32 n,
+ void (*fill)(const struct libeth_xskfq_fp *fq, u32 i))
+{
+ u32 this, ret, done = 0;
+ struct xdp_buff **xskb;
+
+ this = fq->count - fq->ntu;
+ if (likely(this > n))
+ this = n;
+
+again:
+ xskb = (typeof(xskb))&fq->fqes[fq->ntu];
+ ret = xsk_buff_alloc_batch(fq->pool, xskb, this);
+
+ for (u32 i = 0, ntu = fq->ntu; likely(i < ret); i++)
+ fill(fq, ntu + i);
+
+ done += ret;
+ fq->ntu += ret;
+
+ if (likely(fq->ntu < fq->count) || unlikely(ret < this))
+ goto out;
+
+ fq->ntu = 0;
+
+ if (this < n) {
+ this = n - this;
+ goto again;
+ }
+
+out:
+ return done;
+}
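+
+/*
+ * A sketch of the @fill callback and a refill pass, assuming a hypothetical
+ * Rx descriptor with a single little-endian DMA address field:
+ *
+ *	static void drv_xsk_fill_fqe(const struct libeth_xskfq_fp *fq, u32 i)
+ *	{
+ *		struct drv_rx_desc *rxd;
+ *
+ *		rxd = &((struct drv_rx_desc *)fq->descs)[i];
+ *		rxd->addr =
+ *			cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
+ *	}
+ *
+ *	static void drv_xsk_refill(struct libeth_xskfq *fq)
+ *	{
+ *		fq->pending -= libeth_xskfqe_alloc(&fq->fp, fq->pending,
+ *						   drv_xsk_fill_fqe);
+ *	}
+ */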
+
+/* .ndo_xsk_wakeup */
+
+void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi);
+void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid);
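+
+/*
+ * A sketch of an .ndo_xsk_wakeup() built on top of these, assuming a
+ * per-queue call_single_data_t initialized at queue setup with
+ * libeth_xsk_init_wakeup(&rxq->csd, &rxq->napi) and hypothetical priv/queue
+ * structures:
+ *
+ *	static int drv_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+ *	{
+ *		struct drv_priv *priv = netdev_priv(dev);
+ *		struct drv_rx_queue *rxq = &priv->rxqs[qid];
+ *
+ *		libeth_xsk_wakeup(&rxq->csd, qid);
+ *
+ *		return 0;
+ *	}
+ */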
+
+/* Pool setup */
+
+int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable);
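+
+/*
+ * A sketch of the XDP_SETUP_XSK_POOL branch of a driver's .ndo_bpf(),
+ * assuming a %NULL pool pointer means "disable":
+ *
+ *	static int drv_xdp(struct net_device *dev, struct netdev_bpf *bpf)
+ *	{
+ *		switch (bpf->command) {
+ *		case XDP_SETUP_XSK_POOL:
+ *			return libeth_xsk_setup_pool(dev, bpf->xsk.queue_id,
+ *						     !!bpf->xsk.pool);
+ *		default:
+ *			return -EINVAL;
+ *		}
+ *	}
+ */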
+
+#endif /* __LIBETH_XSK_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 82617579d910..dcd5969bb559 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2428,6 +2428,7 @@ struct ieee80211_sta_aggregates {
* @he_cap: HE capabilities of this STA
* @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
* @eht_cap: EHT capabilities of this STA
+ * @s1g_cap: S1G capabilities of this STA
* @agg: per-link data for multi-link aggregation
* @bandwidth: current bandwidth the station can receive with
* @rx_nss: in HT/VHT, the maximum number of spatial streams the
@@ -2450,6 +2451,7 @@ struct ieee80211_link_sta {
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
+ struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_sta_aggregates agg;
@@ -4133,6 +4135,15 @@ struct ieee80211_prep_tx_info {
* Statistics that the driver doesn't fill will be filled by mac80211.
* The callback can sleep.
*
+ * @link_sta_statistics: Get link statistics for this station. For example with
+ * beacon filtering, the statistics kept by mac80211 might not be
+ * accurate, so let the driver pre-fill the statistics. The driver can
+ * fill most of the values (indicating which by setting the filled
+ * bitmap), but not all of them make sense - see the source for which
+ * ones are possible.
+ * Statistics that the driver doesn't fill will be filled by mac80211.
+ * The callback can sleep.
+ *
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
* Returns a negative error code on failure.
@@ -4508,7 +4519,7 @@ struct ieee80211_ops {
enum nl80211_iftype new_type, bool p2p);
void (*remove_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
- int (*config)(struct ieee80211_hw *hw, u32 changed);
+ int (*config)(struct ieee80211_hw *hw, int radio_idx, u32 changed);
void (*bss_info_changed)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -4571,8 +4582,10 @@ struct ieee80211_ops {
void (*get_key_seq)(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
struct ieee80211_key_seq *seq);
- int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
- int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
+ int (*set_frag_threshold)(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
+ int (*set_rts_threshold)(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -4627,6 +4640,10 @@ struct ieee80211_ops {
s64 offset);
void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
+ void (*link_sta_statistics)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo);
/**
* @ampdu_action:
@@ -4665,7 +4682,8 @@ struct ieee80211_ops {
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
- void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class);
+ void (*set_coverage_class)(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
@@ -4680,8 +4698,10 @@ struct ieee80211_ops {
void (*channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
- int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
- int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+ int (*set_antenna)(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
int (*remain_on_channel)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -7242,13 +7262,14 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
* ieee80211_ave_rssi - report the average RSSI for the specified interface
*
* @vif: the specified virtual interface
+ * @link_id: the link ID for MLO, or -1 for non-MLO
*
* Note: This function assumes that the given vif is valid.
*
* Return: The average RSSI value for the requested interface, or 0 if not
* applicable.
*/
-int ieee80211_ave_rssi(struct ieee80211_vif *vif);
+int ieee80211_ave_rssi(struct ieee80211_vif *vif, int link_id);
/**
* ieee80211_report_wowlan_wakeup - report WoWLAN wakeup
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 3ce56a816425..92ab85061df0 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -10,6 +10,7 @@
#include "shm_channel.h"
#define GDMA_STATUS_MORE_ENTRIES 0x00000105
+#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
@@ -58,7 +59,7 @@ enum gdma_eqe_type {
GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
GDMA_EQE_HWC_INIT_DATA = 130,
GDMA_EQE_HWC_INIT_DONE = 131,
- GDMA_EQE_HWC_SOC_RECONFIG = 132,
+ GDMA_EQE_HWC_FPGA_RECONFIG = 132,
GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
GDMA_EQE_HWC_SOC_SERVICE = 134,
GDMA_EQE_RNIC_QP_FATAL = 176,
@@ -388,7 +389,7 @@ struct gdma_context {
unsigned int max_num_queues;
unsigned int max_num_msix;
unsigned int num_msix_usable;
- struct gdma_irq_context *irq_contexts;
+ struct xarray irq_contexts;
/* L2 MTU */
u16 adapter_mtu;
@@ -403,6 +404,8 @@ struct gdma_context {
u32 test_event_eq_id;
bool is_pf;
+ bool in_service;
+
phys_addr_t bar0_pa;
void __iomem *bar0_va;
void __iomem *shm_base;
@@ -578,12 +581,20 @@ enum {
/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
+/* Driver supports dynamic MSI-X vector allocation */
+#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)
+
+/* Driver can self reset on FPGA Reconfig EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
+
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
- GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)
+ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
+ GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)
#define GDMA_DRV_CAP_FLAGS2 0
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 9abb66461211..e1030a7d2daa 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -5,6 +5,7 @@
#define _MANA_H
#include <net/xdp.h>
+#include <net/net_shaper.h>
#include "gdma.h"
#include "hw_channel.h"
@@ -404,6 +405,65 @@ struct mana_ethtool_stats {
u64 rx_cqe_unknown_type;
};
+struct mana_ethtool_phy_stats {
+ /* Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
+};
+
struct mana_context {
struct gdma_dev *gdma_dev;
@@ -467,13 +527,22 @@ struct mana_port_context {
struct mutex vport_mutex;
int vport_use_count;
+ /* Net shaper handle*/
+ struct net_shaper_handle handle;
+
u16 port_idx;
+ /* Currently configured speed (mbps) */
+ u32 speed;
+ /* Maximum speed supported by the SKU (mbps) */
+ u32 max_speed;
bool port_is_up;
bool port_st_save; /* Saved port state */
struct mana_ethtool_stats eth_stats;
+ struct mana_ethtool_phy_stats phy_stats;
+
/* Debugfs */
struct dentry *mana_port_debugfs;
};
@@ -501,6 +570,10 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
+int mana_query_link_cfg(struct mana_port_context *apc);
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping);
+void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
@@ -527,6 +600,9 @@ enum mana_command_code {
MANA_FENCE_RQ = 0x20006,
MANA_CONFIG_VPORT_RX = 0x20007,
MANA_QUERY_VPORT_CONFIG = 0x20008,
+ MANA_QUERY_LINK_CONFIG = 0x2000A,
+ MANA_SET_BW_CLAMP = 0x2000B,
+ MANA_QUERY_PHY_STAT = 0x2000c,
/* Privileged commands for the PF mode */
MANA_REGISTER_FILTER = 0x28000,
@@ -535,6 +611,35 @@ enum mana_command_code {
MANA_DEREGISTER_HW_PORT = 0x28004,
};
+/* Query Link Configuration */
+struct mana_query_link_config_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+}; /* HW DATA */
+
+struct mana_query_link_config_resp {
+ struct gdma_resp_hdr hdr;
+ u32 qos_speed_mbps;
+ u8 qos_unconfigured;
+ u8 reserved1[3];
+ u32 link_speed_mbps;
+ u8 reserved2[4];
+}; /* HW DATA */
+
+/* Set Bandwidth Clamp */
+struct mana_set_bw_clamp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ enum TRI_STATE enable_clamping;
+ u32 link_speed_mbps;
+}; /* HW DATA */
+
+struct mana_set_bw_clamp_resp {
+ struct gdma_resp_hdr hdr;
+ u8 qos_unconfigured;
+ u8 reserved[7];
+}; /* HW DATA */
+
/* Query Device Configuration */
struct mana_query_device_cfg_req {
struct gdma_req_hdr hdr;
@@ -689,6 +794,74 @@ struct mana_query_gf_stat_resp {
u64 tx_err_gdma;
}; /* HW DATA */
+/* Query phy stats */
+struct mana_query_phy_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_phy_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+
+ /* Aggregate Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC(Traffic class) traffic Counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC(Traffic Class) pause Counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
+}; /* HW DATA */
+
/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
struct gdma_req_hdr hdr;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 9a832cab5b1d..c7ce5ec7be23 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -182,6 +182,7 @@ struct pneigh_entry {
netdevice_tracker dev_tracker;
u32 flags;
u8 protocol;
+ bool permanent;
u32 key[];
};
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index ba2eaf39089b..6e835972abd1 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -294,6 +294,15 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
netif_txq_try_stop(_txq, get_desc, start_thrs); \
})
+static inline void netif_subqueue_sent(const struct net_device *dev,
+ unsigned int idx, unsigned int bytes)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, idx);
+ netdev_tx_sent_queue(txq, bytes);
+}
+
#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
({ \
struct netdev_queue *_txq; \
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 386164fb9c18..7a1dafa3f080 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -89,8 +89,7 @@ static inline unsigned int net_iov_idx(const struct net_iov *niov)
* typedef netmem_ref - a nonexistent type marking a reference to generic
* network memory.
*
- * A netmem_ref currently is always a reference to a struct page. This
- * abstraction is introduced so support for new memory types can be added.
+ * A netmem_ref can be a struct page* or a struct net_iov* underneath.
*
* Use the supplied helpers to obtain the underlying memory pointer and fields.
*/
@@ -117,9 +116,6 @@ static inline struct page *__netmem_to_page(netmem_ref netmem)
return (__force struct page *)netmem;
}
-/* This conversion fails (returns NULL) if the netmem_ref is not struct page
- * backed.
- */
static inline struct page *netmem_to_page(netmem_ref netmem)
{
if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
@@ -143,7 +139,7 @@ static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
return (__force netmem_ref)((unsigned long)niov | NET_IOV);
}
-static inline netmem_ref page_to_netmem(struct page *page)
+static inline netmem_ref page_to_netmem(const struct page *page)
{
return (__force netmem_ref)page;
}
@@ -178,6 +174,21 @@ static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
return page_to_pfn(netmem_to_page(netmem));
}
+/* __netmem_clear_lsb - convert netmem_ref to struct net_iov * for access to
+ * common fields.
+ * @netmem: netmem reference to extract as net_iov.
+ *
+ * All the sub types of netmem_ref (page, net_iov) have the same pp, pp_magic,
+ * dma_addr, and pp_ref_count fields at the same offsets. Thus, we can access
+ * these fields without checking whether the underlying memory is a net_iov
+ * or a page.
+ *
+ * The resulting value of this function can only be used to access the fields
+ * that are NET_IOV_ASSERT_OFFSET'd. Accessing any other fields will result in
+ * undefined behavior.
+ *
+ * Return: the netmem_ref cast to net_iov* regardless of its underlying type.
+ */
static inline struct net_iov *__netmem_clear_lsb(netmem_ref netmem)
{
return (struct net_iov *)((__force unsigned long)netmem & ~NET_IOV);
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 93f2c31baf9b..773fc65780b5 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -153,6 +153,13 @@ static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
return page_pool_alloc_netmem(pool, offset, size, gfp);
}
+static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+
+ return page_pool_alloc_netmems(pool, gfp);
+}
+
static inline struct page *page_pool_alloc(struct page_pool *pool,
unsigned int *offset,
unsigned int *size, gfp_t gfp)
diff --git a/include/net/pfcp.h b/include/net/pfcp.h
index af14f970b80e..639553797d3e 100644
--- a/include/net/pfcp.h
+++ b/include/net/pfcp.h
@@ -45,7 +45,7 @@ struct pfcphdr_session {
reserved:4;
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved:4,
- message_priprity:4;
+ message_priority:4;
#else
#error "Please fix <asm/byteorder>"
#endif
diff --git a/include/net/route.h b/include/net/route.h
index 8e39aa822cf9..3d3d6048ffca 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -153,7 +153,7 @@ static inline void inet_sk_init_flowi4(const struct inet_sock *inet,
ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
sk->sk_protocol, inet_sk_flowi_flags(sk), daddr,
inet->inet_saddr, inet->inet_dport,
- inet->inet_sport, sk->sk_uid);
+ inet->inet_sport, sk_uid(sk));
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
}
@@ -331,7 +331,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
ip_sock_rt_scope(sk), protocol, flow_flags, dst,
- src, dport, sport, sk->sk_uid);
+ src, dport, sport, sk_uid(sk));
}
static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
diff --git a/include/net/sock.h b/include/net/sock.h
index 4c37015b7cf7..0f2443d4ec58 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2076,6 +2076,7 @@ static inline void sock_orphan(struct sock *sk)
sock_set_flag(sk, SOCK_DEAD);
sk_set_socket(sk, NULL);
sk->sk_wq = NULL;
+ /* Note: sk_uid is unchanged. */
write_unlock_bh(&sk->sk_callback_lock);
}
@@ -2086,18 +2087,23 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
rcu_assign_pointer(sk->sk_wq, &parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
- sk->sk_uid = SOCK_INODE(parent)->i_uid;
+ WRITE_ONCE(sk->sk_uid, SOCK_INODE(parent)->i_uid);
security_sock_graft(sk, parent);
write_unlock_bh(&sk->sk_callback_lock);
}
-kuid_t sock_i_uid(struct sock *sk);
+static inline kuid_t sk_uid(const struct sock *sk)
+{
+ /* Paired with WRITE_ONCE() in sockfs_setattr() */
+ return READ_ONCE(sk->sk_uid);
+}
+
unsigned long __sock_i_ino(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
- return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+ return sk ? sk_uid(sk) : make_kuid(net->user_ns, 0);
}
static inline u32 net_tx_rndhash(void)
@@ -2590,12 +2596,12 @@ static inline gfp_t gfp_memcg_charge(void)
static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
- return noblock ? 0 : sk->sk_rcvtimeo;
+ return noblock ? 0 : READ_ONCE(sk->sk_rcvtimeo);
}
static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
- return noblock ? 0 : sk->sk_sndtimeo;
+ return noblock ? 0 : READ_ONCE(sk->sk_sndtimeo);
}
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
@@ -2677,6 +2683,10 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
+bool skb_has_tx_timestamp(struct sk_buff *skb, const struct sock *sk);
+int skb_get_tx_timestamp(struct sk_buff *skb, struct sock *sk,
+ struct timespec64 *ts);
+
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
@@ -2982,7 +2992,6 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
int sock_set_timestamping(struct sock *sk, int optname,
struct so_timestamping timestamping);
-void sock_enable_timestamps(struct sock *sk);
#if defined(CONFIG_CGROUP_BPF)
void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op);
#else
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 68269e4581b7..2515da0142a6 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -18,15 +18,6 @@ struct tcf_csum {
};
#define to_tcf_csum(a) ((struct tcf_csum *)a)
-static inline bool is_tcf_csum(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_CSUM)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_csum_update_flags(const struct tc_action *a)
{
u32 update_flags;
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index 77f87c622a2e..e6b45cb27ebf 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -92,13 +92,4 @@ static inline void
tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
#endif
-static inline bool is_tcf_ct(const struct tc_action *a)
-{
-#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (a->ops && a->ops->id == TCA_ID_CT)
- return true;
-#endif
- return false;
-}
-
#endif /* __NET_TC_CT_H */
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index c8fa11ebb397..c1a67149c6b6 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -51,15 +51,6 @@ struct tcf_gate {
#define to_gate(a) ((struct tcf_gate *)a)
-static inline bool is_tcf_gate(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_GATE)
- return true;
-#endif
- return false;
-}
-
static inline s32 tcf_gate_prio(const struct tc_action *a)
{
s32 tcfg_prio;
diff --git a/include/net/tc_act/tc_mpls.h b/include/net/tc_act/tc_mpls.h
index 721de4f5733a..d452e5e94fd0 100644
--- a/include/net/tc_act/tc_mpls.h
+++ b/include/net/tc_act/tc_mpls.h
@@ -27,15 +27,6 @@ struct tcf_mpls {
};
#define to_mpls(a) ((struct tcf_mpls *)a)
-static inline bool is_tcf_mpls(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_MPLS)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_mpls_action(const struct tc_action *a)
{
u32 tcfm_action;
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index 283bde711a42..490d88cb5233 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -44,15 +44,6 @@ struct tc_police_compat {
struct tc_ratespec peakrate;
};
-static inline bool is_tcf_police(const struct tc_action *act)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (act->ops && act->ops->id == TCA_ID_POLICE)
- return true;
-#endif
- return false;
-}
-
static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
{
struct tcf_police *police = to_police(act);
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
index b5d76305e854..abd163ca1864 100644
--- a/include/net/tc_act/tc_sample.h
+++ b/include/net/tc_act/tc_sample.h
@@ -17,15 +17,6 @@ struct tcf_sample {
};
#define to_sample(a) ((struct tcf_sample *)a)
-static inline bool is_tcf_sample(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return a->ops && a->ops->id == TCA_ID_SAMPLE;
-#else
- return false;
-#endif
-}
-
static inline __u32 tcf_sample_rate(const struct tc_action *a)
{
return to_sample(a)->rate;
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 904eddfc1826..3f5e9242b5e8 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -26,15 +26,6 @@ struct tcf_vlan {
};
#define to_vlan(a) ((struct tcf_vlan *)a)
-static inline bool is_tcf_vlan(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_VLAN)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_vlan_action(const struct tc_action *a)
{
u32 tcfv_action;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5078ad868fee..761c4a0ad386 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -321,7 +321,7 @@ extern struct proto tcp_prot;
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
-void tcp_tasklet_init(void);
+void tcp_tsq_work_init(void);
int tcp_v4_err(struct sk_buff *skb, u32);
@@ -1811,14 +1811,8 @@ static inline void tcp_mib_init(struct net *net)
}
/* from STCP */
-static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
-{
- tp->lost_skb_hint = NULL;
-}
-
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
- tcp_clear_retrans_hints_partial(tp);
tp->retransmit_skb_hint = NULL;
}
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 2df3b8344eb5..9acef2fbd2fd 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -130,35 +130,20 @@ void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
-static inline void udp_tunnel_get_rx_info(struct net_device *dev)
-{
- ASSERT_RTNL();
- if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
- return;
- call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
-}
-
-static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
-{
- ASSERT_RTNL();
- if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
- return;
- call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
-}
-
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
- bool xnet, bool nocheck);
+ bool xnet, bool nocheck, u16 ipcb_flags);
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be32 label,
- __be16 src_port, __be16 dst_port, bool nocheck);
+void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be32 label,
+ __be16 src_port, __be16 dst_port, bool nocheck,
+ u16 ip6cb_flags);
void udp_tunnel_sock_release(struct socket *sock);
@@ -221,19 +206,17 @@ static inline void udp_tunnel_encap_enable(struct sock *sk)
#define UDP_TUNNEL_NIC_MAX_TABLES 4
enum udp_tunnel_nic_info_flags {
- /* Device callbacks may sleep */
- UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0),
/* Device only supports offloads when it's open, all ports
* will be removed before close and re-added after open.
*/
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1),
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(0),
/* Device supports only IPv4 tunnels */
- UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2),
+ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(1),
/* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
* This port must not be counted towards n_entries of any table.
* Driver will not receive any callback associated with port 4789.
*/
- UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(2),
};
struct udp_tunnel_nic;
@@ -324,6 +307,9 @@ struct udp_tunnel_nic_ops {
size_t (*dump_size)(struct net_device *dev, unsigned int table);
int (*dump_write)(struct net_device *dev, unsigned int table,
struct sk_buff *skb);
+ void (*assert_locked)(struct net_device *dev);
+ void (*lock)(struct net_device *dev);
+ void (*unlock)(struct net_device *dev);
};
#ifdef CONFIG_INET
@@ -352,8 +338,28 @@ static inline void
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
unsigned int idx, u8 priv)
{
- if (udp_tunnel_nic_ops)
+ if (udp_tunnel_nic_ops) {
+ udp_tunnel_nic_ops->assert_locked(dev);
udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+ }
+}
+
+static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->assert_locked(dev);
+}
+
+static inline void udp_tunnel_nic_lock(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->lock(dev);
+}
+
+static inline void udp_tunnel_nic_unlock(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->unlock(dev);
}
static inline void
@@ -395,17 +401,50 @@ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
static inline size_t
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
+ size_t ret;
+
if (!udp_tunnel_nic_ops)
return 0;
- return udp_tunnel_nic_ops->dump_size(dev, table);
+
+ udp_tunnel_nic_ops->lock(dev);
+ ret = udp_tunnel_nic_ops->dump_size(dev, table);
+ udp_tunnel_nic_ops->unlock(dev);
+
+ return ret;
}
static inline int
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
struct sk_buff *skb)
{
+ int ret;
+
if (!udp_tunnel_nic_ops)
return 0;
- return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+
+ udp_tunnel_nic_ops->lock(dev);
+ ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
+ udp_tunnel_nic_ops->unlock(dev);
+
+ return ret;
+}
+
+static inline void udp_tunnel_get_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ udp_tunnel_nic_assert_locked(dev);
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}
+
+static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ udp_tunnel_nic_assert_locked(dev);
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
+}
+
#endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index e2f7ca045d3e..0ee50785f4f1 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -332,6 +332,7 @@ struct vxlan_dev {
#define VXLAN_F_VNIFILTER 0x20000
#define VXLAN_F_MDB 0x40000
#define VXLAN_F_LOCALBYPASS 0x80000
+#define VXLAN_F_MC_ROUTE 0x100000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -353,7 +354,9 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_COLLECT_METADATA | \
VXLAN_F_VNIFILTER | \
- VXLAN_F_LOCALBYPASS)
+ VXLAN_F_LOCALBYPASS | \
+ VXLAN_F_MC_ROUTE | \
+ 0)
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 95f59c1a6f57..54e60c6009e3 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -692,6 +692,7 @@ DEFINE_EVENT(tcp_ao_event, tcp_ao_handshake_failure,
TP_ARGS(sk, skb, keyid, rnext, maclen)
);
+#ifdef CONFIG_TCP_AO
DEFINE_EVENT(tcp_ao_event, tcp_ao_wrong_maclen,
TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
const __u8 keyid, const __u8 rnext, const __u8 maclen),
@@ -830,6 +831,7 @@ DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_rcv_sne_update,
TP_PROTO(const struct sock *sk, __u32 new_sne),
TP_ARGS(sk, new_sne)
);
+#endif /* CONFIG_TCP_AO */
#endif /* _TRACE_TCP_H */
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index bf97d4b6d51f..349e1b3ca1ae 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -192,6 +192,17 @@ enum dpll_pin_capabilities {
#define DPLL_PHASE_OFFSET_DIVIDER 1000
+/**
+ * enum dpll_feature_state - Allows controlling (enable/disable) features and
+ * checking their status.
+ * @DPLL_FEATURE_STATE_DISABLE: feature shall be disabled
+ * @DPLL_FEATURE_STATE_ENABLE: feature shall be enabled
+ */
+enum dpll_feature_state {
+ DPLL_FEATURE_STATE_DISABLE,
+ DPLL_FEATURE_STATE_ENABLE,
+};
+
enum dpll_a {
DPLL_A_ID = 1,
DPLL_A_MODULE_NAME,
@@ -204,6 +215,7 @@ enum dpll_a {
DPLL_A_TYPE,
DPLL_A_LOCK_STATUS_ERROR,
DPLL_A_CLOCK_QUALITY_LEVEL,
+ DPLL_A_PHASE_OFFSET_MONITOR,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 09a75bdb6560..fa5d645140a4 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -208,6 +208,4 @@ enum {
ETHTOOL_A_STATS_PHY_MAX = (__ETHTOOL_A_STATS_PHY_CNT - 1)
};
-#define ETHTOOL_MCGRP_MONITOR_NAME "monitor"
-
#endif /* _UAPI_LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h
index aa8ab5227c1e..8f30ffa1cd14 100644
--- a/include/uapi/linux/ethtool_netlink_generated.h
+++ b/include/uapi/linux/ethtool_netlink_generated.h
@@ -49,6 +49,34 @@ enum hwtstamp_source {
HWTSTAMP_SOURCE_PHYLIB,
};
+/**
+ * enum ethtool_pse_event - PSE event list for the PSE controller
+ * @ETHTOOL_PSE_EVENT_OVER_CURRENT: PSE output current is too high
+ * @ETHTOOL_PSE_EVENT_OVER_TEMP: PSE is in an over-temperature state
+ * @ETHTOOL_C33_PSE_EVENT_DETECTION: a detection process occurred on the PSE.
+ * IEEE 802.3-2022 33.2.5 and 145.2.6 PSE detection of PDs. IEEE 802.3-2022
+ * 30.9.1.1.5 aPSEPowerDetectionStatus.
+ * @ETHTOOL_C33_PSE_EVENT_CLASSIFICATION: a classification process occurred on
+ * the PSE. IEEE 802.3-2022 33.2.6 and 145.2.8 classification of PDs mutual
+ * identification. IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification.
+ * @ETHTOOL_C33_PSE_EVENT_DISCONNECTION: PD has been disconnected on the PSE.
+ * IEEE 802.3-2022 33.3.8 and 145.3.9 PD Maintain Power Signature. IEEE
+ * 802.3-2022 33.5.1.2.9 MPS Absent. IEEE 802.3-2022 30.9.1.1.20
+ * aPSEMPSAbsentCounter.
+ * @ETHTOOL_PSE_EVENT_OVER_BUDGET: PSE turned off due to an over-budget situation
+ * @ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR: the PSE encountered an error while
+ * managing power control from software
+ */
+enum ethtool_pse_event {
+ ETHTOOL_PSE_EVENT_OVER_CURRENT = 1,
+ ETHTOOL_PSE_EVENT_OVER_TEMP = 2,
+ ETHTOOL_C33_PSE_EVENT_DETECTION = 4,
+ ETHTOOL_C33_PSE_EVENT_CLASSIFICATION = 8,
+ ETHTOOL_C33_PSE_EVENT_DISCONNECTION = 16,
+ ETHTOOL_PSE_EVENT_OVER_BUDGET = 32,
+ ETHTOOL_PSE_EVENT_SW_PW_CONTROL_ERROR = 64,
+};
+
enum {
ETHTOOL_A_HEADER_UNSPEC,
ETHTOOL_A_HEADER_DEV_INDEX,
@@ -642,6 +670,9 @@ enum {
ETHTOOL_A_C33_PSE_EXT_SUBSTATE,
ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT,
ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES,
+ ETHTOOL_A_PSE_PW_D_ID,
+ ETHTOOL_A_PSE_PRIO_MAX,
+ ETHTOOL_A_PSE_PRIO,
__ETHTOOL_A_PSE_CNT,
ETHTOOL_A_PSE_MAX = (__ETHTOOL_A_PSE_CNT - 1)
@@ -719,6 +750,14 @@ enum {
};
enum {
+ ETHTOOL_A_PSE_NTF_HEADER = 1,
+ ETHTOOL_A_PSE_NTF_EVENTS,
+
+ __ETHTOOL_A_PSE_NTF_CNT,
+ ETHTOOL_A_PSE_NTF_MAX = (__ETHTOOL_A_PSE_NTF_CNT - 1)
+};
+
+enum {
ETHTOOL_MSG_USER_NONE = 0,
ETHTOOL_MSG_STRSET_GET = 1,
ETHTOOL_MSG_LINKINFO_GET,
@@ -822,9 +861,13 @@ enum {
ETHTOOL_MSG_PHY_NTF,
ETHTOOL_MSG_TSCONFIG_GET_REPLY,
ETHTOOL_MSG_TSCONFIG_SET_REPLY,
+ ETHTOOL_MSG_PSE_NTF,
+ ETHTOOL_MSG_RSS_NTF,
__ETHTOOL_MSG_KERNEL_CNT,
ETHTOOL_MSG_KERNEL_MAX = (__ETHTOOL_MSG_KERNEL_CNT - 1)
};
+#define ETHTOOL_MCGRP_MONITOR_NAME "monitor"
+
#endif /* _UAPI_LINUX_ETHTOOL_NETLINK_GENERATED_H */
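
Small decoding sketch (illustrative only, not part of the patch): the ETHTOOL_PSE_EVENT_* values are powers of two, so one ETHTOOL_MSG_PSE_NTF notification can report several events at once. This assumes the caller has already parsed ETHTOOL_A_PSE_NTF_EVENTS into a plain bitmask and that the generated uapi header is installed; the function name is hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <linux/ethtool_netlink_generated.h>

static void example_handle_pse_events(uint32_t events)
{
	if (events & ETHTOOL_PSE_EVENT_OVER_CURRENT)
		fprintf(stderr, "PSE: over-current\n");
	if (events & ETHTOOL_PSE_EVENT_OVER_TEMP)
		fprintf(stderr, "PSE: over-temperature\n");
	if (events & ETHTOOL_C33_PSE_EVENT_DISCONNECTION)
		fprintf(stderr, "PSE: PD disconnected\n");
	if (events & ETHTOOL_PSE_EVENT_OVER_BUDGET)
		fprintf(stderr, "PSE: power budget exceeded\n");
}
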
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 3ad2d5d98034..873c285996fe 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1398,6 +1398,7 @@ enum {
IFLA_VXLAN_LOCALBYPASS,
IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
IFLA_VXLAN_RESERVED_BITS,
+ IFLA_VXLAN_MC_ROUTE,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index ff8d21f9e95b..5a47339ef7d7 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -152,7 +152,6 @@ struct in6_flowlabel_req {
/*
* IPV6 socket options
*/
-#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADDRFORM 1
#define IPV6_2292PKTINFO 2
#define IPV6_2292HOPOPTS 3
@@ -169,8 +168,10 @@ struct in6_flowlabel_req {
#define IPV6_MULTICAST_IF 17
#define IPV6_MULTICAST_HOPS 18
#define IPV6_MULTICAST_LOOP 19
+#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADD_MEMBERSHIP 20
#define IPV6_DROP_MEMBERSHIP 21
+#endif
#define IPV6_ROUTER_ALERT 22
#define IPV6_MTU_DISCOVER 23
#define IPV6_MTU 24
@@ -203,7 +204,6 @@ struct in6_flowlabel_req {
#define IPV6_IPSEC_POLICY 34
#define IPV6_XFRM_POLICY 35
#define IPV6_HDRINCL 36
-#endif
/*
* Multicast:
diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 9dd41c2f58a6..87cbef48d4c7 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -10,13 +10,6 @@ struct net_dm_drop_point {
__u32 count;
};
-#define is_drop_point_hw(x) do {\
- int ____i, ____j;\
- for (____i = 0; ____i < 8; i ____i++)\
- ____j |= x[____i];\
- ____j;\
-} while (0)
-
#define NET_DM_CFG_VERSION 0
#define NET_DM_CFG_ALERT_COUNT 1
#define NET_DM_CFG_ALERT_DELAY 2
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index e9ccf43fe3c6..39460334dafb 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1330,7 +1330,15 @@
* TID to Link mapping for downlink/uplink traffic.
*
* @NL80211_CMD_ASSOC_MLO_RECONF: For a non-AP MLD station, request to
- * add/remove links to/from the association.
+ * add/remove links to/from the association. To report the result of a
+ * link reconfiguration request, the driver also uses this command as an
+ * event to notify userspace about the added links; removed links are
+ * reported with the existing %NL80211_CMD_LINKS_REMOVED command. This
+ * command is also used to notify userspace about links newly added to the
+ * current connection in case of AP-initiated link recommendation requests,
+ * received via a BTM (BSS Transition Management) request or a link
+ * reconfig notify frame, where the driver handles the link recommendation
+ * offload.
*
* @NL80211_CMD_EPCS_CFG: EPCS configuration for a station. Used by userland to
* control EPCS configuration. Used to notify userland on the current state
@@ -2899,6 +2907,14 @@ enum nl80211_commands {
* APs Support". Drivers may set additional flags that they support
* in the kernel or device.
*
+ * @NL80211_ATTR_WIPHY_RADIO_INDEX: (int) Integer attribute denoting the index
+ * of the radio of interest. Internally, a value of -1 is used to indicate
+ * that no radio index was given by user-space, meaning that the attributes
+ * apply to all radios. If a radio index is provided by user-space, the
+ * attributes apply to that specific radio only. If the radio index is
+ * greater than the number of radios, an error denoting an invalid value is
+ * returned.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3456,6 +3472,8 @@ enum nl80211_attrs {
NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS,
+ NL80211_ATTR_WIPHY_RADIO_INDEX,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -8088,6 +8106,7 @@ enum nl80211_ap_settings_flags {
* and contains attributes defined in &enum nl80211_if_combination_attrs.
* @NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK: bitmask (u32) of antennas
* connected to this radio.
+ * @NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD: RTS threshold (u32) of this radio.
*
* @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal
* @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute
@@ -8099,6 +8118,7 @@ enum nl80211_wiphy_radio_attrs {
NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE,
NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION,
NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
+ NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD,
/* keep last */
__NL80211_WIPHY_RADIO_ATTR_LAST,
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
index cf5609b1ca79..dcf923a1edf5 100644
--- a/lib/ref_tracker.c
+++ b/lib/ref_tracker.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
+#include <linux/seq_file.h>
#define REF_TRACKER_STACK_ENTRIES 16
#define STACK_BUF_SIZE 1024
@@ -28,6 +29,45 @@ struct ref_tracker_dir_stats {
} stacks[];
};
+#ifdef CONFIG_DEBUG_FS
+#include <linux/xarray.h>
+
+/*
+ * ref_tracker_dir_init() is usually called in allocation-safe contexts, but
+ * the same is not true of ref_tracker_dir_exit() which can be called from
+ * anywhere an object is freed. Removing debugfs dentries is a blocking
+ * operation, so we defer that work to the debugfs_reap_worker.
+ *
+ * Each dentry is tracked in the appropriate xarray. When
+ * ref_tracker_dir_exit() is called, its entries in the xarrays are marked and
+ * the workqueue job is scheduled. The worker then runs and deletes any marked
+ * dentries asynchronously.
+ */
+static struct xarray debugfs_dentries;
+static struct xarray debugfs_symlinks;
+static struct work_struct debugfs_reap_worker;
+
+#define REF_TRACKER_DIR_DEAD XA_MARK_0
+static inline void ref_tracker_debugfs_mark(struct ref_tracker_dir *dir)
+{
+ unsigned long flags;
+
+ xa_lock_irqsave(&debugfs_dentries, flags);
+ __xa_set_mark(&debugfs_dentries, (unsigned long)dir, REF_TRACKER_DIR_DEAD);
+ xa_unlock_irqrestore(&debugfs_dentries, flags);
+
+ xa_lock_irqsave(&debugfs_symlinks, flags);
+ __xa_set_mark(&debugfs_symlinks, (unsigned long)dir, REF_TRACKER_DIR_DEAD);
+ xa_unlock_irqrestore(&debugfs_symlinks, flags);
+
+ schedule_work(&debugfs_reap_worker);
+}
+#else
+static inline void ref_tracker_debugfs_mark(struct ref_tracker_dir *dir)
+{
+}
+#endif
+
static struct ref_tracker_dir_stats *
ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
{
@@ -63,21 +103,39 @@ ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
}
struct ostream {
+ void __ostream_printf (*func)(struct ostream *stream, char *fmt, ...);
+ char *prefix;
char *buf;
+ struct seq_file *seq;
int size, used;
};
+static void __ostream_printf pr_ostream_log(struct ostream *stream, char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+}
+
+static void __ostream_printf pr_ostream_buf(struct ostream *stream, char *fmt, ...)
+{
+ int ret, len = stream->size - stream->used;
+ va_list args;
+
+ va_start(args, fmt);
+ ret = vsnprintf(stream->buf + stream->used, len, fmt, args);
+ va_end(args);
+ if (ret > 0)
+ stream->used += min(ret, len);
+}
+
#define pr_ostream(stream, fmt, args...) \
({ \
struct ostream *_s = (stream); \
\
- if (!_s->buf) { \
- pr_err(fmt, ##args); \
- } else { \
- int ret, len = _s->size - _s->used; \
- ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
- _s->used += min(ret, len); \
- } \
+ _s->func(_s, fmt, ##args); \
})
static void
@@ -96,8 +154,8 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
stats = ref_tracker_get_stats(dir, display_limit);
if (IS_ERR(stats)) {
- pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
- dir->name, dir, stats);
+ pr_ostream(s, "%s%s@%p: couldn't get stats, error %pe\n",
+ s->prefix, dir->class, dir, stats);
return;
}
@@ -107,14 +165,15 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
stack = stats->stacks[i].stack_handle;
if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
sbuf[0] = 0;
- pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
- stats->stacks[i].count, stats->total, sbuf);
+ pr_ostream(s, "%s%s@%p has %d/%d users at\n%s\n", s->prefix,
+ dir->class, dir, stats->stacks[i].count,
+ stats->total, sbuf);
skipped -= stats->stacks[i].count;
}
if (skipped)
- pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
- dir->name, dir, skipped, stats->total);
+ pr_ostream(s, "%s%s@%p skipped reports about %d/%d users.\n",
+ s->prefix, dir->class, dir, skipped, stats->total);
kfree(sbuf);
@@ -124,7 +183,8 @@ __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
unsigned int display_limit)
{
- struct ostream os = {};
+ struct ostream os = { .func = pr_ostream_log,
+ .prefix = "ref_tracker: " };
__ref_tracker_dir_pr_ostream(dir, display_limit, &os);
}
@@ -143,7 +203,10 @@ EXPORT_SYMBOL(ref_tracker_dir_print);
int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
{
- struct ostream os = { .buf = buf, .size = size };
+ struct ostream os = { .func = pr_ostream_buf,
+ .prefix = "ref_tracker: ",
+ .buf = buf,
+ .size = size };
unsigned long flags;
spin_lock_irqsave(&dir->lock, flags);
@@ -161,6 +224,11 @@ void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
bool leak = false;
dir->dead = true;
+ /*
+ * The xarray entries must be marked before the dir->lock is taken to
+ * protect simultaneous debugfs readers.
+ */
+ ref_tracker_debugfs_mark(dir);
spin_lock_irqsave(&dir->lock, flags);
list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
list_del(&tracker->head);
@@ -273,3 +341,188 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *ref_tracker_debug_dir = (struct dentry *)-ENOENT;
+
+static void __ostream_printf pr_ostream_seq(struct ostream *stream, char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ seq_vprintf(stream->seq, fmt, args);
+ va_end(args);
+}
+
+static int ref_tracker_dir_seq_print(struct ref_tracker_dir *dir, struct seq_file *seq)
+{
+ struct ostream os = { .func = pr_ostream_seq,
+ .prefix = "",
+ .seq = seq };
+
+ __ref_tracker_dir_pr_ostream(dir, 16, &os);
+
+ return os.used;
+}
+
+static int ref_tracker_debugfs_show(struct seq_file *f, void *v)
+{
+ struct ref_tracker_dir *dir = f->private;
+ unsigned long index = (unsigned long)dir;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * "dir" may not exist at this point if ref_tracker_dir_exit() has
+ * already been called. Take care not to dereference it until its
+ * legitimacy is established.
+ *
+ * The xa_lock is necessary to ensure that "dir" doesn't disappear
+ * before its lock can be taken. If it's in the xarray and not marked
+ * dead, then it's safe to take dir->lock which prevents
+ * ref_tracker_dir_exit() from completing. Once the dir->lock is
+ * acquired, the xa_lock can be released. All of this must be IRQ-safe.
+ */
+ xa_lock_irqsave(&debugfs_dentries, flags);
+ if (!xa_load(&debugfs_dentries, index) ||
+ xa_get_mark(&debugfs_dentries, index, REF_TRACKER_DIR_DEAD)) {
+ xa_unlock_irqrestore(&debugfs_dentries, flags);
+ return -ENODATA;
+ }
+
+ spin_lock(&dir->lock);
+ xa_unlock(&debugfs_dentries);
+ ret = ref_tracker_dir_seq_print(dir, f);
+ spin_unlock_irqrestore(&dir->lock, flags);
+ return ret;
+}
+
+static int ref_tracker_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct ref_tracker_dir *dir = inode->i_private;
+
+ return single_open(filp, ref_tracker_debugfs_show, dir);
+}
+
+static const struct file_operations ref_tracker_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = ref_tracker_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * ref_tracker_dir_debugfs - create debugfs file for ref_tracker_dir
+ * @dir: ref_tracker_dir to be associated with debugfs file
+ *
+ * In most cases, a debugfs file will be created automatically for every
+ * ref_tracker_dir. If the object was created before debugfs is brought up
+ * then that may fail. In those cases, it is safe to call this at a later
+ * time to create the file.
+ */
+void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+ char name[NAME_MAX + 1];
+ struct dentry *dentry;
+ int ret;
+
+ /* No-op if already created */
+ dentry = xa_load(&debugfs_dentries, (unsigned long)dir);
+ if (dentry && !xa_is_err(dentry))
+ return;
+
+ ret = snprintf(name, sizeof(name), "%s@%px", dir->class, dir);
+ name[sizeof(name) - 1] = '\0';
+
+ if (ret < sizeof(name)) {
+ dentry = debugfs_create_file(name, S_IFREG | 0400,
+ ref_tracker_debug_dir, dir,
+ &ref_tracker_debugfs_fops);
+ if (!IS_ERR(dentry)) {
+ void *old;
+
+ old = xa_store_irq(&debugfs_dentries, (unsigned long)dir,
+ dentry, GFP_KERNEL);
+
+ if (xa_is_err(old))
+ debugfs_remove(dentry);
+ else
+ WARN_ON_ONCE(old);
+ }
+ }
+}
+EXPORT_SYMBOL(ref_tracker_dir_debugfs);
+
+void __ostream_printf ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+ char name[NAME_MAX + 1];
+ struct dentry *symlink, *dentry;
+ va_list args;
+ int ret;
+
+ symlink = xa_load(&debugfs_symlinks, (unsigned long)dir);
+ dentry = xa_load(&debugfs_dentries, (unsigned long)dir);
+
+ /* Already created? */
+ if (symlink && !xa_is_err(symlink))
+ return;
+
+ if (!dentry || xa_is_err(dentry))
+ return;
+
+ va_start(args, fmt);
+ ret = vsnprintf(name, sizeof(name), fmt, args);
+ va_end(args);
+ name[sizeof(name) - 1] = '\0';
+
+ if (ret < sizeof(name)) {
+ symlink = debugfs_create_symlink(name, ref_tracker_debug_dir,
+ dentry->d_name.name);
+ if (!IS_ERR(symlink)) {
+ void *old;
+
+ old = xa_store_irq(&debugfs_symlinks, (unsigned long)dir,
+ symlink, GFP_KERNEL);
+ if (xa_is_err(old))
+ debugfs_remove(symlink);
+ else
+ WARN_ON_ONCE(old);
+ }
+ }
+}
+EXPORT_SYMBOL(ref_tracker_dir_symlink);
+
+static void debugfs_reap_work(struct work_struct *work)
+{
+ struct dentry *dentry;
+ unsigned long index;
+ bool reaped;
+
+ do {
+ reaped = false;
+ xa_for_each_marked(&debugfs_symlinks, index, dentry, REF_TRACKER_DIR_DEAD) {
+ xa_erase_irq(&debugfs_symlinks, index);
+ debugfs_remove(dentry);
+ reaped = true;
+ }
+ xa_for_each_marked(&debugfs_dentries, index, dentry, REF_TRACKER_DIR_DEAD) {
+ xa_erase_irq(&debugfs_dentries, index);
+ debugfs_remove(dentry);
+ reaped = true;
+ }
+ } while (reaped);
+}
+
+static int __init ref_tracker_debugfs_init(void)
+{
+ INIT_WORK(&debugfs_reap_worker, debugfs_reap_work);
+ xa_init_flags(&debugfs_dentries, XA_FLAGS_LOCK_IRQ);
+ xa_init_flags(&debugfs_symlinks, XA_FLAGS_LOCK_IRQ);
+ ref_tracker_debug_dir = debugfs_create_dir("ref_tracker", NULL);
+ return 0;
+}
+late_initcall(ref_tracker_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
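
Minimal usage sketch (illustrative, not part of the patch), assuming the ref_tracker API above is exposed via <linux/ref_tracker.h>; the directory, class name and symlink format below are made up for the example:

#include <linux/init.h>
#include <linux/ref_tracker.h>

static struct ref_tracker_dir example_dir;

static int __init example_ref_tracker_init(void)
{
	/* quarantine up to 16 released trackers under the "example" class */
	ref_tracker_dir_init(&example_dir, 16, "example");
	/* creates debugfs file ref_tracker/example@<address> */
	ref_tracker_dir_debugfs(&example_dir);
	/* optional stable alias pointing at the file above */
	ref_tracker_dir_symlink(&example_dir, "example-%u", 0);
	return 0;
}
late_initcall(example_ref_tracker_init);
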
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
index d34df4306b87..a67b8ef5c5be 100644
--- a/lib/test_objagg.c
+++ b/lib/test_objagg.c
@@ -906,50 +906,22 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
return err;
}
-static int test_hints_case(const struct hints_case *hints_case)
+static int test_hints_case2(const struct hints_case *hints_case,
+ struct objagg_hints *hints, struct objagg *objagg)
{
struct objagg_obj *objagg_obj;
- struct objagg_hints *hints;
struct world world2 = {};
- struct world world = {};
struct objagg *objagg2;
- struct objagg *objagg;
const char *errmsg;
int i;
int err;
- objagg = objagg_create(&delta_ops, NULL, &world);
- if (IS_ERR(objagg))
- return PTR_ERR(objagg);
-
- for (i = 0; i < hints_case->key_ids_count; i++) {
- objagg_obj = world_obj_get(&world, objagg,
- hints_case->key_ids[i]);
- if (IS_ERR(objagg_obj)) {
- err = PTR_ERR(objagg_obj);
- goto err_world_obj_get;
- }
- }
-
- pr_debug_stats(objagg);
- err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
- if (err) {
- pr_err("Stats: %s\n", errmsg);
- goto err_check_expect_stats;
- }
-
- hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
- if (IS_ERR(hints)) {
- err = PTR_ERR(hints);
- goto err_hints_get;
- }
-
pr_debug_hints_stats(hints);
err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
&errmsg);
if (err) {
pr_err("Hints stats: %s\n", errmsg);
- goto err_check_expect_hints_stats;
+ return err;
}
objagg2 = objagg_create(&delta_ops, hints, &world2);
@@ -981,7 +953,48 @@ err_world2_obj_get:
world_obj_put(&world2, objagg, hints_case->key_ids[i]);
i = hints_case->key_ids_count;
objagg_destroy(objagg2);
-err_check_expect_hints_stats:
+
+ return err;
+}
+
+static int test_hints_case(const struct hints_case *hints_case)
+{
+ struct objagg_obj *objagg_obj;
+ struct objagg_hints *hints;
+ struct world world = {};
+ struct objagg *objagg;
+ const char *errmsg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world, objagg,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg);
+ err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Stats: %s\n", errmsg);
+ goto err_check_expect_stats;
+ }
+
+ hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ if (IS_ERR(hints)) {
+ err = PTR_ERR(hints);
+ goto err_hints_get;
+ }
+
+ err = test_hints_case2(hints_case, hints, objagg);
+
objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 9c1241292d1d..01787fb6a7bc 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -181,7 +181,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_state,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)));
+ from_kuid_munged(seq_user_ns(seq), sk_uid(s)));
out:
return 0;
}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6ad2f72f53f4..ee9bf84c88a7 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -815,7 +815,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
refcount_read(&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
- from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
bt->parent ? sock_i_ino(bt->parent) : 0LU);
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 3c2c98eecc62..34e89bb5f384 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -413,7 +413,7 @@ static int iso_connect_bis(struct sock *sk)
sk->sk_state = BT_CONNECT;
} else {
sk->sk_state = BT_CONNECT;
- iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ iso_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo));
}
release_sock(sk);
@@ -503,7 +503,7 @@ static int iso_connect_cis(struct sock *sk)
sk->sk_state = BT_CONNECT;
} else {
sk->sk_state = BT_CONNECT;
- iso_sock_set_timer(sk, sk->sk_sndtimeo);
+ iso_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo));
}
release_sock(sk);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5aa55fa69594..113656489db5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -255,7 +255,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr, la.l2_bdaddr_type,
- sk->sk_sndtimeo);
+ READ_ONCE(sk->sk_sndtimeo));
if (err)
return err;
@@ -1725,7 +1725,7 @@ static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
- return sk->sk_sndtimeo;
+ return READ_ONCE(sk->sk_sndtimeo);
}
static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 2945d27e75dc..d382d980fd9a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -338,7 +338,7 @@ static int sco_connect(struct sock *sk)
hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
sco_pi(sk)->setting, &sco_pi(sk)->codec,
- sk->sk_sndtimeo);
+ READ_ONCE(sk->sk_sndtimeo));
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto unlock;
@@ -367,7 +367,7 @@ static int sco_connect(struct sock *sk)
sk->sk_state = BT_CONNECTED;
} else {
sk->sk_state = BT_CONNECT;
- sco_sock_set_timer(sk, sk->sk_sndtimeo);
+ sco_sock_set_timer(sk, READ_ONCE(sk->sk_sndtimeo));
}
release_sock(sk);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 20139fa1be1f..06b604cf9d58 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -351,17 +351,154 @@ int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
return found;
}
+static int cfctrl_link_setup(struct cfctrl *cfctrl, struct cfpkt *pkt, u8 cmdrsp)
+{
+ u8 len;
+ u8 linkid = 0;
+ enum cfctrl_srv serv;
+ enum cfctrl_srv servtype;
+ u8 endpoint;
+ u8 physlinkid;
+ u8 prio;
+ u8 tmp;
+ u8 *cp;
+ int i;
+ struct cfctrl_link_param linkparam;
+ struct cfctrl_request_info rsp, *req;
+
+ memset(&linkparam, 0, sizeof(linkparam));
+
+ tmp = cfpkt_extr_head_u8(pkt);
+
+ serv = tmp & CFCTRL_SRV_MASK;
+ linkparam.linktype = serv;
+
+ servtype = tmp >> 4;
+ linkparam.chtype = servtype;
+
+ tmp = cfpkt_extr_head_u8(pkt);
+ physlinkid = tmp & 0x07;
+ prio = tmp >> 3;
+
+ linkparam.priority = prio;
+ linkparam.phyid = physlinkid;
+ endpoint = cfpkt_extr_head_u8(pkt);
+ linkparam.endpoint = endpoint & 0x03;
+
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ case CFCTRL_SRV_DBG:
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ tmp = cfpkt_extr_head_u8(pkt);
+ linkparam.u.video.connid = tmp;
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ break;
+
+ case CFCTRL_SRV_DATAGRAM:
+ linkparam.u.datagram.connid = cfpkt_extr_head_u32(pkt);
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ break;
+ case CFCTRL_SRV_RFM:
+ /* Construct a frame, convert
+ * DatagramConnectionID
+ * to network format long and copy it out...
+ */
+ linkparam.u.rfm.connid = cfpkt_extr_head_u32(pkt);
+ cp = (u8 *) linkparam.u.rfm.volume;
+ for (tmp = cfpkt_extr_head_u8(pkt);
+ cfpkt_more(pkt) && tmp != '\0';
+ tmp = cfpkt_extr_head_u8(pkt))
+ *cp++ = tmp;
+ *cp = '\0';
+
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+
+ break;
+ case CFCTRL_SRV_UTIL:
+ /* Construct a frame, convert
+ * DatagramConnectionID
+ * to network format long and copy it out...
+ */
+ /* Fifosize KB */
+ linkparam.u.utility.fifosize_kb = cfpkt_extr_head_u16(pkt);
+ /* Fifosize bufs */
+ linkparam.u.utility.fifosize_bufs = cfpkt_extr_head_u16(pkt);
+ /* name */
+ cp = (u8 *) linkparam.u.utility.name;
+ caif_assert(sizeof(linkparam.u.utility.name)
+ >= UTILITY_NAME_LENGTH);
+ for (i = 0; i < UTILITY_NAME_LENGTH && cfpkt_more(pkt); i++) {
+ tmp = cfpkt_extr_head_u8(pkt);
+ *cp++ = tmp;
+ }
+ /* Length */
+ len = cfpkt_extr_head_u8(pkt);
+ linkparam.u.utility.paramlen = len;
+ /* Param Data */
+ cp = linkparam.u.utility.params;
+ while (cfpkt_more(pkt) && len--) {
+ tmp = cfpkt_extr_head_u8(pkt);
+ *cp++ = tmp;
+ }
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ linkid = cfpkt_extr_head_u8(pkt);
+ /* Length */
+ len = cfpkt_extr_head_u8(pkt);
+ /* Param Data */
+ cfpkt_extr_head(pkt, NULL, len);
+ break;
+ default:
+ pr_warn("Request setup, invalid type (%d)\n", serv);
+ return -1;
+ }
+
+ rsp.cmd = CFCTRL_CMD_LINK_SETUP;
+ rsp.param = linkparam;
+ spin_lock_bh(&cfctrl->info_list_lock);
+ req = cfctrl_remove_req(cfctrl, &rsp);
+
+ if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
+ cfpkt_erroneous(pkt)) {
+ pr_err("Invalid O/E bit or parse error "
+ "on CAIF control channel\n");
+ cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 0,
+ req ? req->client_layer : NULL);
+ } else {
+ cfctrl->res.linksetup_rsp(cfctrl->serv.layer.up, linkid,
+ serv, physlinkid,
+ req ? req->client_layer : NULL);
+ }
+
+ kfree(req);
+
+ spin_unlock_bh(&cfctrl->info_list_lock);
+
+ return 0;
+}
+
static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
{
u8 cmdrsp;
u8 cmd;
- int ret = -1;
- u8 len;
- u8 param[255];
+ int ret = 0;
u8 linkid = 0;
struct cfctrl *cfctrl = container_obj(layer);
- struct cfctrl_request_info rsp, *req;
-
cmdrsp = cfpkt_extr_head_u8(pkt);
cmd = cmdrsp & CFCTRL_CMD_MASK;
@@ -374,150 +511,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
switch (cmd) {
case CFCTRL_CMD_LINK_SETUP:
- {
- enum cfctrl_srv serv;
- enum cfctrl_srv servtype;
- u8 endpoint;
- u8 physlinkid;
- u8 prio;
- u8 tmp;
- u8 *cp;
- int i;
- struct cfctrl_link_param linkparam;
- memset(&linkparam, 0, sizeof(linkparam));
-
- tmp = cfpkt_extr_head_u8(pkt);
-
- serv = tmp & CFCTRL_SRV_MASK;
- linkparam.linktype = serv;
-
- servtype = tmp >> 4;
- linkparam.chtype = servtype;
-
- tmp = cfpkt_extr_head_u8(pkt);
- physlinkid = tmp & 0x07;
- prio = tmp >> 3;
-
- linkparam.priority = prio;
- linkparam.phyid = physlinkid;
- endpoint = cfpkt_extr_head_u8(pkt);
- linkparam.endpoint = endpoint & 0x03;
-
- switch (serv) {
- case CFCTRL_SRV_VEI:
- case CFCTRL_SRV_DBG:
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- break;
- case CFCTRL_SRV_VIDEO:
- tmp = cfpkt_extr_head_u8(pkt);
- linkparam.u.video.connid = tmp;
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- break;
-
- case CFCTRL_SRV_DATAGRAM:
- linkparam.u.datagram.connid =
- cfpkt_extr_head_u32(pkt);
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- break;
- case CFCTRL_SRV_RFM:
- /* Construct a frame, convert
- * DatagramConnectionID
- * to network format long and copy it out...
- */
- linkparam.u.rfm.connid =
- cfpkt_extr_head_u32(pkt);
- cp = (u8 *) linkparam.u.rfm.volume;
- for (tmp = cfpkt_extr_head_u8(pkt);
- cfpkt_more(pkt) && tmp != '\0';
- tmp = cfpkt_extr_head_u8(pkt))
- *cp++ = tmp;
- *cp = '\0';
-
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
-
- break;
- case CFCTRL_SRV_UTIL:
- /* Construct a frame, convert
- * DatagramConnectionID
- * to network format long and copy it out...
- */
- /* Fifosize KB */
- linkparam.u.utility.fifosize_kb =
- cfpkt_extr_head_u16(pkt);
- /* Fifosize bufs */
- linkparam.u.utility.fifosize_bufs =
- cfpkt_extr_head_u16(pkt);
- /* name */
- cp = (u8 *) linkparam.u.utility.name;
- caif_assert(sizeof(linkparam.u.utility.name)
- >= UTILITY_NAME_LENGTH);
- for (i = 0;
- i < UTILITY_NAME_LENGTH
- && cfpkt_more(pkt); i++) {
- tmp = cfpkt_extr_head_u8(pkt);
- *cp++ = tmp;
- }
- /* Length */
- len = cfpkt_extr_head_u8(pkt);
- linkparam.u.utility.paramlen = len;
- /* Param Data */
- cp = linkparam.u.utility.params;
- while (cfpkt_more(pkt) && len--) {
- tmp = cfpkt_extr_head_u8(pkt);
- *cp++ = tmp;
- }
- if (CFCTRL_ERR_BIT & cmdrsp)
- break;
- /* Link ID */
- linkid = cfpkt_extr_head_u8(pkt);
- /* Length */
- len = cfpkt_extr_head_u8(pkt);
- /* Param Data */
- cfpkt_extr_head(pkt, &param, len);
- break;
- default:
- pr_warn("Request setup, invalid type (%d)\n",
- serv);
- goto error;
- }
-
- rsp.cmd = cmd;
- rsp.param = linkparam;
- spin_lock_bh(&cfctrl->info_list_lock);
- req = cfctrl_remove_req(cfctrl, &rsp);
-
- if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
- cfpkt_erroneous(pkt)) {
- pr_err("Invalid O/E bit or parse error "
- "on CAIF control channel\n");
- cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
- 0,
- req ? req->client_layer
- : NULL);
- } else {
- cfctrl->res.linksetup_rsp(cfctrl->serv.
- layer.up, linkid,
- serv, physlinkid,
- req ? req->
- client_layer : NULL);
- }
-
- kfree(req);
-
- spin_unlock_bh(&cfctrl->info_list_lock);
- }
+ ret = cfctrl_link_setup(cfctrl, pkt, cmdrsp);
break;
case CFCTRL_CMD_LINK_DESTROY:
linkid = cfpkt_extr_head_u8(pkt);
@@ -544,9 +538,9 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
break;
default:
pr_err("Unrecognized Control Frame\n");
+ ret = -1;
goto error;
}
- ret = 0;
error:
cfpkt_destroy(pkt);
return ret;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 4aab7033c933..b2387a46794a 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -683,7 +683,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CAN_RX_INVALID_FRAME);
return NET_RX_DROP;
}
@@ -698,7 +698,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CANFD_RX_INVALID_FRAME);
return NET_RX_DROP;
}
@@ -713,7 +713,7 @@ static int canxl_rcv(struct sk_buff *skb, struct net_device *dev,
pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n",
dev->type, skb->len);
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_CANXL_RX_INVALID_FRAME);
return NET_RX_DROP;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 6bc1cc4c94c5..5e690a2377e4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -359,6 +359,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
unsigned int datalen = head->nframes * op->cfsiz;
int err;
unsigned int *pflags;
+ enum skb_drop_reason reason;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
if (!skb)
@@ -413,11 +414,11 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
addr->can_family = AF_CAN;
addr->can_ifindex = op->rx_ifindex;
- err = sock_queue_rcv_skb(sk, skb);
+ err = sock_queue_rcv_skb_reason(sk, skb, &reason);
if (err < 0) {
struct bcm_sock *bo = bcm_sk(sk);
- kfree_skb(skb);
+ sk_skb_reason_drop(sk, skb, reason);
/* don't care about overflows in this statistic */
bo->dropped_usr_msgs++;
}
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 1efa377f002e..dee1412b3c9c 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -278,6 +278,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
{
struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb;
+ enum skb_drop_reason reason;
BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
@@ -285,8 +286,8 @@ static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
addr->can_family = AF_CAN;
addr->can_ifindex = skb->dev->ifindex;
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
+ sk_skb_reason_drop(sk, skb, reason);
}
static u8 padlen(u8 datalen)
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 6fefe7a68761..3d8b588822f9 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -311,6 +311,7 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
struct j1939_sk_buff_cb *skcb;
+ enum skb_drop_reason reason;
struct sk_buff *skb;
if (oskb->sk == &jsk->sk)
@@ -331,8 +332,8 @@ static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
if (skb->sk)
skcb->msg_flags |= MSG_DONTROUTE;
- if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(&jsk->sk, skb, &reason) < 0)
+ sk_skb_reason_drop(&jsk->sk, skb, reason);
}
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
diff --git a/net/can/raw.c b/net/can/raw.c
index 020f21430b1d..76b867d21def 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -129,6 +129,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
{
struct sock *sk = (struct sock *)data;
struct raw_sock *ro = raw_sk(sk);
+ enum skb_drop_reason reason;
struct sockaddr_can *addr;
struct sk_buff *skb;
unsigned int *pflags;
@@ -205,8 +206,8 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (oskb->sk == sk)
*pflags |= MSG_CONFIRM;
- if (sock_queue_rcv_skb(sk, skb) < 0)
- kfree_skb(skb);
+ if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
+ sk_skb_reason_drop(sk, skb, reason);
}
static int raw_enable_filters(struct net *net, struct net_device *dev,
diff --git a/net/core/dev.c b/net/core/dev.c
index be97c440ecd5..7ee808eb068e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3179,7 +3179,6 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (dev->reg_state == NETREG_REGISTERED ||
dev->reg_state == NETREG_UNREGISTERING) {
- ASSERT_RTNL();
netdev_ops_assert_locked(dev);
rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
@@ -3229,7 +3228,6 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
return -EINVAL;
if (dev->reg_state == NETREG_REGISTERED) {
- ASSERT_RTNL();
netdev_ops_assert_locked(dev);
rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
@@ -6926,6 +6924,43 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+static void napi_stop_kthread(struct napi_struct *napi)
+{
+ unsigned long val, new;
+
+ /* Wait until the napi STATE_THREADED is unset. */
+ while (true) {
+ val = READ_ONCE(napi->state);
+
+ /* If the napi kthread owns this napi or the napi is idle,
+ * STATE_THREADED can be unset here.
+ */
+ if ((val & NAPIF_STATE_SCHED_THREADED) ||
+ !(val & NAPIF_STATE_SCHED)) {
+ new = val & (~NAPIF_STATE_THREADED);
+ } else {
+ msleep(20);
+ continue;
+ }
+
+ if (try_cmpxchg(&napi->state, &val, new))
+ break;
+ }
+
+ /* Once STATE_THREADED is unset, wait for SCHED_THREADED to be unset by
+ * the kthread.
+ */
+ while (true) {
+ if (!test_bit(NAPIF_STATE_SCHED_THREADED, &napi->state))
+ break;
+
+ msleep(20);
+ }
+
+ kthread_stop(napi->thread);
+ napi->thread = NULL;
+}
+
int dev_set_threaded(struct net_device *dev, bool threaded)
{
struct napi_struct *napi;
@@ -6961,8 +6996,12 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
* softirq mode will happen in the next round of napi_schedule().
* This should not cause hiccups/stalls to the live traffic.
*/
- list_for_each_entry(napi, &dev->napi_list, dev_list)
- assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
+ list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ if (!threaded && napi->thread)
+ napi_stop_kthread(napi);
+ else
+ assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
+ }
return err;
}
@@ -10730,12 +10769,14 @@ sync_lower:
* *before* calling udp_tunnel_get_rx_info,
* but *after* calling udp_tunnel_drop_rx_info.
*/
+ udp_tunnel_nic_lock(dev);
if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
dev->features = features;
udp_tunnel_get_rx_info(dev);
} else {
udp_tunnel_drop_rx_info(dev);
}
+ udp_tunnel_nic_unlock(dev);
}
if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
@@ -11715,7 +11756,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->priv_len = sizeof_priv;
- ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
+ ref_tracker_dir_init(&dev->refcnt_tracker, 128, "netdev");
#ifdef CONFIG_PCPU_DEV_REFCNT
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 49dce9a82295..8ad9898f8e42 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@ static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
- struct net_device *dev);
+ struct net_device *dev,
+ bool skip_perm);
#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
@@ -423,7 +424,7 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev, skip_perm);
- pneigh_ifdown_and_unlock(tbl, dev);
+ pneigh_ifdown_and_unlock(tbl, dev, skip_perm);
pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
tbl->family);
if (skb_queue_empty_lockless(&tbl->proxy_queue))
@@ -803,7 +804,8 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
}
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
- struct net_device *dev)
+ struct net_device *dev,
+ bool skip_perm)
{
struct pneigh_entry *n, **np, *freelist = NULL;
u32 h;
@@ -811,12 +813,15 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
np = &tbl->phash_buckets[h];
while ((n = *np) != NULL) {
+ if (skip_perm && n->permanent)
+ goto skip;
if (!dev || n->dev == dev) {
*np = n->next;
n->next = freelist;
freelist = n;
continue;
}
+skip:
np = &n->next;
}
}
@@ -1983,6 +1988,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
pn = pneigh_lookup(tbl, net, dst, dev, 1);
if (pn) {
pn->flags = ndm_flags;
+ pn->permanent = !!(ndm->ndm_state & NUD_PERMANENT);
if (protocol)
pn->protocol = protocol;
err = 0;
@@ -2049,10 +2055,8 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
NETLINK_CB(skb).portid, extack);
- if (!err && ndm_flags & (NTF_USE | NTF_MANAGED)) {
+ if (!err && ndm_flags & (NTF_USE | NTF_MANAGED))
neigh_event_send(neigh, NULL);
- err = 0;
- }
neigh_release(neigh);
out:
return err;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1ace0cd01adc..c9b969386399 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -641,12 +641,6 @@ static ssize_t phys_port_id_show(struct device *dev,
struct netdev_phys_item_id ppid;
ssize_t ret;
- /* The check is also done in dev_get_phys_port_id; this helps returning
- * early without hitting the locking section below.
- */
- if (!netdev->netdev_ops->ndo_get_phys_port_id)
- return -EOPNOTSUPP;
-
ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
if (ret)
return ret;
@@ -668,13 +662,6 @@ static ssize_t phys_port_name_show(struct device *dev,
char name[IFNAMSIZ];
ssize_t ret;
- /* The checks are also done in dev_get_phys_port_name; this helps
- * returning early without hitting the locking section below.
- */
- if (!netdev->netdev_ops->ndo_get_phys_port_name &&
- !netdev->devlink_port)
- return -EOPNOTSUPP;
-
ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
if (ret)
return ret;
@@ -696,14 +683,6 @@ static ssize_t phys_switch_id_show(struct device *dev,
struct netdev_phys_item_id ppid = { };
ssize_t ret;
- /* The checks are also done in dev_get_phys_port_name; this helps
- * returning early without hitting the locking section below. This works
- * because recurse is false when calling dev_get_port_parent_id.
- */
- if (!netdev->netdev_ops->ndo_get_port_parent_id &&
- !netdev->devlink_port)
- return -EOPNOTSUPP;
-
ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
if (ret)
return ret;
@@ -718,6 +697,40 @@ static ssize_t phys_switch_id_show(struct device *dev,
}
static DEVICE_ATTR_RO(phys_switch_id);
+static struct attribute *netdev_phys_attrs[] __ro_after_init = {
+ &dev_attr_phys_port_id.attr,
+ &dev_attr_phys_port_name.attr,
+ &dev_attr_phys_switch_id.attr,
+ NULL,
+};
+
+static umode_t netdev_phys_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct net_device *netdev = to_net_dev(dev);
+
+ if (attr == &dev_attr_phys_port_id.attr) {
+ if (!netdev->netdev_ops->ndo_get_phys_port_id)
+ return 0;
+ } else if (attr == &dev_attr_phys_port_name.attr) {
+ if (!netdev->netdev_ops->ndo_get_phys_port_name &&
+ !netdev->devlink_port)
+ return 0;
+ } else if (attr == &dev_attr_phys_switch_id.attr) {
+ if (!netdev->netdev_ops->ndo_get_port_parent_id &&
+ !netdev->devlink_port)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group netdev_phys_group = {
+ .attrs = netdev_phys_attrs,
+ .is_visible = netdev_phys_is_visible,
+};
+
static ssize_t threaded_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -783,9 +796,6 @@ static struct attribute *net_class_attrs[] __ro_after_init = {
&dev_attr_tx_queue_len.attr,
&dev_attr_gro_flush_timeout.attr,
&dev_attr_napi_defer_hard_irqs.attr,
- &dev_attr_phys_port_id.attr,
- &dev_attr_phys_port_name.attr,
- &dev_attr_phys_switch_id.attr,
&dev_attr_proto_down.attr,
&dev_attr_carrier_up_count.attr,
&dev_attr_carrier_down_count.attr,
@@ -2328,6 +2338,7 @@ int netdev_register_kobject(struct net_device *ndev)
groups++;
*groups++ = &netstat_group;
+ *groups++ = &netdev_phys_group;
if (wireless_group_needed(ndev))
*groups++ = &wireless_group;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ae54f26709ca..d0f607507ee8 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -403,8 +403,8 @@ static __net_init void preinit_net(struct net *net, struct user_namespace *user_
{
refcount_set(&net->passive, 1);
refcount_set(&net->ns.count, 1);
- ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
- ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
+ ref_tracker_dir_init(&net->refcnt_tracker, 128, "net_refcnt");
+ ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net_notrefcnt");
get_random_bytes(&net->hash_mix, sizeof(u32));
net->dev_base_seq = 1;
@@ -791,12 +791,40 @@ struct net *get_net_ns_by_pid(pid_t pid)
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
+#ifdef CONFIG_NET_NS_REFCNT_TRACKER
+static void net_ns_net_debugfs(struct net *net)
+{
+ ref_tracker_dir_symlink(&net->refcnt_tracker, "netns-%llx-%u-refcnt",
+ net->net_cookie, net->ns.inum);
+ ref_tracker_dir_symlink(&net->notrefcnt_tracker, "netns-%llx-%u-notrefcnt",
+ net->net_cookie, net->ns.inum);
+}
+
+static int __init init_net_debugfs(void)
+{
+ ref_tracker_dir_debugfs(&init_net.refcnt_tracker);
+ ref_tracker_dir_debugfs(&init_net.notrefcnt_tracker);
+ net_ns_net_debugfs(&init_net);
+ return 0;
+}
+late_initcall(init_net_debugfs);
+#else
+static void net_ns_net_debugfs(struct net *net)
+{
+}
+#endif
+
static __net_init int net_ns_net_init(struct net *net)
{
+ int ret;
+
#ifdef CONFIG_NET_NS
net->ns.ops = &netns_operations;
#endif
- return ns_alloc_inum(&net->ns);
+ ret = ns_alloc_inum(&net->ns);
+ if (!ret)
+ net_ns_net_debugfs(net);
+ return ret;
}
static __net_exit void net_ns_net_exit(struct net *net)
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index d126f10197bf..3bf1151d8061 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -97,14 +97,12 @@ int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
if (!netdev_need_ops_lock(dev))
return -EOPNOTSUPP;
- if (rxq_idx >= dev->real_num_rx_queues)
- return -EINVAL;
- rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
-
if (rxq_idx >= dev->real_num_rx_queues) {
NL_SET_ERR_MSG(extack, "rx queue index out of range");
return -ERANGE;
}
+ rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
+
if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
return -EINVAL;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6ad84d4a2b46..54f9d505895f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -58,13 +58,6 @@ static void zap_completion_queue(void);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
-#define np_info(np, fmt, ...) \
- pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
-#define np_err(np, fmt, ...) \
- pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
-#define np_notice(np, fmt, ...) \
- pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
-
static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq)
@@ -499,43 +492,6 @@ int netpoll_send_udp(struct netpoll *np, const char *msg, int len)
}
EXPORT_SYMBOL(netpoll_send_udp);
-void netpoll_print_options(struct netpoll *np)
-{
- np_info(np, "local port %d\n", np->local_port);
- if (np->ipv6)
- np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
- else
- np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
- np_info(np, "interface name '%s'\n", np->dev_name);
- np_info(np, "local ethernet address '%pM'\n", np->dev_mac);
- np_info(np, "remote port %d\n", np->remote_port);
- if (np->ipv6)
- np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
- else
- np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
- np_info(np, "remote ethernet address %pM\n", np->remote_mac);
-}
-EXPORT_SYMBOL(netpoll_print_options);
-
-static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
-{
- const char *end;
-
- if (!strchr(str, ':') &&
- in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
- if (!*end)
- return 0;
- }
- if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
-#if IS_ENABLED(CONFIG_IPV6)
- if (!*end)
- return 1;
-#else
- return -1;
-#endif
- }
- return -1;
-}
static void skb_pool_flush(struct netpoll *np)
{
@@ -546,95 +502,6 @@ static void skb_pool_flush(struct netpoll *np)
skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
}
-int netpoll_parse_options(struct netpoll *np, char *opt)
-{
- char *cur=opt, *delim;
- int ipv6;
- bool ipversion_set = false;
-
- if (*cur != '@') {
- if ((delim = strchr(cur, '@')) == NULL)
- goto parse_failed;
- *delim = 0;
- if (kstrtou16(cur, 10, &np->local_port))
- goto parse_failed;
- cur = delim;
- }
- cur++;
-
- if (*cur != '/') {
- ipversion_set = true;
- if ((delim = strchr(cur, '/')) == NULL)
- goto parse_failed;
- *delim = 0;
- ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
- if (ipv6 < 0)
- goto parse_failed;
- else
- np->ipv6 = (bool)ipv6;
- cur = delim;
- }
- cur++;
-
- if (*cur != ',') {
- /* parse out dev_name or dev_mac */
- if ((delim = strchr(cur, ',')) == NULL)
- goto parse_failed;
- *delim = 0;
-
- np->dev_name[0] = '\0';
- eth_broadcast_addr(np->dev_mac);
- if (!strchr(cur, ':'))
- strscpy(np->dev_name, cur, sizeof(np->dev_name));
- else if (!mac_pton(cur, np->dev_mac))
- goto parse_failed;
-
- cur = delim;
- }
- cur++;
-
- if (*cur != '@') {
- /* dst port */
- if ((delim = strchr(cur, '@')) == NULL)
- goto parse_failed;
- *delim = 0;
- if (*cur == ' ' || *cur == '\t')
- np_info(np, "warning: whitespace is not allowed\n");
- if (kstrtou16(cur, 10, &np->remote_port))
- goto parse_failed;
- cur = delim;
- }
- cur++;
-
- /* dst ip */
- if ((delim = strchr(cur, '/')) == NULL)
- goto parse_failed;
- *delim = 0;
- ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
- if (ipv6 < 0)
- goto parse_failed;
- else if (ipversion_set && np->ipv6 != (bool)ipv6)
- goto parse_failed;
- else
- np->ipv6 = (bool)ipv6;
- cur = delim + 1;
-
- if (*cur != 0) {
- /* MAC address */
- if (!mac_pton(cur, np->remote_mac))
- goto parse_failed;
- }
-
- netpoll_print_options(np);
-
- return 0;
-
- parse_failed:
- np_info(np, "couldn't parse config at '%s'!\n", cur);
- return -1;
-}
-EXPORT_SYMBOL(netpoll_parse_options);
-
static void refill_skbs_work_handler(struct work_struct *work)
{
struct netpoll *np =
@@ -716,13 +583,97 @@ static char *egress_dev(struct netpoll *np, char *buf)
return buf;
}
+static void netpoll_wait_carrier(struct netpoll *np, struct net_device *ndev,
+ unsigned int timeout)
+{
+ unsigned long atmost;
+
+ atmost = jiffies + timeout * HZ;
+ while (!netif_carrier_ok(ndev)) {
+ if (time_after(jiffies, atmost)) {
+ np_notice(np, "timeout waiting for carrier\n");
+ break;
+ }
+ msleep(1);
+ }
+}
+
+/*
+ * Take the IPv6 address from ndev and populate the local_ip structure in netpoll
+ */
+static int netpoll_take_ipv6(struct netpoll *np, struct net_device *ndev)
+{
+ char buf[MAC_ADDR_STR_LEN + 1];
+ int err = -EDESTADDRREQ;
+ struct inet6_dev *idev;
+
+ if (!IS_ENABLED(CONFIG_IPV6)) {
+ np_err(np, "IPv6 is not supported %s, aborting\n",
+ egress_dev(np, buf));
+ return -EINVAL;
+ }
+
+ idev = __in6_dev_get(ndev);
+ if (idev) {
+ struct inet6_ifaddr *ifp;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
+ continue;
+ /* Got the IP, let's return */
+ np->local_ip.in6 = ifp->addr;
+ err = 0;
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+ }
+ if (err) {
+ np_err(np, "no IPv6 address for %s, aborting\n",
+ egress_dev(np, buf));
+ return err;
+ }
+
+ np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
+ return 0;
+}
+
+/*
+ * Take the IPv4 address from ndev and populate the local_ip structure in netpoll
+ */
+static int netpoll_take_ipv4(struct netpoll *np, struct net_device *ndev)
+{
+ char buf[MAC_ADDR_STR_LEN + 1];
+ const struct in_ifaddr *ifa;
+ struct in_device *in_dev;
+
+ in_dev = __in_dev_get_rtnl(ndev);
+ if (!in_dev) {
+ np_err(np, "no IP address for %s, aborting\n",
+ egress_dev(np, buf));
+ return -EDESTADDRREQ;
+ }
+
+ ifa = rtnl_dereference(in_dev->ifa_list);
+ if (!ifa) {
+ np_err(np, "no IP address for %s, aborting\n",
+ egress_dev(np, buf));
+ return -EDESTADDRREQ;
+ }
+
+ np->local_ip.ip = ifa->ifa_local;
+ np_info(np, "local IP %pI4\n", &np->local_ip.ip);
+
+ return 0;
+}
+
int netpoll_setup(struct netpoll *np)
{
struct net *net = current->nsproxy->net_ns;
char buf[MAC_ADDR_STR_LEN + 1];
struct net_device *ndev = NULL;
bool ip_overwritten = false;
- struct in_device *in_dev;
int err;
rtnl_lock();
@@ -746,85 +697,31 @@ int netpoll_setup(struct netpoll *np)
}
if (!netif_running(ndev)) {
- unsigned long atmost;
-
np_info(np, "device %s not up yet, forcing it\n",
egress_dev(np, buf));
err = dev_open(ndev, NULL);
-
if (err) {
np_err(np, "failed to open %s\n", ndev->name);
goto put;
}
rtnl_unlock();
- atmost = jiffies + carrier_timeout * HZ;
- while (!netif_carrier_ok(ndev)) {
- if (time_after(jiffies, atmost)) {
- np_notice(np, "timeout waiting for carrier\n");
- break;
- }
- msleep(1);
- }
-
+ netpoll_wait_carrier(np, ndev, carrier_timeout);
rtnl_lock();
}
if (!np->local_ip.ip) {
if (!np->ipv6) {
- const struct in_ifaddr *ifa;
-
- in_dev = __in_dev_get_rtnl(ndev);
- if (!in_dev)
- goto put_noaddr;
-
- ifa = rtnl_dereference(in_dev->ifa_list);
- if (!ifa) {
-put_noaddr:
- np_err(np, "no IP address for %s, aborting\n",
- egress_dev(np, buf));
- err = -EDESTADDRREQ;
+ err = netpoll_take_ipv4(np, ndev);
+ if (err)
goto put;
- }
-
- np->local_ip.ip = ifa->ifa_local;
- ip_overwritten = true;
- np_info(np, "local IP %pI4\n", &np->local_ip.ip);
} else {
-#if IS_ENABLED(CONFIG_IPV6)
- struct inet6_dev *idev;
-
- err = -EDESTADDRREQ;
- idev = __in6_dev_get(ndev);
- if (idev) {
- struct inet6_ifaddr *ifp;
-
- read_lock_bh(&idev->lock);
- list_for_each_entry(ifp, &idev->addr_list, if_list) {
- if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
- !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
- continue;
- np->local_ip.in6 = ifp->addr;
- ip_overwritten = true;
- err = 0;
- break;
- }
- read_unlock_bh(&idev->lock);
- }
- if (err) {
- np_err(np, "no IPv6 address for %s, aborting\n",
- egress_dev(np, buf));
+ err = netpoll_take_ipv6(np, ndev);
+ if (err)
goto put;
- } else
- np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
-#else
- np_err(np, "IPv6 is not supported %s, aborting\n",
- egress_dev(np, buf));
- err = -EINVAL;
- goto put;
-#endif
}
+ ip_overwritten = true;
}
err = __netpoll_setup(np, ndev);
@@ -863,7 +760,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
kfree(npinfo);
}
-void __netpoll_cleanup(struct netpoll *np)
+static void __netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
@@ -885,7 +782,6 @@ void __netpoll_cleanup(struct netpoll *np)
skb_pool_flush(np);
}
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
void __netpoll_free(struct netpoll *np)
{
diff --git a/net/core/sock.c b/net/core/sock.c
index 3b409bc8ef6d..3a71d6c4ccf0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -818,12 +818,10 @@ EXPORT_SYMBOL(sock_set_priority);
void sock_set_sndtimeo(struct sock *sk, s64 secs)
{
- lock_sock(sk);
if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
else
WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
- release_sock(sk);
}
EXPORT_SYMBOL(sock_set_sndtimeo);
@@ -837,14 +835,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
}
}
-void sock_enable_timestamps(struct sock *sk)
-{
- lock_sock(sk);
- __sock_set_timestamps(sk, true, false, true);
- release_sock(sk);
-}
-EXPORT_SYMBOL(sock_enable_timestamps);
-
void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
{
switch (optname) {
@@ -1295,6 +1285,14 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
case SO_DEVMEM_DONTNEED:
return sock_devmem_dontneed(sk, optval, optlen);
#endif
+ case SO_SNDTIMEO_OLD:
+ case SO_SNDTIMEO_NEW:
+ return sock_set_timeout(&sk->sk_sndtimeo, optval,
+ optlen, optname == SO_SNDTIMEO_OLD);
+ case SO_RCVTIMEO_OLD:
+ case SO_RCVTIMEO_NEW:
+ return sock_set_timeout(&sk->sk_rcvtimeo, optval,
+ optlen, optname == SO_RCVTIMEO_OLD);
}
sockopt_lock_sock(sk);
@@ -1450,18 +1448,6 @@ set_sndbuf:
WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
break;
}
- case SO_RCVTIMEO_OLD:
- case SO_RCVTIMEO_NEW:
- ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
- optlen, optname == SO_RCVTIMEO_OLD);
- break;
-
- case SO_SNDTIMEO_OLD:
- case SO_SNDTIMEO_NEW:
- ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
- optlen, optname == SO_SNDTIMEO_OLD);
- break;
-
case SO_ATTACH_FILTER: {
struct sock_fprog fprog;
@@ -2788,17 +2774,6 @@ void sock_pfree(struct sk_buff *skb)
EXPORT_SYMBOL(sock_pfree);
#endif /* CONFIG_INET */
-kuid_t sock_i_uid(struct sock *sk)
-{
- kuid_t uid;
-
- read_lock_bh(&sk->sk_callback_lock);
- uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
- read_unlock_bh(&sk->sk_callback_lock);
- return uid;
-}
-EXPORT_SYMBOL(sock_i_uid);
-
unsigned long __sock_i_ino(struct sock *sk)
{
unsigned long ino;
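For reference, a minimal userspace sketch of the SO_SNDTIMEO/SO_RCVTIMEO path that the relocated sk_setsockopt() cases above now serve before the socket lock is taken. This is the standard sockets API, not part of the patch; error handling is kept minimal.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/time.h>

int main(void)
{
	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Both options are handled by sock_set_timeout() on the kernel side,
	 * now reached before sockopt_lock_sock() as per the hunk above. */
	if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) ||
	    setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
		perror("setsockopt");
	return 0;
}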
diff --git a/net/devlink/param.c b/net/devlink/param.c
index b29abf8d3ed4..396b8a7f6013 100644
--- a/net/devlink/param.c
+++ b/net/devlink/param.c
@@ -92,6 +92,11 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
.type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_PHC_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_PHC_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 2dfe9063613f..869cbe57162f 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -42,12 +42,24 @@ config NET_DSA_TAG_BRCM
Broadcom switches which place the tag after the MAC source address.
config NET_DSA_TAG_BRCM_LEGACY
- tristate "Tag driver for Broadcom legacy switches using in-frame headers"
+ tristate "Tag driver for BCM63xx legacy switches using in-frame headers"
select NET_DSA_TAG_BRCM_COMMON
help
Say Y if you want to enable support for tagging frames for the
- Broadcom legacy switches which place the tag after the MAC source
+ BCM63xx legacy switches which place the tag after the MAC source
address.
+ This tag is used in BCM63xx legacy switches which work without the
+ original FCS and length before the tag insertion.
+
+config NET_DSA_TAG_BRCM_LEGACY_FCS
+ tristate "Tag driver for BCM53xx legacy switches using in-frame headers"
+ select NET_DSA_TAG_BRCM_COMMON
+ help
+ Say Y if you want to enable support for tagging frames for the
+ BCM53xx legacy switches which place the tag after the MAC source
+ address.
+ This tag is used in BCM53xx legacy switches which expect original
+ FCS and length before the tag insertion to be present.
config NET_DSA_TAG_BRCM_PREPEND
tristate "Tag driver for Broadcom switches using prepended headers"
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index fe75821623a4..26bb657ceac3 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -15,6 +15,7 @@
#define BRCM_NAME "brcm"
#define BRCM_LEGACY_NAME "brcm-legacy"
+#define BRCM_LEGACY_FCS_NAME "brcm-legacy-fcs"
#define BRCM_PREPEND_NAME "brcm-prepend"
/* Legacy Broadcom tag (6 bytes) */
@@ -32,6 +33,10 @@
#define BRCM_LEG_MULTICAST (1 << 5)
#define BRCM_LEG_EGRESS (2 << 5)
#define BRCM_LEG_INGRESS (3 << 5)
+#define BRCM_LEG_LEN_HI(x) (((x) >> 8) & 0x7)
+
+/* 4th byte in the tag */
+#define BRCM_LEG_LEN_LO(x) ((x) & 0xff)
/* 6th byte in the tag */
#define BRCM_LEG_PORT_ID (0xf)
@@ -212,6 +217,41 @@ DSA_TAG_DRIVER(brcm_netdev_ops);
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME);
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY) || \
+ IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int len = BRCM_LEG_TAG_LEN;
+ int source_port;
+ u8 *brcm_tag;
+
+ if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
+ return NULL;
+
+ brcm_tag = dsa_etype_header_pos_rx(skb);
+
+ source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+
+ skb->dev = dsa_conduit_find_user(dev, 0, source_port);
+ if (!skb->dev)
+ return NULL;
+
+ /* VLAN tag is added by BCM63xx internal switch */
+ if (netdev_uses_dsa(skb->dev))
+ len += VLAN_HLEN;
+
+ /* Remove Broadcom tag and update checksum */
+ skb_pull_rcsum(skb, len);
+
+ dsa_default_offload_fwd_mark(skb);
+
+ dsa_strip_etype_header(skb, len);
+
+ return skb;
+}
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY || CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
+
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -250,49 +290,77 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
return skb;
}
-static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
- struct net_device *dev)
+static const struct dsa_device_ops brcm_legacy_netdev_ops = {
+ .name = BRCM_LEGACY_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY,
+ .xmit = brcm_leg_tag_xmit,
+ .rcv = brcm_leg_tag_rcv,
+ .needed_headroom = BRCM_LEG_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+static struct sk_buff *brcm_leg_fcs_tag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- int len = BRCM_LEG_TAG_LEN;
- int source_port;
+ struct dsa_port *dp = dsa_user_to_port(dev);
+ unsigned int fcs_len;
+ __le32 fcs_val;
u8 *brcm_tag;
- if (unlikely(!pskb_may_pull(skb, BRCM_LEG_TAG_LEN + VLAN_HLEN)))
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS) otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 70 bytes (including FCS
+ * and tag) because the length verification is done after the Broadcom
+ * tag is stripped off the ingress packet.
+ *
+ * Let dsa_user_xmit() free the SKB.
+ */
+ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false))
return NULL;
- brcm_tag = dsa_etype_header_pos_rx(skb);
+ fcs_len = skb->len;
+ fcs_val = cpu_to_le32(crc32_le(~0, skb->data, fcs_len) ^ ~0);
- source_port = brcm_tag[5] & BRCM_LEG_PORT_ID;
+ skb_push(skb, BRCM_LEG_TAG_LEN);
- skb->dev = dsa_conduit_find_user(dev, 0, source_port);
- if (!skb->dev)
- return NULL;
+ dsa_alloc_etype_header(skb, BRCM_LEG_TAG_LEN);
- /* VLAN tag is added by BCM63xx internal switch */
- if (netdev_uses_dsa(skb->dev))
- len += VLAN_HLEN;
+ brcm_tag = skb->data + 2 * ETH_ALEN;
- /* Remove Broadcom tag and update checksum */
- skb_pull_rcsum(skb, len);
+ /* Broadcom tag type */
+ brcm_tag[0] = BRCM_LEG_TYPE_HI;
+ brcm_tag[1] = BRCM_LEG_TYPE_LO;
- dsa_default_offload_fwd_mark(skb);
+ /* Broadcom tag value */
+ brcm_tag[2] = BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(fcs_len);
+ brcm_tag[3] = BRCM_LEG_LEN_LO(fcs_len);
+ brcm_tag[4] = 0;
+ brcm_tag[5] = dp->index & BRCM_LEG_PORT_ID;
- dsa_strip_etype_header(skb, len);
+ /* Original FCS value */
+ if (__skb_pad(skb, ETH_FCS_LEN, false))
+ return NULL;
+ skb_put_data(skb, &fcs_val, ETH_FCS_LEN);
return skb;
}
-static const struct dsa_device_ops brcm_legacy_netdev_ops = {
- .name = BRCM_LEGACY_NAME,
- .proto = DSA_TAG_PROTO_BRCM_LEGACY,
- .xmit = brcm_leg_tag_xmit,
+static const struct dsa_device_ops brcm_legacy_fcs_netdev_ops = {
+ .name = BRCM_LEGACY_FCS_NAME,
+ .proto = DSA_TAG_PROTO_BRCM_LEGACY_FCS,
+ .xmit = brcm_leg_fcs_tag_xmit,
.rcv = brcm_leg_tag_rcv,
.needed_headroom = BRCM_LEG_TAG_LEN,
};
-DSA_TAG_DRIVER(brcm_legacy_netdev_ops);
-MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY, BRCM_LEGACY_NAME);
-#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY */
+DSA_TAG_DRIVER(brcm_legacy_fcs_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_LEGACY_FCS, BRCM_LEGACY_FCS_NAME);
+#endif /* CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS */
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
static struct sk_buff *brcm_tag_xmit_prepend(struct sk_buff *skb,
@@ -328,6 +396,9 @@ static struct dsa_tag_driver *dsa_tag_driver_array[] = {
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY)
&DSA_TAG_DRIVER_NAME(brcm_legacy_netdev_ops),
#endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_LEGACY_FCS)
+ &DSA_TAG_DRIVER_NAME(brcm_legacy_fcs_netdev_ops),
+#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
&DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
#endif
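The new brcm_leg_fcs_tag_xmit() above pads the frame to the 64-byte minimum, computes the original FCS as crcc32_le(~0, data, len) ^ ~0 (the standard reflected IEEE 802.3 CRC-32), records the pre-tag length in the tag, and re-appends the FCS after it. Below is a small userspace sketch of the same computation, illustrative only: the BRCM_LEG_TYPE_HI/LO byte values are defined elsewhere in tag_brcm.c and are left as placeholders, and the port number is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define BRCM_LEG_EGRESS		(2 << 5)		/* from tag_brcm.c */
#define BRCM_LEG_LEN_HI(x)	(((x) >> 8) & 0x7)	/* from tag_brcm.c */
#define BRCM_LEG_LEN_LO(x)	((x) & 0xff)		/* from tag_brcm.c */

/* Bitwise CRC-32, reflected polynomial 0xEDB88320; gives the same result
 * as crc32_le(~0, data, len) ^ ~0 in the kernel. */
static uint32_t eth_fcs(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *data++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint8_t frame[64] = { 0 };	/* padded frame, as after __skb_put_padto() */
	uint32_t fcs = eth_fcs(frame, sizeof(frame));
	uint8_t tag[6];
	int port = 3;			/* hypothetical egress port */

	tag[0] = 0;			/* BRCM_LEG_TYPE_HI, value not shown in this diff */
	tag[1] = 0;			/* BRCM_LEG_TYPE_LO, value not shown in this diff */
	tag[2] = BRCM_LEG_EGRESS | BRCM_LEG_LEN_HI(sizeof(frame));
	tag[3] = BRCM_LEG_LEN_LO(sizeof(frame));
	tag[4] = 0;
	tag[5] = port & 0xf;		/* BRCM_LEG_PORT_ID mask */

	printf("FCS over %zu bytes: 0x%08x, tag %02x%02x%02x%02x%02x%02x\n",
	       sizeof(frame), fcs, tag[0], tag[1], tag[2], tag[3], tag[4], tag[5]);
	return 0;
}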
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index b4683d286a5a..c41db1595621 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -74,4 +74,12 @@ int ethtool_get_module_eeprom_call(struct net_device *dev,
bool __ethtool_dev_mm_supported(struct net_device *dev);
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+void ethtool_rss_notify(struct net_device *dev, u32 rss_context);
+#else
+static inline void ethtool_rss_notify(struct net_device *dev, u32 rss_context)
+{
+}
+#endif
+
#endif /* _ETHTOOL_COMMON_H */
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 71c828d0bf31..c34bac7bffd8 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -617,8 +617,8 @@ static int ethtool_set_link_ksettings(struct net_device *dev,
err = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
if (err >= 0) {
- ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
- ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF);
}
return err;
}
@@ -708,8 +708,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
__ETHTOOL_LINK_MODE_MASK_NU32;
ret = dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
if (ret >= 0) {
- ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);
- ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF);
+ ethtool_notify(dev, ETHTOOL_MSG_LINKMODES_NTF);
}
return ret;
}
@@ -1037,22 +1037,21 @@ static int ethtool_check_xfrm_rxfh(u32 input_xfrm, u64 rxfh)
static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
- struct ethtool_rxnfc info = {
- .cmd = ETHTOOL_GRXFH,
- };
int err;
u32 i;
for (i = 0; i < __FLOW_TYPE_COUNT; i++) {
+ struct ethtool_rxfh_fields fields = {
+ .flow_type = i,
+ };
+
if (!flow_type_hashable(i))
continue;
- info.flow_type = i;
- err = ops->get_rxnfc(dev, &info, NULL);
- if (err)
+ if (ops->get_rxfh_fields(dev, &fields))
continue;
- err = ethtool_check_xfrm_rxfh(input_xfrm, info.data);
+ err = ethtool_check_xfrm_rxfh(input_xfrm, fields.data);
if (err)
return err;
}
@@ -1060,6 +1059,79 @@ static int ethtool_check_flow_types(struct net_device *dev, u32 input_xfrm)
return 0;
}
+static noinline_for_stack int
+ethtool_set_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxfh_fields fields = {};
+ struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
+ int rc;
+
+ if (!ops->set_rxfh_fields)
+ return -EOPNOTSUPP;
+
+ rc = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
+ if (rc)
+ return rc;
+
+ if (info.flow_type & FLOW_RSS && info.rss_context &&
+ !ops->rxfh_per_ctx_fields)
+ return -EINVAL;
+
+ if (ops->get_rxfh) {
+ struct ethtool_rxfh_param rxfh = {};
+
+ rc = ops->get_rxfh(dev, &rxfh);
+ if (rc)
+ return rc;
+
+ rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data);
+ if (rc)
+ return rc;
+ }
+
+ fields.data = info.data;
+ fields.flow_type = info.flow_type & ~FLOW_RSS;
+ if (info.flow_type & FLOW_RSS)
+ fields.rss_context = info.rss_context;
+
+ return ops->set_rxfh_fields(dev, &fields, NULL);
+}
+
+static noinline_for_stack int
+ethtool_get_rxfh_fields(struct net_device *dev, u32 cmd, void __user *useraddr)
+{
+ struct ethtool_rxnfc info;
+ size_t info_size = sizeof(info);
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxfh_fields fields = {};
+ int ret;
+
+ if (!ops->get_rxfh_fields)
+ return -EOPNOTSUPP;
+
+ ret = ethtool_rxnfc_copy_struct(cmd, &info, &info_size, useraddr);
+ if (ret)
+ return ret;
+
+ if (info.flow_type & FLOW_RSS && info.rss_context &&
+ !ops->rxfh_per_ctx_fields)
+ return -EINVAL;
+
+ fields.flow_type = info.flow_type & ~FLOW_RSS;
+ if (info.flow_type & FLOW_RSS)
+ fields.rss_context = info.rss_context;
+
+ ret = ops->get_rxfh_fields(dev, &fields);
+ if (ret < 0)
+ return ret;
+
+ info.data = fields.data;
+
+ return ethtool_rxnfc_copy_to_user(useraddr, &info, info_size, NULL);
+}
+
static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
u32 cmd, void __user *useraddr)
{
@@ -1088,18 +1160,6 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
return -EINVAL;
}
- if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) {
- struct ethtool_rxfh_param rxfh = {};
-
- rc = ops->get_rxfh(dev, &rxfh);
- if (rc)
- return rc;
-
- rc = ethtool_check_xfrm_rxfh(rxfh.input_xfrm, info.data);
- if (rc)
- return rc;
- }
-
rc = ops->set_rxnfc(dev, &info);
if (rc)
return rc;
@@ -1442,10 +1502,11 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
struct ethtool_rxfh rxfh;
bool locked = false; /* dev->ethtool->rss_lock taken */
bool create = false;
+ bool mod = false;
u8 *rss_config;
int ret;
- if (!ops->get_rxnfc || !ops->set_rxfh)
+ if (!ops->get_rxnfc || !ops->get_rxfh_fields || !ops->set_rxfh)
return -EOPNOTSUPP;
if (ops->get_rxfh_indir_size)
@@ -1628,6 +1689,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
}
goto out;
}
+ mod = !create && !rxfh_dev.rss_delete;
if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context),
&rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context)))
@@ -1697,6 +1759,8 @@ out:
if (locked)
mutex_unlock(&dev->ethtool->rss_lock);
kfree(rss_config);
+ if (mod)
+ ethtool_rss_notify(dev, rxfh.rss_context);
return ret;
}
@@ -1808,7 +1872,7 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
return ret;
dev->ethtool->wol_enabled = !!wol.wolopts;
- ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF);
return 0;
}
@@ -1884,7 +1948,7 @@ static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
eee_to_keee(&keee, &eee);
ret = dev->ethtool_ops->set_eee(dev, &keee);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF);
return ret;
}
@@ -2124,7 +2188,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce,
NULL);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_COALESCE_NTF);
return ret;
}
@@ -2168,7 +2232,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
&kernel_ringparam, NULL);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF);
return ret;
}
@@ -2235,7 +2299,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
ret = dev->ethtool_ops->set_channels(dev, &channels);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_CHANNELS_NTF);
return ret;
}
@@ -2266,7 +2330,7 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
ret = dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
if (!ret)
- ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_PAUSE_NTF);
return ret;
}
@@ -3268,7 +3332,7 @@ __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr,
rc = ethtool_set_value_void(dev, useraddr,
dev->ethtool_ops->set_msglevel);
if (!rc)
- ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_DEBUG_NTF);
break;
case ETHTOOL_GEEE:
rc = ethtool_get_eee(dev, useraddr);
@@ -3332,20 +3396,24 @@ __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr,
rc = ethtool_get_value(dev, useraddr, ethcmd,
dev->ethtool_ops->get_priv_flags);
if (!rc)
- ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_PRIVFLAGS_NTF);
break;
case ETHTOOL_SPFLAGS:
rc = ethtool_set_value(dev, useraddr,
dev->ethtool_ops->set_priv_flags);
break;
case ETHTOOL_GRXFH:
+ rc = ethtool_get_rxfh_fields(dev, ethcmd, useraddr);
+ break;
+ case ETHTOOL_SRXFH:
+ rc = ethtool_set_rxfh_fields(dev, ethcmd, useraddr);
+ break;
case ETHTOOL_GRXRINGS:
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
break;
- case ETHTOOL_SRXFH:
case ETHTOOL_SRXCLSRLDEL:
case ETHTOOL_SRXCLSRLINS:
rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
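With ETHTOOL_GRXFH/SRXFH split out of ethtool_get/set_rxnfc(), the legacy ioctl now lands in the new ethtool_get_rxfh_fields()/ethtool_set_rxfh_fields() wrappers above. A minimal userspace sketch of the GRXFH side, using the standard ethtool UAPI (the interface name "eth0" is only a placeholder):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = TCP_V4_FLOW,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXFH");
		return 1;
	}
	/* RXH_* bits describe which header fields feed the RSS hash;
	 * they are what ->get_rxfh_fields() returns in fields.data. */
	printf("tcp4 hash fields: %s%s%s%s\n",
	       nfc.data & RXH_IP_SRC ? "sip " : "",
	       nfc.data & RXH_IP_DST ? "dip " : "",
	       nfc.data & RXH_L4_B_0_1 ? "sport " : "",
	       nfc.data & RXH_L4_B_2_3 ? "dport" : "");
	return 0;
}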
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 9de828df46cd..09c81cc9a08f 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -863,8 +863,8 @@ static int ethnl_default_done(struct netlink_callback *cb)
static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
{
const struct ethnl_request_ops *ops;
- struct ethnl_req_info req_info = {};
const u8 cmd = info->genlhdr->cmd;
+ struct ethnl_req_info *req_info;
struct net_device *dev;
int ret;
@@ -874,20 +874,22 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
if (GENL_REQ_ATTR_CHECK(info, ops->hdr_attr))
return -EINVAL;
- ret = ethnl_parse_header_dev_get(&req_info, info->attrs[ops->hdr_attr],
- genl_info_net(info), info->extack,
- true);
+ req_info = kzalloc(ops->req_info_size, GFP_KERNEL);
+ if (!req_info)
+ return -ENOMEM;
+
+ ret = ethnl_default_parse(req_info, info, ops, true);
if (ret < 0)
- return ret;
+ goto out_free_req;
if (ops->set_validate) {
- ret = ops->set_validate(&req_info, info);
+ ret = ops->set_validate(req_info, info);
/* 0 means nothing to do */
if (ret <= 0)
goto out_dev;
}
- dev = req_info.dev;
+ dev = req_info->dev;
rtnl_lock();
netdev_lock_ops(dev);
@@ -902,14 +904,14 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info)
if (ret < 0)
goto out_free_cfg;
- ret = ops->set(&req_info, info);
+ ret = ops->set(req_info, info);
if (ret < 0)
goto out_ops;
swap(dev->cfg, dev->cfg_pending);
if (!ret)
goto out_ops;
- ethtool_notify(dev, ops->set_ntf_cmd, NULL);
+ ethnl_notify(dev, ops->set_ntf_cmd, req_info);
ret = 0;
out_ops:
@@ -921,7 +923,9 @@ out_tie_cfg:
netdev_unlock_ops(dev);
rtnl_unlock();
out_dev:
- ethnl_parse_header_dev_put(&req_info);
+ ethnl_parse_header_dev_put(req_info);
+out_free_req:
+ kfree(req_info);
return ret;
}
@@ -942,11 +946,12 @@ ethnl_default_notify_ops[ETHTOOL_MSG_KERNEL_MAX + 1] = {
[ETHTOOL_MSG_MODULE_NTF] = &ethnl_module_request_ops,
[ETHTOOL_MSG_PLCA_NTF] = &ethnl_plca_cfg_request_ops,
[ETHTOOL_MSG_MM_NTF] = &ethnl_mm_request_ops,
+ [ETHTOOL_MSG_RSS_NTF] = &ethnl_rss_request_ops,
};
/* default notification handler */
static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
- const void *data)
+ const struct ethnl_req_info *orig_req_info)
{
struct ethnl_reply_data *reply_data;
const struct ethnl_request_ops *ops;
@@ -975,6 +980,11 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
req_info->dev = dev;
req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS;
+ if (orig_req_info) {
+ req_info->phy_index = orig_req_info->phy_index;
+ memcpy(&req_info[1], &orig_req_info[1],
+ ops->req_info_size - sizeof(*req_info));
+ }
netdev_ops_assert_locked(dev);
@@ -1025,7 +1035,7 @@ err_rep:
/* notifications */
typedef void (*ethnl_notify_handler_t)(struct net_device *dev, unsigned int cmd,
- const void *data);
+ const struct ethnl_req_info *req_info);
static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_LINKINFO_NTF] = ethnl_default_notify,
@@ -1043,9 +1053,11 @@ static const ethnl_notify_handler_t ethnl_notify_handlers[] = {
[ETHTOOL_MSG_MODULE_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_PLCA_NTF] = ethnl_default_notify,
[ETHTOOL_MSG_MM_NTF] = ethnl_default_notify,
+ [ETHTOOL_MSG_RSS_NTF] = ethnl_default_notify,
};
-void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
+void ethnl_notify(struct net_device *dev, unsigned int cmd,
+ const struct ethnl_req_info *req_info)
{
if (unlikely(!ethnl_ok))
return;
@@ -1053,18 +1065,23 @@ void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data)
if (likely(cmd < ARRAY_SIZE(ethnl_notify_handlers) &&
ethnl_notify_handlers[cmd]))
- ethnl_notify_handlers[cmd](dev, cmd, data);
+ ethnl_notify_handlers[cmd](dev, cmd, req_info);
else
WARN_ONCE(1, "notification %u not implemented (dev=%s)\n",
cmd, netdev_name(dev));
}
+
+void ethtool_notify(struct net_device *dev, unsigned int cmd)
+{
+ ethnl_notify(dev, cmd, NULL);
+}
EXPORT_SYMBOL(ethtool_notify);
static void ethnl_notify_features(struct netdev_notifier_info *info)
{
struct net_device *dev = netdev_notifier_info_to_dev(info);
- ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF, NULL);
+ ethtool_notify(dev, ETHTOOL_MSG_FEATURES_NTF);
}
static int ethnl_netdev_event(struct notifier_block *this, unsigned long event,
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 91b953924af3..373a8d5e86ae 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -23,6 +23,8 @@ void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd);
void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd);
void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd);
int ethnl_multicast(struct sk_buff *skb, struct net_device *dev);
+void ethnl_notify(struct net_device *dev, unsigned int cmd,
+ const struct ethnl_req_info *req_info);
/**
* ethnl_strz_size() - calculate attribute length for fixed size string
@@ -337,6 +339,8 @@ int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid,
* header is already filled on entry, the rest up to @repdata_offset
* is zero initialized. This callback should only modify type specific
* request info by parsed attributes from request message.
+ * Called for both GET and SET. Information parsed for SET will
+ * be conveyed to the req_info used during NTF generation.
* @prepare_data:
* Retrieve and prepare data needed to compose a reply message. Calls to
* ethtool_ops handlers are limited to this callback. Common reply data
diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c
index 4f6b99eab2a6..24def9c9dd54 100644
--- a/net/ethtool/pse-pd.c
+++ b/net/ethtool/pse-pd.c
@@ -11,6 +11,7 @@
#include "netlink.h"
#include <linux/ethtool_netlink.h>
#include <linux/ethtool.h>
+#include <linux/export.h>
#include <linux/phy.h>
struct pse_req_info {
@@ -83,6 +84,8 @@ static int pse_reply_size(const struct ethnl_req_info *req_base,
const struct ethtool_pse_control_status *st = &data->status;
int len = 0;
+ if (st->pw_d_id)
+ len += nla_total_size(sizeof(u32)); /* _PSE_PW_D_ID */
if (st->podl_admin_state > 0)
len += nla_total_size(sizeof(u32)); /* _PODL_PSE_ADMIN_STATE */
if (st->podl_pw_status > 0)
@@ -109,6 +112,9 @@ static int pse_reply_size(const struct ethnl_req_info *req_base,
len += st->c33_pw_limit_nb_ranges *
(nla_total_size(0) +
nla_total_size(sizeof(u32)) * 2);
+ if (st->prio_max)
+ /* _PSE_PRIO_MAX + _PSE_PRIO */
+ len += nla_total_size(sizeof(u32)) * 2;
return len;
}
@@ -148,6 +154,11 @@ static int pse_fill_reply(struct sk_buff *skb,
const struct pse_reply_data *data = PSE_REPDATA(reply_base);
const struct ethtool_pse_control_status *st = &data->status;
+ if (st->pw_d_id &&
+ nla_put_u32(skb, ETHTOOL_A_PSE_PW_D_ID,
+ st->pw_d_id))
+ return -EMSGSIZE;
+
if (st->podl_admin_state > 0 &&
nla_put_u32(skb, ETHTOOL_A_PODL_PSE_ADMIN_STATE,
st->podl_admin_state))
@@ -198,6 +209,11 @@ static int pse_fill_reply(struct sk_buff *skb,
pse_put_pw_limit_ranges(skb, st))
return -EMSGSIZE;
+ if (st->prio_max &&
+ (nla_put_u32(skb, ETHTOOL_A_PSE_PRIO_MAX, st->prio_max) ||
+ nla_put_u32(skb, ETHTOOL_A_PSE_PRIO, st->prio)))
+ return -EMSGSIZE;
+
return 0;
}
@@ -219,6 +235,7 @@ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = {
NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED,
ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED),
[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 },
+ [ETHTOOL_A_PSE_PRIO] = { .type = NLA_U32 },
};
static int
@@ -267,6 +284,15 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info)
if (ret)
return ret;
+ if (tb[ETHTOOL_A_PSE_PRIO]) {
+ unsigned int prio;
+
+ prio = nla_get_u32(tb[ETHTOOL_A_PSE_PRIO]);
+ ret = pse_ethtool_set_prio(phydev->psec, info->extack, prio);
+ if (ret)
+ return ret;
+ }
+
if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) {
unsigned int pw_limit;
@@ -315,3 +341,42 @@ const struct ethnl_request_ops ethnl_pse_request_ops = {
.set = ethnl_set_pse,
/* PSE has no notification */
};
+
+void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notifs)
+{
+ void *reply_payload;
+ struct sk_buff *skb;
+ int reply_len;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (!netdev || !notifs)
+ return;
+
+ reply_len = ethnl_reply_header_size() +
+ nla_total_size(sizeof(u32)); /* _PSE_NTF_EVENTS */
+
+ skb = genlmsg_new(reply_len, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ reply_payload = ethnl_bcastmsg_put(skb, ETHTOOL_MSG_PSE_NTF);
+ if (!reply_payload)
+ goto err_skb;
+
+ ret = ethnl_fill_reply_header(skb, netdev, ETHTOOL_A_PSE_NTF_HEADER);
+ if (ret < 0)
+ goto err_skb;
+
+ if (nla_put_uint(skb, ETHTOOL_A_PSE_NTF_EVENTS, notifs))
+ goto err_skb;
+
+ genlmsg_end(skb, reply_payload);
+ ethnl_multicast(skb, netdev);
+ return;
+
+err_skb:
+ nlmsg_free(skb);
+}
+EXPORT_SYMBOL_GPL(ethnl_pse_send_ntf);
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 6d9b1769896b..3adddca7e215 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -358,6 +358,17 @@ int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
return ret;
}
+/* RSS_NTF */
+
+void ethtool_rss_notify(struct net_device *dev, u32 rss_context)
+{
+ struct rss_req_info req_info = {
+ .rss_context = rss_context,
+ };
+
+ ethnl_notify(dev, ETHTOOL_MSG_RSS_NTF, &req_info.base);
+}
+
const struct ethnl_request_ops ethnl_rss_request_ops = {
.request_cmd = ETHTOOL_MSG_RSS_GET,
.reply_cmd = ETHTOOL_MSG_RSS_GET_REPLY,
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a648fff71ea7..c0440d61cf2f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -966,6 +966,7 @@ static int arp_is_multicast(const void *pkey)
static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
+ enum skb_drop_reason drop_reason;
const struct arphdr *arp;
/* do not tweak dropwatch on an ARP we will ignore */
@@ -979,12 +980,15 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
goto out_of_mem;
/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
+ drop_reason = pskb_may_pull_reason(skb, arp_hdr_len(dev));
+ if (drop_reason != SKB_NOT_DROPPED_YET)
goto freeskb;
arp = arp_hdr(skb);
- if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
+ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4) {
+ drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
goto freeskb;
+ }
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
@@ -996,7 +1000,7 @@ consumeskb:
consume_skb(skb);
return NET_RX_SUCCESS;
freeskb:
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
out_of_mem:
return NET_RX_DROP;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d643bd1a0d9d..f7c9c6a9f53e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1640,8 +1640,7 @@ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex))
goto nla_put_failure;
- if (nhc->nhc_lwtstate &&
- lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
+ if (lwtunnel_fill_encap(skb, nhc->nhc_lwtstate,
RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6906bedad19a..f4157d26ec9e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -168,7 +168,7 @@ static bool inet_use_bhash2_on_bind(const struct sock *sk)
}
static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
- kuid_t sk_uid, bool relax,
+ kuid_t uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
int bound_dev_if2;
@@ -185,12 +185,12 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
if (!relax || (!reuseport_ok && sk->sk_reuseport &&
sk2->sk_reuseport && reuseport_cb_ok &&
(sk2->sk_state == TCP_TIME_WAIT ||
- uid_eq(sk_uid, sock_i_uid(sk2)))))
+ uid_eq(uid, sk_uid(sk2)))))
return true;
} else if (!reuseport_ok || !sk->sk_reuseport ||
!sk2->sk_reuseport || !reuseport_cb_ok ||
(sk2->sk_state != TCP_TIME_WAIT &&
- !uid_eq(sk_uid, sock_i_uid(sk2)))) {
+ !uid_eq(uid, sk_uid(sk2)))) {
return true;
}
}
@@ -198,7 +198,7 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
}
static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
- kuid_t sk_uid, bool relax,
+ kuid_t uid, bool relax,
bool reuseport_cb_ok, bool reuseport_ok)
{
if (ipv6_only_sock(sk2)) {
@@ -211,20 +211,20 @@ static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
#endif
}
- return inet_bind_conflict(sk, sk2, sk_uid, relax,
+ return inet_bind_conflict(sk, sk2, uid, relax,
reuseport_cb_ok, reuseport_ok);
}
static bool inet_bhash2_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2,
- kuid_t sk_uid,
+ kuid_t uid,
bool relax, bool reuseport_cb_ok,
bool reuseport_ok)
{
struct sock *sk2;
sk_for_each_bound(sk2, &tb2->owners) {
- if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
+ if (__inet_bhash2_conflict(sk, sk2, uid, relax,
reuseport_cb_ok, reuseport_ok))
return true;
}
@@ -242,8 +242,8 @@ static int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind2_bucket *tb2, /* may be null */
bool relax, bool reuseport_ok)
{
- kuid_t uid = sock_i_uid((struct sock *)sk);
struct sock_reuseport *reuseport_cb;
+ kuid_t uid = sk_uid(sk);
bool reuseport_cb_ok;
struct sock *sk2;
@@ -287,11 +287,11 @@ static int inet_csk_bind_conflict(const struct sock *sk,
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
bool relax, bool reuseport_ok)
{
- kuid_t uid = sock_i_uid((struct sock *)sk);
const struct net *net = sock_net(sk);
struct sock_reuseport *reuseport_cb;
struct inet_bind_hashbucket *head2;
struct inet_bind2_bucket *tb2;
+ kuid_t uid = sk_uid(sk);
bool conflict = false;
bool reuseport_cb_ok;
@@ -425,15 +425,13 @@ success:
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
struct sock *sk)
{
- kuid_t uid = sock_i_uid(sk);
-
if (tb->fastreuseport <= 0)
return 0;
if (!sk->sk_reuseport)
return 0;
if (rcu_access_pointer(sk->sk_reuseport_cb))
return 0;
- if (!uid_eq(tb->fastuid, uid))
+ if (!uid_eq(tb->fastuid, sk_uid(sk)))
return 0;
/* We only need to check the rcv_saddr if this tb was once marked
* without fastreuseport and then was reset, as we can only know that
@@ -458,14 +456,13 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct sock *sk)
{
- kuid_t uid = sock_i_uid(sk);
bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
if (hlist_empty(&tb->bhash2)) {
tb->fastreuse = reuse;
if (sk->sk_reuseport) {
tb->fastreuseport = FASTREUSEPORT_ANY;
- tb->fastuid = uid;
+ tb->fastuid = sk_uid(sk);
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
@@ -492,7 +489,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
*/
if (!sk_reuseport_match(tb, sk)) {
tb->fastreuseport = FASTREUSEPORT_STRICT;
- tb->fastuid = uid;
+ tb->fastuid = sk_uid(sk);
tb->fast_rcv_saddr = sk->sk_rcv_saddr;
tb->fast_ipv6_only = ipv6_only_sock(sk);
tb->fast_sk_family = sk->sk_family;
@@ -812,7 +809,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
- htons(ireq->ir_num), sk->sk_uid);
+ htons(ireq->ir_num), sk_uid(sk));
security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
@@ -849,7 +846,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
sk->sk_protocol, inet_sk_flowi_flags(sk),
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
ireq->ir_loc_addr, ireq->ir_rmt_port,
- htons(ireq->ir_num), sk->sk_uid);
+ htons(ireq->ir_num), sk_uid(sk));
security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1d1d6ad53f4c..2fa53b16fe77 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -181,7 +181,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
goto errout;
#endif
- r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+ r->idiag_uid = from_kuid_munged(user_ns, sk_uid(sk));
r->idiag_inode = sock_i_ino(sk);
memset(&inet_sockopt, 0, sizeof(inet_sockopt));
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 77a0b52b2eab..ceeeec9b7290 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -721,8 +721,8 @@ static int inet_reuseport_add_sock(struct sock *sk,
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
const struct hlist_nulls_node *node;
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
- kuid_t uid = sock_i_uid(sk);
sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
@@ -730,7 +730,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
inet_csk(sk2)->icsk_bind_hash == tb &&
- sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
+ sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false))
return reuseport_add_sock(sk, sk2,
inet_rcv_saddr_any(sk));
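These conversions replace sock_i_uid(), removed from net/core/sock.c above (it dereferenced SOCK_INODE(sk->sk_socket)->i_uid under sk_callback_lock), with the new sk_uid() helper. The helper's definition is outside this hunk; given that sk->sk_uid already exists (it is passed to flowi4_init_output() in several hunks below), it presumably reduces to a direct field read, roughly:

/* Hypothetical sketch of the helper; the in-tree definition lives outside
 * this diff and may differ in detail (e.g. use of READ_ONCE()). */
static inline kuid_t sk_uid(const struct sock *sk)
{
	/* Read the owning uid cached on the socket itself instead of going
	 * through the socket inode under sk_callback_lock. */
	return sk->sk_uid;
}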
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 678b8f96e3e9..aaeb5d16f0c9 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -668,7 +668,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_adj_headroom(dev, headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
- df, !net_eq(tunnel->net, dev_net(dev)));
+ df, !net_eq(tunnel->net, dev_net(dev)), 0);
return;
tx_error:
DEV_STATS_INC(dev, tx_errors);
@@ -857,7 +857,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_adj_headroom(dev, max_headroom);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
- df, !net_eq(tunnel->net, dev_net(dev)));
+ df, !net_eq(tunnel->net, dev_net(dev)), 0);
return;
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index f65d2f727381..cc9915543637 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(ip6tun_encaps);
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df, bool xnet)
+ __u8 tos, __u8 ttl, __be16 df, bool xnet,
+ u16 ipcb_flags)
{
int pkt_len = skb->len - skb_inner_network_offset(skb);
struct net *net = dev_net(rt->dst.dev);
@@ -62,6 +63,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb_clear_hash_if_not_l4(skb);
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ IPCB(skb)->flags = ipcb_flags;
/* Push down and install the IP header. */
skb_push(skb, sizeof(struct iphdr));
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a7d09ae9d761..f78c4e53dc8c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1853,20 +1853,19 @@ static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
/* Processing handlers for ipmr_forward, under rcu_read_lock() */
-static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
- int in_vifi, struct sk_buff *skb, int vifi)
+static int ipmr_prepare_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
{
const struct iphdr *iph = ip_hdr(skb);
struct vif_device *vif = &mrt->vif_table[vifi];
struct net_device *vif_dev;
- struct net_device *dev;
struct rtable *rt;
struct flowi4 fl4;
int encap = 0;
vif_dev = vif_dev_read(vif);
if (!vif_dev)
- goto out_free;
+ return -1;
if (vif->flags & VIFF_REGISTER) {
WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
@@ -1874,12 +1873,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
DEV_STATS_INC(vif_dev, tx_packets);
ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
- goto out_free;
+ return -1;
}
- if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
- goto out_free;
-
if (vif->flags & VIFF_TUNNEL) {
rt = ip_route_output_ports(net, &fl4, NULL,
vif->remote, vif->local,
@@ -1887,7 +1883,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
IPPROTO_IPIP,
iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
- goto out_free;
+ return -1;
encap = sizeof(struct iphdr);
} else {
rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
@@ -1895,11 +1891,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
IPPROTO_IPIP,
iph->tos & INET_DSCP_MASK, vif->link);
if (IS_ERR(rt))
- goto out_free;
+ return -1;
}
- dev = rt->dst.dev;
-
if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
* allow to send ICMP, so that packets will disappear
@@ -1907,14 +1901,14 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
*/
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
ip_rt_put(rt);
- goto out_free;
+ return -1;
}
- encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
+ encap += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
if (skb_cow(skb, encap)) {
ip_rt_put(rt);
- goto out_free;
+ return -1;
}
WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
@@ -1934,6 +1928,22 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
}
+ return 0;
+}
+
+static void ipmr_queue_fwd_xmit(struct net *net, struct mr_table *mrt,
+ int in_vifi, struct sk_buff *skb, int vifi)
+{
+ struct rtable *rt;
+
+ if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
+ goto out_free;
+
+ if (ipmr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
+
+ rt = skb_rtable(skb);
+
IPCB(skb)->flags |= IPSKB_FORWARDED;
/* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
@@ -1947,7 +1957,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
* result in receiving multiple packets.
*/
NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
- net, NULL, skb, skb->dev, dev,
+ net, NULL, skb, skb->dev, rt->dst.dev,
ipmr_forward_finish);
return;
@@ -1955,6 +1965,19 @@ out_free:
kfree_skb(skb);
}
+static void ipmr_queue_output_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
+{
+ if (ipmr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
+
+ ip_mc_output(net, NULL, skb);
+ return;
+
+out_free:
+ kfree_skb(skb);
+}
+
/* Called with mrt_lock or rcu_read_lock() */
static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
{
@@ -2065,8 +2088,8 @@ forward:
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(net, mrt, true_vifi,
- skb2, psend);
+ ipmr_queue_fwd_xmit(net, mrt, true_vifi,
+ skb2, psend);
}
psend = ct;
}
@@ -2077,10 +2100,10 @@ last_forward:
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ipmr_queue_xmit(net, mrt, true_vifi, skb2,
- psend);
+ ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb2,
+ psend);
} else {
- ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
+ ipmr_queue_fwd_xmit(net, mrt, true_vifi, skb, psend);
return;
}
}
@@ -2214,6 +2237,110 @@ dont_forward:
return 0;
}
+static void ip_mr_output_finish(struct net *net, struct mr_table *mrt,
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc_cache *c)
+{
+ int psend = -1;
+ int ct;
+
+ atomic_long_inc(&c->_c.mfc_un.res.pkt);
+ atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
+ WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+
+ /* Forward the frame */
+ if (c->mfc_origin == htonl(INADDR_ANY) &&
+ c->mfc_mcastgrp == htonl(INADDR_ANY)) {
+ if (ip_hdr(skb)->ttl >
+ c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
+ /* It's an (*,*) entry and the packet is not coming from
+ * the upstream: forward the packet to the upstream
+ * only.
+ */
+ psend = c->_c.mfc_parent;
+ goto last_xmit;
+ }
+ goto dont_xmit;
+ }
+
+ for (ct = c->_c.mfc_un.res.maxvif - 1;
+ ct >= c->_c.mfc_un.res.minvif; ct--) {
+ if (ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
+ if (psend != -1) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2)
+ ipmr_queue_output_xmit(net, mrt,
+ skb2, psend);
+ }
+ psend = ct;
+ }
+ }
+
+last_xmit:
+ if (psend != -1) {
+ ipmr_queue_output_xmit(net, mrt, skb, psend);
+ return;
+ }
+
+dont_xmit:
+ kfree_skb(skb);
+}
+
+/* Multicast packets for forwarding arrive here
+ * Called with rcu_read_lock();
+ */
+int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct rtable *rt = skb_rtable(skb);
+ struct mfc_cache *cache;
+ struct net_device *dev;
+ struct mr_table *mrt;
+ int vif;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ dev = rt->dst.dev;
+
+ if (IPCB(skb)->flags & IPSKB_FORWARDED)
+ goto mc_output;
+ if (!(IPCB(skb)->flags & IPSKB_MCROUTE))
+ goto mc_output;
+
+ skb->dev = dev;
+
+ mrt = ipmr_rt_fib_lookup(net, skb);
+ if (IS_ERR(mrt))
+ goto mc_output;
+
+ /* already under rcu_read_lock() */
+ cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+ if (!cache) {
+ vif = ipmr_find_vif(mrt, dev);
+ if (vif >= 0)
+ cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
+ vif);
+ }
+
+ /* No usable cache entry */
+ if (!cache) {
+ vif = ipmr_find_vif(mrt, dev);
+ if (vif >= 0)
+ return ipmr_cache_unresolved(mrt, vif, skb, dev);
+ goto mc_output;
+ }
+
+ vif = cache->_c.mfc_parent;
+ if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev)
+ goto mc_output;
+
+ ip_mr_output_finish(net, mrt, dev, skb, cache);
+ return 0;
+
+mc_output:
+ return ip_mc_output(net, sk, skb);
+}
+
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 4397e89d3123..e808801ab9b8 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -985,8 +985,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
break;
}
- if (nhi->fib_nhc.nhc_lwtstate &&
- lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
+ if (lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
goto nla_put_failure;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c14baa6589c7..031df4c19fcc 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -781,7 +781,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark,
ipc.tos & INET_DSCP_MASK, scope,
sk->sk_protocol, inet_sk_flowi_flags(sk), faddr,
- saddr, 0, 0, sk->sk_uid);
+ saddr, 0, 0, sk_uid(sk));
fl4.fl4_icmp_type = user_icmph.type;
fl4.fl4_icmp_code = user_icmph.code;
@@ -1116,7 +1116,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
- from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6aace4d55733..1d2c89d63cc7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -610,7 +610,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
hdrincl ? ipc.protocol : sk->sk_protocol,
inet_sk_flowi_flags(sk) |
(hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
- daddr, saddr, 0, 0, sk->sk_uid);
+ daddr, saddr, 0, 0, sk_uid(sk));
fl4.fl4_icmp_type = 0;
fl4.fl4_icmp_code = 0;
@@ -1043,7 +1043,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fccb05fb3a79..a2b7cadf66af 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -556,7 +556,8 @@ static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
inet_test_bit(HDRINCL, sk) ?
IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
- daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
+ daddr, inet->inet_saddr, 0, 0,
+ sk_uid(sk));
rcu_read_unlock();
}
@@ -2660,7 +2661,7 @@ add:
if (IN_DEV_MFORWARD(in_dev) &&
!ipv4_is_local_multicast(fl4->daddr)) {
rth->dst.input = ip_mr_input;
- rth->dst.output = ip_mc_output;
+ rth->dst.output = ip_mr_output;
}
}
#endif
@@ -2977,8 +2978,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
if (rt->dst.dev &&
nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
goto nla_put_failure;
- if (rt->dst.lwtstate &&
- lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ if (lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid &&
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 5459a78b9809..eb0819463fae 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -454,7 +454,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
IPPROTO_TCP, inet_sk_flowi_flags(sk),
opt->srr ? opt->faddr : ireq->ir_rmt_addr,
- ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
+ ireq->ir_loc_addr, th->source, th->dest,
+ sk_uid(sk));
security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt)) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f64f8276a73c..8a3c99246d2e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -5053,9 +5053,8 @@ static void __init tcp_struct_check(void)
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs);
- CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint);
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint);
- CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40);
+ CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 32);
/* TXRX read-mostly hotpath cache lines */
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset);
@@ -5243,6 +5242,6 @@ void __init tcp_init(void)
tcp_v4_init();
tcp_metrics_init();
BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
- tcp_tasklet_init();
+ tcp_tsq_work_init();
mptcp_init();
}
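Note on the size assertion above: dropping lost_skb_hint removes one pointer from the tcp_sock_read_tx group, 8 bytes on 64-bit builds, which accounts exactly for the 40 to 32 change.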
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 12c2e6fc85c6..19a1542883df 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1451,11 +1451,6 @@ static u8 tcp_sacktag_one(struct sock *sk,
tp->sacked_out += pcount;
/* Out-of-order packets delivered */
state->sack_delivered += pcount;
-
- /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
- if (tp->lost_skb_hint &&
- before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
- tp->lost_cnt_hint += pcount;
}
/* D-SACK. We can detect redundant retransmission in S|R and plain R
@@ -1496,9 +1491,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
tcp_skb_timestamp_us(skb));
tcp_rate_skb_delivered(sk, skb, state->rate);
- if (skb == tp->lost_skb_hint)
- tp->lost_cnt_hint += pcount;
-
TCP_SKB_CB(prev)->end_seq += shifted;
TCP_SKB_CB(skb)->seq += shifted;
@@ -1531,10 +1523,6 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
if (skb == tp->retransmit_skb_hint)
tp->retransmit_skb_hint = prev;
- if (skb == tp->lost_skb_hint) {
- tp->lost_skb_hint = prev;
- tp->lost_cnt_hint -= tcp_skb_pcount(prev);
- }
TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
@@ -2151,12 +2139,6 @@ static inline void tcp_init_undo(struct tcp_sock *tp)
tp->undo_retrans = -1;
}
-static bool tcp_is_rack(const struct sock *sk)
-{
- return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
- TCP_RACK_LOSS_DETECTION;
-}
-
/* If we detect SACK reneging, forget all SACK information
* and reset tags completely, otherwise preserve SACKs. If receiver
* dropped its ofo queue, we will know this due to reneging detection.
@@ -2182,8 +2164,7 @@ static void tcp_timeout_mark_lost(struct sock *sk)
skb_rbtree_walk_from(skb) {
if (is_reneg)
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
- else if (tcp_is_rack(sk) && skb != head &&
- tcp_rack_skb_timeout(tp, skb, 0) > 0)
+ else if (skb != head && tcp_rack_skb_timeout(tp, skb, 0) > 0)
continue; /* Don't mark recently sent ones lost yet */
tcp_mark_skb_lost(sk, skb);
}
@@ -2264,22 +2245,6 @@ static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
return false;
}
-/* Heurestics to calculate number of duplicate ACKs. There's no dupACKs
- * counter when SACK is enabled (without SACK, sacked_out is used for
- * that purpose).
- *
- * With reordering, holes may still be in flight, so RFC3517 recovery
- * uses pure sacked_out (total number of SACKed segments) even though
- * it violates the RFC that uses duplicate ACKs, often these are equal
- * but when e.g. out-of-window ACKs or packet duplication occurs,
- * they differ. Since neither occurs due to loss, TCP should really
- * ignore them.
- */
-static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
-{
- return tp->sacked_out + 1;
-}
-
/* Linux NewReno/SACK/ECN state machine.
* --------------------------------------
*
@@ -2332,13 +2297,7 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
*
* If the receiver supports SACK:
*
- * RFC6675/3517: It is the conventional algorithm. A packet is
- * considered lost if the number of higher sequence packets
- * SACKed is greater than or equal the DUPACK thoreshold
- * (reordering). This is implemented in tcp_mark_head_lost and
- * tcp_update_scoreboard.
- *
- * RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
+ * RACK (RFC8985): RACK is a newer loss detection algorithm
* (2017-) that checks timing instead of counting DUPACKs.
* Essentially a packet is considered lost if it's not S/ACKed
* after RTT + reordering_window, where both metrics are
@@ -2353,8 +2312,8 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
* is lost (NewReno). This heuristics are the same in NewReno
* and SACK.
*
- * Really tricky (and requiring careful tuning) part of algorithm
- * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
+ * The really tricky (and requiring careful tuning) part of the algorithm
+ * is hidden in the RACK code in tcp_recovery.c and tcp_xmit_retransmit_queue().
* The first determines the moment _when_ we should reduce CWND and,
* hence, slow down forward transmission. In fact, it determines the moment
* when we decide that hole is caused by loss, rather than by a reorder.
@@ -2377,83 +2336,10 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
* Main question: may we further continue forward transmission
* with the same cwnd?
*/
-static bool tcp_time_to_recover(struct sock *sk, int flag)
+static bool tcp_time_to_recover(const struct tcp_sock *tp)
{
- struct tcp_sock *tp = tcp_sk(sk);
-
- /* Trick#1: The loss is proven. */
- if (tp->lost_out)
- return true;
-
- /* Not-A-Trick#2 : Classic rule... */
- if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
- return true;
-
- return false;
-}
-
-/* Detect loss in event "A" above by marking head of queue up as lost.
- * For RFC3517 SACK, a segment is considered lost if it
- * has at least tp->reordering SACKed seqments above it; "packets" refers to
- * the maximum SACKed segments to pass before reaching this limit.
- */
-static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb;
- int cnt;
- /* Use SACK to deduce losses of new sequences sent during recovery */
- const u32 loss_high = tp->snd_nxt;
-
- WARN_ON(packets > tp->packets_out);
- skb = tp->lost_skb_hint;
- if (skb) {
- /* Head already handled? */
- if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
- return;
- cnt = tp->lost_cnt_hint;
- } else {
- skb = tcp_rtx_queue_head(sk);
- cnt = 0;
- }
-
- skb_rbtree_walk_from(skb) {
- /* TODO: do this better */
- /* this is not the most efficient way to do this... */
- tp->lost_skb_hint = skb;
- tp->lost_cnt_hint = cnt;
-
- if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
- break;
-
- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
- cnt += tcp_skb_pcount(skb);
-
- if (cnt > packets)
- break;
-
- if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST))
- tcp_mark_skb_lost(sk, skb);
-
- if (mark_head)
- break;
- }
- tcp_verify_left_out(tp);
-}
-
-/* Account newly detected lost packet(s) */
-
-static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (tcp_is_sack(tp)) {
- int sacked_upto = tp->sacked_out - tp->reordering;
- if (sacked_upto >= 0)
- tcp_mark_head_lost(sk, sacked_upto, 0);
- else if (fast_rexmit)
- tcp_mark_head_lost(sk, 1, 1);
- }
+ /* Has loss detection marked at least one packet lost? */
+ return tp->lost_out != 0;
}
static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
@@ -2894,8 +2780,6 @@ void tcp_simple_retransmit(struct sock *sk)
tcp_mark_skb_lost(sk, skb);
}
- tcp_clear_retrans_hints_partial(tp);
-
if (!tp->lost_out)
return;
@@ -3003,17 +2887,8 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
*rexmit = REXMIT_LOST;
}
-static bool tcp_force_fast_retransmit(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- return after(tcp_highest_sack_seq(tp),
- tp->snd_una + tp->reordering * tp->mss_cache);
-}
-
/* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
- bool *do_lost)
+static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -3038,9 +2913,6 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
tcp_undo_cwnd_reduction(sk, true);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
tcp_try_keep_open(sk);
- } else {
- /* Partial ACK arrived. Force fast retransmit. */
- *do_lost = tcp_force_fast_retransmit(sk);
}
return false;
}
@@ -3054,7 +2926,7 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
if (unlikely(tcp_is_reno(tp))) {
tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
- } else if (tcp_is_rack(sk)) {
+ } else {
u32 prior_retrans = tp->retrans_out;
if (tcp_rack_mark_lost(sk))
@@ -3081,10 +2953,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int fast_rexmit = 0, flag = *ack_flag;
+ int flag = *ack_flag;
bool ece_ack = flag & FLAG_ECE;
- bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
- tcp_force_fast_retransmit(sk));
if (!tp->packets_out && tp->sacked_out)
tp->sacked_out = 0;
@@ -3133,7 +3003,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (tcp_is_reno(tp))
tcp_add_reno_sack(sk, num_dupack, ece_ack);
- } else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
+ } else if (tcp_try_undo_partial(sk, prior_snd_una))
return;
if (tcp_try_undo_dsack(sk))
@@ -3141,7 +3011,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tcp_identify_packet_loss(sk, ack_flag);
if (icsk->icsk_ca_state != TCP_CA_Recovery) {
- if (!tcp_time_to_recover(sk, flag))
+ if (!tcp_time_to_recover(tp))
return;
/* Undo reverts the recovery state. If loss is evident,
* starts a new recovery (e.g. reordering then loss);
@@ -3170,7 +3040,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tcp_try_undo_dsack(sk);
tcp_identify_packet_loss(sk, ack_flag);
- if (!tcp_time_to_recover(sk, flag)) {
+ if (!tcp_time_to_recover(tp)) {
tcp_try_to_open(sk, flag);
return;
}
@@ -3188,11 +3058,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
/* Otherwise enter Recovery state */
tcp_enter_recovery(sk, ece_ack);
- fast_rexmit = 1;
}
- if (!tcp_is_rack(sk) && do_lost)
- tcp_update_scoreboard(sk, fast_rexmit);
*rexmit = REXMIT_LOST;
}
@@ -3448,8 +3315,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
next = skb_rb_next(skb);
if (unlikely(skb == tp->retransmit_skb_hint))
tp->retransmit_skb_hint = NULL;
- if (unlikely(skb == tp->lost_skb_hint))
- tp->lost_skb_hint = NULL;
tcp_highest_sack_replace(sk, skb, next);
tcp_rtx_queue_unlink_and_free(skb, sk);
}
@@ -3507,14 +3372,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
if (flag & FLAG_RETRANS_DATA_ACKED)
flag &= ~FLAG_ORIG_SACK_ACKED;
} else {
- int delta;
-
/* Non-retransmitted hole got filled? That's reordering */
if (before(reord, prior_fack))
tcp_check_sack_reordering(sk, reord, 0);
-
- delta = prior_sacked - tp->sacked_out;
- tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
}
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6a14f9e6fef6..429fb34b075e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2896,7 +2896,7 @@ static void get_openreq4(const struct request_sock *req,
jiffies_delta_to_clock_t(delta),
req->num_timeout,
from_kuid_munged(seq_user_ns(f),
- sock_i_uid(req->rsk_listener)),
+ sk_uid(req->rsk_listener)),
0, /* non standard timer */
0, /* open_requests have no inode */
0,
@@ -2954,7 +2954,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
- from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(f), sk_uid(sk)),
icsk->icsk_probes_out,
sock_i_ino(sk),
refcount_read(&sk->sk_refcnt), sk,
@@ -3246,9 +3246,9 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
const struct request_sock *req = v;
uid = from_kuid_munged(seq_user_ns(seq),
- sock_i_uid(req->rsk_listener));
+ sk_uid(req->rsk_listener));
} else {
- uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+ uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
}
meta.seq = seq;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3ac8d2d17e1f..28f840724fe8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1066,15 +1066,15 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
* needs to be reallocated in a driver.
* The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
*
- * Since transmit from skb destructor is forbidden, we use a tasklet
+ * Since transmit from skb destructor is forbidden, we use a BH work item
* to process all sockets that eventually need to send more skbs.
- * We use one tasklet per cpu, with its own queue of sockets.
+ * We use one work item per cpu, with its own queue of sockets.
*/
-struct tsq_tasklet {
- struct tasklet_struct tasklet;
+struct tsq_work {
+ struct work_struct work;
struct list_head head; /* queue of tcp sockets */
};
-static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
+static DEFINE_PER_CPU(struct tsq_work, tsq_work);
static void tcp_tsq_write(struct sock *sk)
{
@@ -1104,14 +1104,14 @@ static void tcp_tsq_handler(struct sock *sk)
bh_unlock_sock(sk);
}
/*
- * One tasklet per cpu tries to send more skbs.
- * We run in tasklet context but need to disable irqs when
+ * One work item per cpu tries to send more skbs.
+ * We run in BH context but need to disable irqs when
* transferring tsq->head because tcp_wfree() might
* interrupt us (non NAPI drivers)
*/
-static void tcp_tasklet_func(struct tasklet_struct *t)
+static void tcp_tsq_workfn(struct work_struct *work)
{
- struct tsq_tasklet *tsq = from_tasklet(tsq, t, tasklet);
+ struct tsq_work *tsq = container_of(work, struct tsq_work, work);
LIST_HEAD(list);
unsigned long flags;
struct list_head *q, *n;
@@ -1181,15 +1181,15 @@ void tcp_release_cb(struct sock *sk)
}
EXPORT_IPV6_MOD(tcp_release_cb);
-void __init tcp_tasklet_init(void)
+void __init tcp_tsq_work_init(void)
{
int i;
for_each_possible_cpu(i) {
- struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
+ struct tsq_work *tsq = &per_cpu(tsq_work, i);
INIT_LIST_HEAD(&tsq->head);
- tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
+ INIT_WORK(&tsq->work, tcp_tsq_workfn);
}
}
@@ -1203,11 +1203,11 @@ void tcp_wfree(struct sk_buff *skb)
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
unsigned long flags, nval, oval;
- struct tsq_tasklet *tsq;
+ struct tsq_work *tsq;
bool empty;
/* Keep one reference on sk_wmem_alloc.
- * Will be released by sk_free() from here or tcp_tasklet_func()
+ * Will be released by sk_free() from here or tcp_tsq_workfn()
*/
WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
@@ -1229,13 +1229,13 @@ void tcp_wfree(struct sk_buff *skb)
nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
} while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
- /* queue this socket to tasklet queue */
+ /* queue this socket to BH workqueue */
local_irq_save(flags);
- tsq = this_cpu_ptr(&tsq_tasklet);
+ tsq = this_cpu_ptr(&tsq_work);
empty = list_empty(&tsq->head);
list_add(&tp->tsq_node, &tsq->head);
if (empty)
- tasklet_schedule(&tsq->tasklet);
+ queue_work(system_bh_wq, &tsq->work);
local_irq_restore(flags);
return;
out:
@@ -1554,11 +1554,6 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
if (tcp_is_reno(tp) && decr > 0)
tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
- if (tp->lost_skb_hint &&
- before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
- (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
- tp->lost_cnt_hint -= decr;
-
tcp_verify_left_out(tp);
}
@@ -2639,7 +2634,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
/* Always send skb if rtx queue is empty or has one skb.
* No need to wait for TX completion to call us back,
- * after softirq/tasklet schedule.
+ * after softirq schedule.
* This helps when TX completions are delayed too much.
*/
if (tcp_rtx_queue_empty_or_single_skb(sk))
@@ -3252,7 +3247,6 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
/* changed transmit queue under us so clear hints */
- tcp_clear_retrans_hints_partial(tp);
if (next_skb == tp->retransmit_skb_hint)
tp->retransmit_skb_hint = skb;
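The tcp_output.c hunks above convert TCP Small Queues from a per-CPU tasklet to a per-CPU work item queued on the BH workqueue. A minimal sketch of that pattern follows, assuming only the generic per-CPU and workqueue APIs visible in the hunks; the struct and function names are illustrative and the actual TSQ processing is elided.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

/* One deferred-send queue per CPU, flushed from a BH work item. */
struct my_bh_queue {
	struct work_struct work;
	struct list_head head;		/* entries queued from (soft)irq */
};

static DEFINE_PER_CPU(struct my_bh_queue, my_bh_queue);

static void my_bh_workfn(struct work_struct *work)
{
	struct my_bh_queue *q = container_of(work, struct my_bh_queue, work);
	unsigned long flags;
	LIST_HEAD(list);

	/* Steal the whole list; producers may run from hard irq context. */
	local_irq_save(flags);
	list_splice_init(&q->head, &list);
	local_irq_restore(flags);

	/* ... walk 'list' and transmit, still in BH context ... */
}

static void my_bh_queue_add(struct list_head *node)
{
	struct my_bh_queue *q;
	unsigned long flags;
	bool empty;

	local_irq_save(flags);
	q = this_cpu_ptr(&my_bh_queue);
	empty = list_empty(&q->head);
	list_add(node, &q->head);
	if (empty)	/* only the first entry needs to schedule the work */
		queue_work(system_bh_wq, &q->work);
	local_irq_restore(flags);
}

static void __init my_bh_queue_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct my_bh_queue *q = &per_cpu(my_bh_queue, i);

		INIT_LIST_HEAD(&q->head);
		INIT_WORK(&q->work, my_bh_workfn);
	}
}

Work queued on system_bh_wq still runs in bottom-half context, which is why the comments in the hunks only change "tasklet" to "BH work item" rather than to process context.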
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index dde52b8050b8..19573ee64a0f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -145,8 +145,8 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
unsigned long *bitmap,
struct sock *sk, unsigned int log)
{
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
- kuid_t uid = sock_i_uid(sk);
sk_for_each(sk2, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
@@ -158,7 +158,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
- uid_eq(uid, sock_i_uid(sk2))) {
+ uid_eq(uid, sk_uid(sk2))) {
if (!bitmap)
return 0;
} else {
@@ -180,8 +180,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
struct udp_hslot *hslot2,
struct sock *sk)
{
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
- kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
@@ -195,7 +195,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
- uid_eq(uid, sock_i_uid(sk2))) {
+ uid_eq(uid, sk_uid(sk2))) {
res = 0;
} else {
res = 1;
@@ -210,7 +210,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
struct net *net = sock_net(sk);
- kuid_t uid = sock_i_uid(sk);
+ kuid_t uid = sk_uid(sk);
struct sock *sk2;
sk_for_each(sk2, &hslot->head) {
@@ -220,7 +220,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
(udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
(sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
- sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
+ sk2->sk_reuseport && uid_eq(uid, sk_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false)) {
return reuseport_add_sock(sk, sk2,
inet_rcv_saddr_any(sk));
@@ -1445,7 +1445,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark,
ipc.tos & INET_DSCP_MASK, scope,
sk->sk_protocol, flow_flags, faddr, saddr,
- dport, inet->inet_sport, sk->sk_uid);
+ dport, inet->inet_sport,
+ sk_uid(sk));
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
rt = ip_route_output_flow(net, fl4, sk);
@@ -3386,7 +3387,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
sk_wmem_alloc_get(sp),
udp_rqueue_get(sp),
0, 0L, 0,
- from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(f), sk_uid(sp)),
0, sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
@@ -3629,7 +3630,7 @@ static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
goto unlock;
}
- uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+ uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
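Across this series sock_i_uid(sk) is replaced by sk_uid(sk) in paths that only need the owning uid. As an assumption for illustration only (the in-tree helper may be defined differently), such an accessor can be a plain annotated read of the uid cached on the socket, avoiding the lock sock_i_uid() takes to reach the backing inode:

#include <net/sock.h>

/* Hypothetical lockless uid accessor; not the kernel's sk_uid(). */
static inline kuid_t sk_uid_sketch(const struct sock *sk)
{
	/* sk->sk_uid is set at socket creation and updated when the
	 * socket's owner changes; a READ_ONCE() is sufficient on the
	 * read side, no lock required.
	 */
	return READ_ONCE(sk->sk_uid);
}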
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 2326548997d3..fce945f23069 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -134,15 +134,17 @@ void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
struct udp_tunnel_info ti;
struct net_device *dev;
+ ASSERT_RTNL();
+
ti.type = type;
ti.sa_family = sk->sk_family;
ti.port = inet_sk(sk)->inet_sport;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_nic_add_port(dev, &ti);
+ udp_tunnel_nic_unlock(dev);
}
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);
@@ -154,22 +156,24 @@ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
struct udp_tunnel_info ti;
struct net_device *dev;
+ ASSERT_RTNL();
+
ti.type = type;
ti.sa_family = sk->sk_family;
ti.port = inet_sk(sk)->inet_sport;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_nic_del_port(dev, &ti);
+ udp_tunnel_nic_unlock(dev);
}
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
- bool xnet, bool nocheck)
+ bool xnet, bool nocheck, u16 ipcb_flags)
{
struct udphdr *uh;
@@ -185,7 +189,8 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
udp_set_csum(nocheck, skb, src, dst, skb->len);
- iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
+ iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet,
+ ipcb_flags);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
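The two notifier helpers above trade the RCU device walk for an RTNL-protected one so that a sleeping per-device lock can be taken inside the loop. The shape of that pattern is sketched below, with a hypothetical callback standing in for the udp_tunnel_nic port operations.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Walk every device in a netns and run a callback that may sleep.
 * RCU cannot protect the walk once the body sleeps, so the caller
 * must hold RTNL; ASSERT_RTNL() documents and enforces that.
 */
static void for_each_dev_locked(struct net *net,
				void (*cb)(struct net_device *dev, void *arg),
				void *arg)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev) {
		/* a per-device mutex, e.g. udp_tunnel_nic_lock(dev) /
		 * udp_tunnel_nic_unlock(dev) above, brackets the callback
		 */
		cb(dev, arg);
	}
}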
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index b6d2d16189c0..ff66db48453c 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -29,6 +29,7 @@ struct udp_tunnel_nic_table_entry {
* struct udp_tunnel_nic - UDP tunnel port offload state
* @work: async work for talking to hardware from process context
* @dev: netdev pointer
+ * @lock: protects all fields
* @need_sync: at least one port start changed
* @need_replay: space was freed, we need a replay of all ports
* @work_pending: @work is currently scheduled
@@ -41,6 +42,8 @@ struct udp_tunnel_nic {
struct net_device *dev;
+ struct mutex lock;
+
u8 need_sync:1;
u8 need_replay:1;
u8 work_pending:1;
@@ -298,22 +301,11 @@ __udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
- const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
- bool may_sleep;
-
if (!utn->need_sync)
return;
- /* Drivers which sleep in the callback need to update from
- * the workqueue, if we come from the tunnel driver's notification.
- */
- may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
- if (!may_sleep)
- __udp_tunnel_nic_device_sync(dev, utn);
- if (may_sleep || utn->need_replay) {
- queue_work(udp_tunnel_nic_workqueue, &utn->work);
- utn->work_pending = 1;
- }
+ queue_work(udp_tunnel_nic_workqueue, &utn->work);
+ utn->work_pending = 1;
}
static bool
@@ -554,12 +546,12 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
struct udp_tunnel_nic *utn;
unsigned int i, j;
- ASSERT_RTNL();
-
utn = dev->udp_tunnel_nic;
if (!utn)
return;
+ mutex_lock(&utn->lock);
+
utn->need_sync = false;
for (i = 0; i < utn->n_tables; i++)
for (j = 0; j < info->tables[i].n_entries; j++) {
@@ -569,7 +561,7 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
- /* We don't release rtnl across ops */
+ /* We don't release utn lock across ops */
WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
if (!entry->use_cnt)
continue;
@@ -579,6 +571,8 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
}
__udp_tunnel_nic_device_sync(dev, utn);
+
+ mutex_unlock(&utn->lock);
}
static size_t
@@ -643,6 +637,33 @@ err_cancel:
return -EMSGSIZE;
}
+static void __udp_tunnel_nic_assert_locked(struct net_device *dev)
+{
+ struct udp_tunnel_nic *utn;
+
+ utn = dev->udp_tunnel_nic;
+ if (utn)
+ lockdep_assert_held(&utn->lock);
+}
+
+static void __udp_tunnel_nic_lock(struct net_device *dev)
+{
+ struct udp_tunnel_nic *utn;
+
+ utn = dev->udp_tunnel_nic;
+ if (utn)
+ mutex_lock(&utn->lock);
+}
+
+static void __udp_tunnel_nic_unlock(struct net_device *dev)
+{
+ struct udp_tunnel_nic *utn;
+
+ utn = dev->udp_tunnel_nic;
+ if (utn)
+ mutex_unlock(&utn->lock);
+}
+
static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
.get_port = __udp_tunnel_nic_get_port,
.set_port_priv = __udp_tunnel_nic_set_port_priv,
@@ -651,6 +672,9 @@ static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
.reset_ntf = __udp_tunnel_nic_reset_ntf,
.dump_size = __udp_tunnel_nic_dump_size,
.dump_write = __udp_tunnel_nic_dump_write,
+ .assert_locked = __udp_tunnel_nic_assert_locked,
+ .lock = __udp_tunnel_nic_lock,
+ .unlock = __udp_tunnel_nic_unlock,
};
static void
@@ -710,11 +734,15 @@ static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
container_of(work, struct udp_tunnel_nic, work);
rtnl_lock();
+ mutex_lock(&utn->lock);
+
utn->work_pending = 0;
__udp_tunnel_nic_device_sync(utn->dev, utn);
if (utn->need_replay)
udp_tunnel_nic_replay(utn->dev, utn);
+
+ mutex_unlock(&utn->lock);
rtnl_unlock();
}
@@ -730,6 +758,7 @@ udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
return NULL;
utn->n_tables = n_tables;
INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
+ mutex_init(&utn->lock);
for (i = 0; i < n_tables; i++) {
utn->entries[i] = kcalloc(info->tables[i].n_entries,
@@ -821,8 +850,11 @@ static int udp_tunnel_nic_register(struct net_device *dev)
dev_hold(dev);
dev->udp_tunnel_nic = utn;
- if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
+ if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_get_rx_info(dev);
+ udp_tunnel_nic_unlock(dev);
+ }
return 0;
}
@@ -832,6 +864,8 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+ udp_tunnel_nic_lock(dev);
+
/* For a shared table remove this dev from the list of sharing devices
* and if there are other devices just detach.
*/
@@ -841,8 +875,10 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
list_for_each_entry(node, &info->shared->devices, list)
if (node->dev == dev)
break;
- if (list_entry_is_head(node, &info->shared->devices, list))
+ if (list_entry_is_head(node, &info->shared->devices, list)) {
+ udp_tunnel_nic_unlock(dev);
return;
+ }
list_del(&node->list);
kfree(node);
@@ -852,6 +888,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
if (first) {
udp_tunnel_drop_rx_info(dev);
utn->dev = first->dev;
+ udp_tunnel_nic_unlock(dev);
goto release_dev;
}
@@ -862,6 +899,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
* from the work which we will boot immediately.
*/
udp_tunnel_nic_flush(dev, utn);
+ udp_tunnel_nic_unlock(dev);
/* Wait for the work to be done using the state, netdev core will
* retry unregister until we give up our reference on this device.
@@ -910,12 +948,16 @@ udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
return NOTIFY_DONE;
if (event == NETDEV_UP) {
+ udp_tunnel_nic_lock(dev);
WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
udp_tunnel_get_rx_info(dev);
+ udp_tunnel_nic_unlock(dev);
return NOTIFY_OK;
}
if (event == NETDEV_GOING_DOWN) {
+ udp_tunnel_nic_lock(dev);
udp_tunnel_nic_flush(dev, utn);
+ udp_tunnel_nic_unlock(dev);
return NOTIFY_OK;
}
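udp_tunnel_nic.c now guards its port tables with a private mutex instead of relying on RTNL, and exposes lock/unlock/assert ops that tolerate devices with no offload state at all. A minimal sketch of that optional-state locking idiom, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct offload_state {
	struct mutex lock;		/* protects the offload tables */
	/* ... port tables ... */
};

struct my_netdev_priv {
	struct offload_state *state;	/* NULL if offload is not supported */
};

static void offload_lock(struct my_netdev_priv *priv)
{
	if (priv->state)
		mutex_lock(&priv->state->lock);
}

static void offload_unlock(struct my_netdev_priv *priv)
{
	if (priv->state)
		mutex_unlock(&priv->state->lock);
}

static void offload_assert_locked(struct my_netdev_priv *priv)
{
	if (priv->state)
		lockdep_assert_held(&priv->state->lock);
}

Once callers hold a per-device mutex instead of RTNL, drivers may sleep in their callbacks unconditionally, which appears to be why udp_tunnel_nic_device_sync() above drops the UDP_TUNNEL_NIC_INFO_MAY_SLEEP special case and always defers to the workqueue.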
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ba2ec7c870cc..9c297974d3a6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3208,7 +3208,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
}
}
-#if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
+#if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE)
static void add_v4_addrs(struct inet6_dev *idev)
{
struct in6_addr addr;
@@ -3463,6 +3463,7 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_IEEE1394) &&
(dev->type != ARPHRD_TUNNEL6) &&
(dev->type != ARPHRD_6LOWPAN) &&
+ (dev->type != ARPHRD_IP6GRE) &&
(dev->type != ARPHRD_TUNNEL) &&
(dev->type != ARPHRD_NONE) &&
(dev->type != ARPHRD_RAWIP)) {
@@ -3518,7 +3519,7 @@ static void addrconf_sit_config(struct net_device *dev)
}
#endif
-#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
+#if IS_ENABLED(CONFIG_NET_IPGRE)
static void addrconf_gre_config(struct net_device *dev)
{
struct inet6_dev *idev;
@@ -3536,7 +3537,7 @@ static void addrconf_gre_config(struct net_device *dev)
* which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
* case). Such devices fall back to add_v4_addrs() instead.
*/
- if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
+ if (!(*(__be32 *)dev->dev_addr == 0 &&
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
addrconf_addr_gen(idev, true);
return;
@@ -3557,8 +3558,7 @@ static void addrconf_init_auto_addrs(struct net_device *dev)
addrconf_sit_config(dev);
break;
#endif
-#if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
- case ARPHRD_IP6GRE:
+#if IS_ENABLED(CONFIG_NET_IPGRE)
case ARPHRD_IPGRE:
addrconf_gre_config(dev);
break;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index acaff1296783..1992621e3f3f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -842,7 +842,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = inet->inet_dport;
fl6.fl6_sport = inet->inet_sport;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
rcu_read_lock();
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index fff78496803d..281722817a65 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -53,7 +53,7 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6,
fl6->fl6_dport = inet->inet_dport;
fl6->fl6_sport = inet->inet_sport;
fl6->flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label);
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
if (!oif)
oif = np->sticky_pktinfo.ipi6_ifindex;
@@ -1064,7 +1064,7 @@ void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
sk_wmem_alloc_get(sp),
rqueue,
0, 0L, 0,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
0,
sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8f500eaf33cf..333e43434dd7 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -45,7 +45,7 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
fl6->flowi6_mark = ireq->ir_mark;
fl6->fl6_dport = ireq->ir_rmt_port;
fl6->fl6_sport = htons(ireq->ir_num);
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
security_req_classify_flow(req, flowi6_to_flowi_common(fl6));
dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
@@ -79,7 +79,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
fl6->flowi6_mark = sk->sk_mark;
fl6->fl6_sport = inet->inet_sport;
fl6->fl6_dport = inet->inet_dport;
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
rcu_read_lock();
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 894d3158a6f0..a885bb5c98ea 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1278,7 +1278,7 @@ route_lookup:
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
ipv6h->daddr = fl6->daddr;
- ip6tunnel_xmit(NULL, skb, dev);
+ ip6tunnel_xmit(NULL, skb, dev, 0);
return 0;
tx_err_link_failure:
DEV_STATS_INC(dev, tx_carrier_errors);
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index c99053189ea8..8ebe17a6058a 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -74,13 +74,14 @@ error:
}
EXPORT_SYMBOL_GPL(udp_sock_create6);
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be32 label,
- __be16 src_port, __be16 dst_port, bool nocheck)
+void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be32 label,
+ __be16 src_port, __be16 dst_port, bool nocheck,
+ u16 ip6cb_flags)
{
struct udphdr *uh;
struct ipv6hdr *ip6h;
@@ -108,8 +109,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
ip6h->daddr = *daddr;
ip6h->saddr = *saddr;
- ip6tunnel_xmit(sk, skb, dev);
- return 0;
+ ip6tunnel_xmit(sk, skb, dev, ip6cb_flags);
}
EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9db31e5b998c..a35f4f1c6589 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2035,8 +2035,8 @@ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct
* Processing handlers for ip6mr_forward
*/
-static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, int vifi)
+static int ip6mr_prepare_xmit(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
{
struct vif_device *vif = &mrt->vif_table[vifi];
struct net_device *vif_dev;
@@ -2046,7 +2046,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
vif_dev = vif_dev_read(vif);
if (!vif_dev)
- goto out_free;
+ return -1;
#ifdef CONFIG_IPV6_PIMSM_V2
if (vif->flags & MIFF_REGISTER) {
@@ -2055,7 +2055,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
DEV_STATS_INC(vif_dev, tx_packets);
ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
- goto out_free;
+ return -1;
}
#endif
@@ -2069,7 +2069,7 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);
- goto out_free;
+ return -1;
}
skb_dst_drop(skb);
@@ -2093,20 +2093,43 @@ static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
/* We are about to write */
/* XXX: extension headers? */
if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(vif_dev)))
- goto out_free;
+ return -1;
ipv6h = ipv6_hdr(skb);
ipv6h->hop_limit--;
+ return 0;
+}
+
+static void ip6mr_forward2(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
+{
+ struct net_device *indev = skb->dev;
+
+ if (ip6mr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
IP6CB(skb)->flags |= IP6SKB_FORWARDED;
- return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
- net, NULL, skb, skb->dev, vif_dev,
- ip6mr_forward2_finish);
+ NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+ net, NULL, skb, indev, skb->dev,
+ ip6mr_forward2_finish);
+ return;
+
+out_free:
+ kfree_skb(skb);
+}
+
+static void ip6mr_output2(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, int vifi)
+{
+ if (ip6mr_prepare_xmit(net, mrt, skb, vifi))
+ goto out_free;
+
+ ip6_output(net, NULL, skb);
+ return;
out_free:
kfree_skb(skb);
- return 0;
}
/* Called with rcu_read_lock() */
@@ -2221,6 +2244,56 @@ dont_forward:
kfree_skb(skb);
}
+/* Called under rcu_read_lock() */
+static void ip6_mr_output_finish(struct net *net, struct mr_table *mrt,
+ struct net_device *dev, struct sk_buff *skb,
+ struct mfc6_cache *c)
+{
+ int psend = -1;
+ int ct;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ atomic_long_inc(&c->_c.mfc_un.res.pkt);
+ atomic_long_add(skb->len, &c->_c.mfc_un.res.bytes);
+ WRITE_ONCE(c->_c.mfc_un.res.lastuse, jiffies);
+
+ /* Forward the frame */
+ if (ipv6_addr_any(&c->mf6c_origin) &&
+ ipv6_addr_any(&c->mf6c_mcastgrp)) {
+ if (ipv6_hdr(skb)->hop_limit >
+ c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
+ /* It's an (*,*) entry and the packet is not coming from
+ * the upstream: forward the packet to the upstream
+ * only.
+ */
+ psend = c->_c.mfc_parent;
+ goto last_forward;
+ }
+ goto dont_forward;
+ }
+ for (ct = c->_c.mfc_un.res.maxvif - 1;
+ ct >= c->_c.mfc_un.res.minvif; ct--) {
+ if (ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
+ if (psend != -1) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2)
+ ip6mr_output2(net, mrt, skb2, psend);
+ }
+ psend = ct;
+ }
+ }
+last_forward:
+ if (psend != -1) {
+ ip6mr_output2(net, mrt, skb, psend);
+ return;
+ }
+
+dont_forward:
+ kfree_skb(skb);
+}
/*
* Multicast packets for forwarding arrive here
@@ -2288,6 +2361,61 @@ int ip6_mr_input(struct sk_buff *skb)
return 0;
}
+int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+ struct flowi6 fl6 = (struct flowi6) {
+ .flowi6_iif = LOOPBACK_IFINDEX,
+ .flowi6_mark = skb->mark,
+ };
+ struct mfc6_cache *cache;
+ struct mr_table *mrt;
+ int err;
+ int vif;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (IP6CB(skb)->flags & IP6SKB_FORWARDED)
+ goto ip6_output;
+ if (!(IP6CB(skb)->flags & IP6SKB_MCROUTE))
+ goto ip6_output;
+
+ err = ip6mr_fib_lookup(net, &fl6, &mrt);
+ if (err < 0) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ cache = ip6mr_cache_find(mrt,
+ &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
+ if (!cache) {
+ vif = ip6mr_find_vif(mrt, dev);
+ if (vif >= 0)
+ cache = ip6mr_cache_find_any(mrt,
+ &ipv6_hdr(skb)->daddr,
+ vif);
+ }
+
+ /* No usable cache entry */
+ if (!cache) {
+ vif = ip6mr_find_vif(mrt, dev);
+ if (vif >= 0)
+ return ip6mr_cache_unresolved(mrt, vif, skb, dev);
+ goto ip6_output;
+ }
+
+ /* Wrong interface */
+ vif = cache->_c.mfc_parent;
+ if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev)
+ goto ip6_output;
+
+ ip6_mr_output_finish(net, mrt, dev, skb, cache);
+ return 0;
+
+ip6_output:
+ return ip6_output(net, sk, skb);
+}
+
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
u32 portid)
{
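ip6_mr_output_finish() above reuses the classic multicast fan-out: every vif whose TTL threshold is below the packet's hop limit gets a transmit, all but the last one on a clone, so the common single-interface case costs no extra copy. The loop is distilled below with the transmit step abstracted into a callback; the names are illustrative.

#include <linux/skbuff.h>

typedef void (*xmit_fn)(struct sk_buff *skb, int vifi, void *ctx);

/* Send @skb out of every vif in [minvif, maxvif) whose ttl threshold
 * is below the packet's hop limit.  All hits except the last receive
 * a clone; the final transmit consumes the original skb.
 */
static void mcast_fanout(struct sk_buff *skb, const unsigned char *ttls,
			 int minvif, int maxvif, u8 hop_limit,
			 xmit_fn xmit, void *ctx)
{
	int psend = -1;
	int ct;

	for (ct = maxvif - 1; ct >= minvif; ct--) {
		if (hop_limit <= ttls[ct])
			continue;

		if (psend != -1) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				xmit(skb2, psend, ctx);
		}
		psend = ct;
	}

	if (psend != -1)
		xmit(skb, psend, ctx);	/* consumes the original skb */
	else
		kfree_skb(skb);
}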
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 84d90dd8b3f0..82b0492923d4 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -142,7 +142,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
fl6.flowi6_mark = ipc6.sockc.mark;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fda640ebd53f..4c3f8245c40f 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -777,7 +777,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = ipc6.sockc.mark;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 79c8f1acf8a3..46a4f9d1900f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1145,6 +1145,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
rt->dst.input = ip6_input;
} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
rt->dst.input = ip6_mc_input;
+ rt->dst.output = ip6_mr_output;
} else {
rt->dst.input = ip6_forward;
}
@@ -3010,7 +3011,7 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
oif = l3mdev_master_ifindex(skb->dev);
ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
- sk->sk_uid);
+ sk_uid(sk));
dst = __sk_dst_get(sk);
if (!dst || !dst->obsolete ||
@@ -3232,7 +3233,7 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
- READ_ONCE(sk->sk_mark), sk->sk_uid);
+ READ_ONCE(sk->sk_mark), sk_uid(sk));
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
@@ -5851,8 +5852,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
goto nla_put_failure;
- if (dst->lwtstate &&
- lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
+ if (lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
goto nla_put_failure;
} else if (rt->fib6_nsiblings) {
struct fib6_info *sibling;
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index a11a02b4ba95..dfa825ee870e 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -270,7 +270,7 @@ static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr)
static int
seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
- u32 tbl_id, bool local_delivery)
+ u32 tbl_id, bool local_delivery, int oif)
{
struct net *net = dev_net(skb->dev);
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -282,6 +282,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_iif = skb->dev->ifindex;
+ fl6.flowi6_oif = oif;
fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
fl6.saddr = hdr->saddr;
fl6.flowlabel = ip6_flowinfo(hdr);
@@ -291,17 +292,19 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
if (nhaddr)
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
- if (!tbl_id) {
+ if (!tbl_id && !oif) {
dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags);
- } else {
+ } else if (tbl_id) {
struct fib6_table *table;
table = fib6_get_table(net, tbl_id);
if (!table)
goto out;
- rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
+ rt = ip6_pol_route(net, table, oif, &fl6, skb, flags);
dst = &rt->dst;
+ } else {
+ dst = ip6_route_output(net, NULL, &fl6);
}
/* we want to discard traffic destined for local packet processing,
@@ -330,7 +333,7 @@ out:
int seg6_lookup_nexthop(struct sk_buff *skb,
struct in6_addr *nhaddr, u32 tbl_id)
{
- return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false);
+ return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false, 0);
}
static __u8 seg6_flv_lcblock_octects(const struct seg6_flavors_info *finfo)
@@ -418,7 +421,7 @@ static int end_next_csid_core(struct sk_buff *skb, struct seg6_local_lwt *slwt)
static int input_action_end_x_finish(struct sk_buff *skb,
struct seg6_local_lwt *slwt)
{
- seg6_lookup_nexthop(skb, &slwt->nh6, 0);
+ seg6_lookup_any_nexthop(skb, &slwt->nh6, 0, false, slwt->oif);
return dst_input(skb);
}
@@ -1277,7 +1280,7 @@ static int input_action_end_dt6(struct sk_buff *skb,
/* note: this time we do not need to specify the table because the VRF
* takes care of selecting the correct table.
*/
- seg6_lookup_any_nexthop(skb, NULL, 0, true);
+ seg6_lookup_any_nexthop(skb, NULL, 0, true, 0);
return dst_input(skb);
@@ -1285,7 +1288,7 @@ legacy_mode:
#endif
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
- seg6_lookup_any_nexthop(skb, NULL, slwt->table, true);
+ seg6_lookup_any_nexthop(skb, NULL, slwt->table, true, 0);
return dst_input(skb);
@@ -1477,7 +1480,8 @@ static struct seg6_action_desc seg6_action_table[] = {
.action = SEG6_LOCAL_ACTION_END_X,
.attrs = SEG6_F_ATTR(SEG6_LOCAL_NH6),
.optattrs = SEG6_F_LOCAL_COUNTERS |
- SEG6_F_LOCAL_FLAVORS,
+ SEG6_F_LOCAL_FLAVORS |
+ SEG6_F_ATTR(SEG6_LOCAL_OIF),
.input = input_action_end_x,
},
{
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a72dbca9e8fc..12496ba1b7d4 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1035,7 +1035,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
- df, !net_eq(tunnel->net, dev_net(dev)));
+ df, !net_eq(tunnel->net, dev_net(dev)), 0);
return NETDEV_TX_OK;
tx_error_icmp:
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 9d83eadd308b..f0ee1a909771 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -236,7 +236,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
fl6.flowi6_mark = ireq->ir_mark;
fl6.fl6_dport = ireq->ir_rmt_port;
fl6.fl6_sport = inet_sk(sk)->inet_sport;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index e8e68a142649..f0ce62549d90 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -269,7 +269,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
fl6.fl6_sport = inet->inet_sport;
if (IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) && !fl6.fl6_sport)
fl6.flowi6_flags = FLOWI_FLAG_ANY_SPORT;
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
@@ -2168,7 +2168,7 @@ static void get_openreq6(struct seq_file *seq,
jiffies_to_clock_t(ttd),
req->num_timeout,
from_kuid_munged(seq_user_ns(seq),
- sock_i_uid(req->rsk_listener)),
+ sk_uid(req->rsk_listener)),
0, /* non standard timer */
0, /* open_requests have no inode */
0, req);
@@ -2234,7 +2234,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
icsk->icsk_probes_out,
sock_i_ino(sp),
refcount_read(&sp->sk_refcnt), sp,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 7317f8e053f1..ebb95d8bc681 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -750,7 +750,8 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (type == NDISC_REDIRECT) {
if (tunnel) {
ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
- READ_ONCE(sk->sk_mark), sk->sk_uid);
+ READ_ONCE(sk->sk_mark),
+ sk_uid(sk));
} else {
ip6_sk_redirect(skb, sk);
}
@@ -1620,7 +1621,7 @@ do_udp_sendmsg:
if (!fl6->flowi6_oif)
fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
- fl6->flowi6_uid = sk->sk_uid;
+ fl6->flowi6_uid = sk_uid(sk);
if (msg->msg_controllen) {
opt = &opt_space;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index efc2a91f4c48..1f82f69acfde 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3788,7 +3788,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
refcount_read(&s->sk_refcnt),
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
- from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
+ from_kuid_munged(seq_user_ns(f), sk_uid(s)),
sock_i_ino(s)
);
return 0;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index b98d13584c81..ea232f338dcb 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -545,7 +545,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
- fl6.flowi6_uid = sk->sk_uid;
+ fl6.flowi6_uid = sk_uid(sk);
ipcm6_init_sk(&ipc6, sk);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index cc77ec5769d8..5958a80fe14c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -210,7 +210,7 @@ static int llc_ui_release(struct socket *sock)
dprintk("%s: closing local(%02X) remote(%02X)\n", __func__,
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
- llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+ llc_ui_wait_for_disc(sk, READ_ONCE(sk->sk_rcvtimeo));
if (!sock_flag(sk, SOCK_ZAPPED)) {
struct llc_sap *sap = llc->sap;
@@ -455,7 +455,7 @@ static int llc_ui_shutdown(struct socket *sock, int how)
goto out;
rc = llc_send_disc(sk);
if (!rc)
- rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+ rc = llc_ui_wait_for_disc(sk, READ_ONCE(sk->sk_rcvtimeo));
/* Wake up anyone sleeping in poll */
sk->sk_state_change(sk);
out:
@@ -712,7 +712,7 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock,
goto out;
/* wait for a connection to arrive. */
if (skb_queue_empty(&sk->sk_receive_queue)) {
- rc = llc_wait_data(sk, sk->sk_rcvtimeo);
+ rc = llc_wait_data(sk, READ_ONCE(sk->sk_rcvtimeo));
if (rc)
goto out;
}
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 07e9abb5978a..aa81c67b24a1 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -151,7 +151,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(sk),
sk_rmem_alloc_get(sk) - llc->copied_seq,
sk->sk_state,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
llc->link);
out:
return 0;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index ee534797c033..e38f46ffebfa 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -299,7 +299,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
if (!sta->sta.valid_links &&
!sta->sta.deflink.ht_cap.ht_supported &&
- !sta->sta.deflink.he_cap.has_he) {
+ !sta->sta.deflink.he_cap.has_he &&
+ !sta->sta.deflink.s1g_cap.s1g) {
ht_dbg(sta->sdata,
"STA %pM erroneously requests BA session on tid %d w/o HT\n",
sta->sta.addr, tid);
@@ -327,7 +328,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
/* XXX: check own ht delayed BA capability?? */
if (((ba_policy != 1) &&
(sta->sta.valid_links ||
- !(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
+ !(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) ||
+ !(sta->sta.deflink.s1g_cap.cap[3] & S1G_CAP3_HT_DELAYED_BA))) ||
(buf_size > max_buf_size)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
ht_dbg_ratelimited(sta->sdata,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index dbd9ad5f3992..d981b0fc57bf 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -616,7 +616,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
!pubsta->deflink.ht_cap.ht_supported &&
!pubsta->deflink.vht_cap.vht_supported &&
!pubsta->deflink.he_cap.has_he &&
- !pubsta->deflink.eht_cap.has_eht)
+ !pubsta->deflink.eht_cap.has_eht &&
+ !pubsta->deflink.s1g_cap.s1g)
return -EINVAL;
if (WARN_ON_ONCE(!local->ops->ampdu_action))
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d9d88f2f2831..56540c3701ed 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -178,6 +178,7 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
link_conf->nontransmitted = true;
link_conf->bssid_index = params->index;
+ link_conf->bssid_indicator = tx_bss_conf->bssid_indicator;
}
if (params->ema)
link_conf->ema_ap = true;
@@ -885,6 +886,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
ret = 0;
memcpy(mac, sta->sta.addr, ETH_ALEN);
sta_set_sinfo(sta, sinfo, true);
+
+ /* Add accumulated removed link data to sinfo data for
+ * consistency for MLO
+ */
+ if (sinfo->valid_links)
+ sta_set_accumulated_removed_links_sinfo(sta, sinfo);
+
}
return ret;
@@ -912,6 +920,12 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
if (sta) {
ret = 0;
sta_set_sinfo(sta, sinfo, true);
+
+ /* Add accumulated removed link data to sinfo data for
+ * consistency for MLO
+ */
+ if (sinfo->valid_links)
+ sta_set_accumulated_removed_links_sinfo(sta, sinfo);
}
return ret;
@@ -1218,8 +1232,11 @@ ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_copy_rnr_beacon(pos, new->rnr_ies, rnr);
}
/* update bssid_indicator */
- link_conf->bssid_indicator =
- ilog2(__roundup_pow_of_two(mbssid->cnt + 1));
+ if (new->mbssid_ies->cnt && new->mbssid_ies->elem[0].len > 2)
+ link_conf->bssid_indicator =
+ *(new->mbssid_ies->elem[0].data + 2);
+ else
+ link_conf->bssid_indicator = 0;
}
if (csa) {
@@ -1878,6 +1895,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
params->vht_capa ||
params->he_capa ||
params->eht_capa ||
+ params->s1g_capa ||
params->opmode_notif_used;
switch (mode) {
@@ -1956,6 +1974,10 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
params->eht_capa_len,
link_sta);
+ if (params->s1g_capa)
+ ieee80211_s1g_cap_to_sta_s1g_cap(sdata, params->s1g_capa,
+ link_sta);
+
ieee80211_sta_init_nss(link_sta);
if (params->opmode_notif_used) {
@@ -3028,7 +3050,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int ieee80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx,
+ u32 changed)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
int err;
@@ -3036,7 +3059,8 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
ieee80211_check_fast_xmit_all(local);
- err = drv_set_frag_threshold(local, wiphy->frag_threshold);
+ err = drv_set_frag_threshold(local, radio_idx,
+ wiphy->frag_threshold);
if (err) {
ieee80211_check_fast_xmit_all(local);
@@ -3050,14 +3074,23 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ?
wiphy->coverage_class : -1;
- err = drv_set_coverage_class(local, coverage_class);
+ err = drv_set_coverage_class(local, radio_idx,
+ coverage_class);
if (err)
return err;
}
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- err = drv_set_rts_threshold(local, wiphy->rts_threshold);
+ u32 rts_threshold;
+
+ if ((radio_idx == -1) || (radio_idx >= wiphy->n_radio))
+ rts_threshold = wiphy->rts_threshold;
+ else
+ rts_threshold =
+ wiphy->radio_cfg[radio_idx].rts_threshold;
+
+ err = drv_set_rts_threshold(local, radio_idx, rts_threshold);
if (err)
return err;
@@ -3075,18 +3108,19 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
}
if (changed &
(WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS);
+ ieee80211_hw_config(local, radio_idx,
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS);
if (changed & (WIPHY_PARAM_TXQ_LIMIT |
WIPHY_PARAM_TXQ_MEMORY_LIMIT |
WIPHY_PARAM_TXQ_QUANTUM))
- ieee80211_txq_set_params(local);
+ ieee80211_txq_set_params(local, radio_idx);
return 0;
}
static int ieee80211_set_tx_power(struct wiphy *wiphy,
- struct wireless_dev *wdev,
+ struct wireless_dev *wdev, int radio_idx,
enum nl80211_tx_power_setting type, int mbm)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
@@ -3214,6 +3248,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
static int ieee80211_get_tx_power(struct wiphy *wiphy,
struct wireless_dev *wdev,
+ int radio_idx,
unsigned int link_id,
int *dbm)
{
@@ -3392,7 +3427,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
}
if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
ieee80211_recalc_ps(local);
ieee80211_recalc_ps_vif(sdata);
@@ -3549,6 +3584,56 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return 0;
}
+static bool ieee80211_is_scan_ongoing(struct wiphy *wiphy,
+ struct ieee80211_local *local,
+ struct cfg80211_chan_def *chandef)
+{
+ struct cfg80211_scan_request *scan_req;
+ int chan_radio_idx, req_radio_idx;
+ struct ieee80211_roc_work *roc;
+
+ if (list_empty(&local->roc_list) && !local->scanning)
+ return false;
+
+ if (wiphy->n_radio < 2)
+ return true;
+
+ req_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chandef->chan);
+ if (req_radio_idx < 0)
+ return true;
+
+ if (local->scanning) {
+ scan_req = wiphy_dereference(wiphy, local->scan_req);
+ /*
+ * A scan is in progress but its request info is missing. This
+ * should not happen, but if it does, play it safe: assume the
+ * hw cannot be used and return true.
+ */
+ if (WARN_ON_ONCE(!scan_req))
+ return true;
+
+ return ieee80211_is_radio_idx_in_scan_req(wiphy, scan_req,
+ req_radio_idx);
+ }
+
+ list_for_each_entry(roc, &local->roc_list, list) {
+ chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy,
+ roc->chan);
+ /*
+ * A roc entry is queued but chan_radio_idx is invalid. This
+ * should not happen, but if it does, play it safe and
+ * return true.
+ */
+ if (chan_radio_idx < 0)
+ return true;
+
+ if (chan_radio_idx == req_radio_idx)
+ return true;
+ }
+
+ return false;
+}
+
static int ieee80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
@@ -3562,7 +3647,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!list_empty(&local->roc_list) || local->scanning)
+ if (ieee80211_is_scan_ongoing(wiphy, local, chandef))
return -EBUSY;
link_data = sdata_dereference(sdata->link[link_id], sdata);
@@ -4054,7 +4139,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
lockdep_assert_wiphy(local->hw.wiphy);
- if (!list_empty(&local->roc_list) || local->scanning)
+ if (ieee80211_is_scan_ongoing(wiphy, local, &params->chandef))
return -EBUSY;
if (sdata->wdev.links[link_id].cac_started)
@@ -4238,7 +4323,8 @@ ieee80211_update_mgmt_frame_registrations(struct wiphy *wiphy,
ieee80211_configure_filter(local);
}
-static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
+static int ieee80211_set_antenna(struct wiphy *wiphy, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
int ret;
@@ -4254,11 +4340,12 @@ static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
return 0;
}
-static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
+static int ieee80211_get_antenna(struct wiphy *wiphy, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
- return drv_get_antenna(local, tx_ant, rx_ant);
+ return drv_get_antenna(local, radio_idx, tx_ant, rx_ant);
}
static int ieee80211_set_rekey_data(struct wiphy *wiphy,
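The WIPHY_PARAM_RTS_THRESHOLD hunk above introduces the radio_idx convention used throughout these cfg.c changes: -1 (or an out-of-range index) means the global wiphy value, otherwise the per-radio value from wiphy->radio_cfg[] applies. Distilled into a hypothetical helper (cfg.c open-codes this selection):

#include <net/cfg80211.h>

/* Illustrative helper only, not a cfg80211 API. */
static u32 wiphy_rts_threshold_for_radio(struct wiphy *wiphy, int radio_idx)
{
	if (radio_idx == -1 || radio_idx >= wiphy->n_radio)
		return wiphy->rts_threshold;

	return wiphy->radio_cfg[radio_idx].rts_threshold;
}

The same index also scopes the -EBUSY checks: ieee80211_is_scan_ongoing() only reports a conflict when the ongoing scan or remain-on-channel work touches the radio that serves the requested channel.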
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 3aaf5abf1acc..4bcbcf9d98b5 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -644,15 +644,39 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
return NULL;
}
-bool ieee80211_is_radar_required(struct ieee80211_local *local)
+bool ieee80211_is_radar_required(struct ieee80211_local *local,
+ struct cfg80211_scan_request *req)
{
+ struct wiphy *wiphy = local->hw.wiphy;
struct ieee80211_link_data *link;
+ struct ieee80211_channel *chan;
+ int radio_idx;
lockdep_assert_wiphy(local->hw.wiphy);
+ if (!req)
+ return false;
+
for_each_sdata_link(local, link) {
- if (link->radar_required)
- return true;
+ if (link->radar_required) {
+ if (wiphy->n_radio < 2)
+ return true;
+
+ chan = link->conf->chanreq.oper.chan;
+ radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+ /*
+ * The radio index (radio_idx) is expected to be valid,
+ * as it's derived from a channel tied to a link. If
+ * it's invalid (i.e., negative), return true to avoid
+ * potential issues with radar-sensitive operations.
+ */
+ if (radio_idx < 0)
+ return true;
+
+ if (ieee80211_is_radio_idx_in_scan_req(wiphy, req,
+ radio_idx))
+ return true;
+ }
}
return false;
@@ -720,7 +744,7 @@ static int ieee80211_add_chanctx(struct ieee80211_local *local,
/* turn idle off *before* setting channel -- some drivers need that */
changed = ieee80211_idle_off(local);
if (changed)
- ieee80211_hw_config(local, changed);
+ ieee80211_hw_config(local, -1, changed);
err = drv_add_chanctx(local, ctx);
if (err) {
@@ -1381,6 +1405,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
goto out;
}
+ link->radar_required = link->reserved_radar_required;
list_move(&link->assigned_chanctx_list, &new_ctx->assigned_links);
rcu_assign_pointer(link_conf->chanctx_conf, &new_ctx->conf);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 307587c8a003..8baebb5636ec 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -143,15 +143,16 @@ int drv_change_interface(struct ieee80211_local *local,
void drv_remove_interface(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
-static inline int drv_config(struct ieee80211_local *local, u32 changed)
+static inline int drv_config(struct ieee80211_local *local, int radio_idx,
+ u32 changed)
{
int ret;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_config(local, changed);
- ret = local->ops->config(&local->hw, changed);
+ trace_drv_config(local, radio_idx, changed);
+ ret = local->ops->config(&local->hw, radio_idx, changed);
trace_drv_return_int(local, ret);
return ret;
}
@@ -387,45 +388,47 @@ static inline void drv_get_key_seq(struct ieee80211_local *local,
}
static inline int drv_set_frag_threshold(struct ieee80211_local *local,
- u32 value)
+ int radio_idx, u32 value)
{
int ret = 0;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_set_frag_threshold(local, value);
+ trace_drv_set_frag_threshold(local, radio_idx, value);
if (local->ops->set_frag_threshold)
- ret = local->ops->set_frag_threshold(&local->hw, value);
+ ret = local->ops->set_frag_threshold(&local->hw, radio_idx,
+ value);
trace_drv_return_int(local, ret);
return ret;
}
static inline int drv_set_rts_threshold(struct ieee80211_local *local,
- u32 value)
+ int radio_idx, u32 value)
{
int ret = 0;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_set_rts_threshold(local, value);
+ trace_drv_set_rts_threshold(local, radio_idx, value);
if (local->ops->set_rts_threshold)
- ret = local->ops->set_rts_threshold(&local->hw, value);
+ ret = local->ops->set_rts_threshold(&local->hw, radio_idx,
+ value);
trace_drv_return_int(local, ret);
return ret;
}
static inline int drv_set_coverage_class(struct ieee80211_local *local,
- s16 value)
+ int radio_idx, s16 value)
{
int ret = 0;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
- trace_drv_set_coverage_class(local, value);
+ trace_drv_set_coverage_class(local, radio_idx, value);
if (local->ops->set_coverage_class)
- local->ops->set_coverage_class(&local->hw, value);
+ local->ops->set_coverage_class(&local->hw, radio_idx, value);
else
ret = -EOPNOTSUPP;
@@ -631,6 +634,25 @@ static inline void drv_sta_statistics(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline void drv_link_sta_statistics(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo)
+{
+ might_sleep();
+ lockdep_assert_wiphy(local->hw.wiphy);
+
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_link_sta_statistics(local, sdata, link_sta);
+ if (local->ops->link_sta_statistics)
+ local->ops->link_sta_statistics(&local->hw, &sdata->vif,
+ link_sta, link_sinfo);
+ trace_drv_return_void(local);
+}
+
int drv_conf_tx(struct ieee80211_local *local,
struct ieee80211_link_data *link, u16 ac,
const struct ieee80211_tx_queue_params *params);
@@ -753,20 +775,21 @@ static inline int drv_set_antenna(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
if (local->ops->set_antenna)
- ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant);
+ ret = local->ops->set_antenna(&local->hw, -1, tx_ant, rx_ant);
trace_drv_set_antenna(local, tx_ant, rx_ant, ret);
return ret;
}
-static inline int drv_get_antenna(struct ieee80211_local *local,
+static inline int drv_get_antenna(struct ieee80211_local *local, int radio_idx,
u32 *tx_ant, u32 *rx_ant)
{
int ret = -EOPNOTSUPP;
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
if (local->ops->get_antenna)
- ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant);
- trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret);
+ ret = local->ops->get_antenna(&local->hw, radio_idx,
+ tx_ant, rx_ant);
+ trace_drv_get_antenna(local, radio_idx, *tx_ant, *rx_ant, ret);
return ret;
}
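The driver-ops.h wrappers keep mac80211's usual shape while threading radio_idx through: trace the call, invoke the op only if the driver implements it, then trace the result. A generic rendering of that idiom, with hypothetical names and the trace hooks reduced to comments:

#include <linux/types.h>

struct my_drv_ops {
	int (*set_threshold)(void *hw, int radio_idx, u32 value);
};

/* Optional-callback wrapper: a missing op is not an error for "set"
 * hooks that have a sane default (cf. drv_set_rts_threshold above),
 * and the return value is still reported for tracing.
 */
static int drv_set_threshold_sketch(const struct my_drv_ops *ops, void *hw,
				    int radio_idx, u32 value)
{
	int ret = 0;

	/* trace the attempted call, including radio_idx */
	if (ops->set_threshold)
		ret = ops->set_threshold(hw, radio_idx, value);
	/* trace the return value */

	return ret;
}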
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 9ed87d6f5019..6e36b09fe97f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -635,7 +635,7 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- unsigned long last_active = ieee80211_sta_last_active(sta);
+ unsigned long last_active = ieee80211_sta_last_active(sta, -1);
if (sta->sdata == sdata &&
time_is_after_jiffies(last_active +
@@ -1228,7 +1228,7 @@ static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- unsigned long last_active = ieee80211_sta_last_active(sta);
+ unsigned long last_active = ieee80211_sta_last_active(sta, -1);
if (sdata != sta->sdata)
continue;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 30809f0b35f7..4ef7b3656aca 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1872,7 +1872,8 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
struct ieee80211_rx_status *status,
unsigned int mpdu_len,
unsigned int mpdu_offset);
-int ieee80211_hw_config(struct ieee80211_local *local, u32 changed);
+int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx,
+ u32 changed);
int ieee80211_hw_conf_chan(struct ieee80211_local *local);
void ieee80211_hw_conf_init(struct ieee80211_local *local);
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
@@ -2269,6 +2270,9 @@ void ieee80211_s1g_rx_twt_action(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
+void ieee80211_s1g_cap_to_sta_s1g_cap(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_s1g_cap *s1g_cap_ie,
+ struct link_sta_info *link_sta);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -2542,7 +2546,7 @@ static inline bool ieee80211_can_run_worker(struct ieee80211_local *local)
}
int ieee80211_txq_setup_flows(struct ieee80211_local *local);
-void ieee80211_txq_set_params(struct ieee80211_local *local);
+void ieee80211_txq_set_params(struct ieee80211_local *local, int radio_idx);
void ieee80211_txq_teardown_flows(struct ieee80211_local *local);
void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
@@ -2712,7 +2716,11 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
struct ieee80211_chanctx *ctx,
struct ieee80211_link_data *rsvd_for,
bool check_reserved);
-bool ieee80211_is_radar_required(struct ieee80211_local *local);
+bool ieee80211_is_radar_required(struct ieee80211_local *local,
+ struct cfg80211_scan_request *req);
+bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy,
+ struct cfg80211_scan_request *scan_req,
+ int radio_idx);
void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7c27f3cd841c..7b2baebb8644 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -146,7 +146,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
{
u32 change = __ieee80211_recalc_idle(local, false);
if (change)
- ieee80211_hw_config(local, change);
+ ieee80211_hw_config(local, -1, change);
}
static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr,
@@ -726,7 +726,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
/* do after stop to avoid reconfiguring when we stop anyway */
ieee80211_configure_filter(local);
- ieee80211_hw_config(local, hw_reconf_flags);
+ ieee80211_hw_config(local, -1, hw_reconf_flags);
if (local->virt_monitors == local->open_count)
ieee80211_add_virtual_monitor(local);
@@ -1491,7 +1491,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (local->open_count == 1)
ieee80211_hw_conf_init(local);
else if (hw_reconf_flags)
- ieee80211_hw_config(local, hw_reconf_flags);
+ ieee80211_hw_config(local, -1, hw_reconf_flags);
ieee80211_recalc_ps(local);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 6b6de43d9420..c1c758e76d2e 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -190,7 +190,8 @@ static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local,
return changed;
}
-int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx,
+ u32 changed)
{
int ret = 0;
@@ -201,7 +202,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
IEEE80211_CONF_CHANGE_SMPS));
if (changed && local->open_count) {
- ret = drv_config(local, changed);
+ ret = drv_config(local, radio_idx, changed);
/*
* Goal:
* HW reconfiguration should never fail, the driver has told
@@ -235,7 +236,7 @@ static int _ieee80211_hw_conf_chan(struct ieee80211_local *local,
if (!changed)
return 0;
- return drv_config(local, changed);
+ return drv_config(local, -1, changed);
}
int ieee80211_hw_conf_chan(struct ieee80211_local *local)
@@ -269,7 +270,7 @@ void ieee80211_hw_conf_init(struct ieee80211_local *local)
ctx ? &ctx->conf : NULL);
}
- WARN_ON(drv_config(local, changed));
+ WARN_ON(drv_config(local, -1, changed));
}
int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
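[Editor's note] The main.c hunks above thread a radio index through ieee80211_hw_config() and drv_config(), with -1 meaning "not tied to a specific radio, apply device-wide". Below is a minimal userspace sketch of that sentinel-dispatch pattern; the struct names and the apply_one() helper are invented for illustration and are not the mac80211 API.

/* Sketch of the "-1 means all radios" dispatch used above.
 * Types and helpers are illustrative stubs, not kernel code. */
#include <stdio.h>

#define NUM_RADIOS 2

struct radio_conf { unsigned int rts_threshold; };
struct hw_conf    { struct radio_conf radio[NUM_RADIOS]; };

static void apply_one(struct hw_conf *hw, int idx, unsigned int rts)
{
	hw->radio[idx].rts_threshold = rts;
	printf("radio %d: rts=%u\n", idx, rts);
}

/* radio_idx == -1: apply the change to every radio on the device */
static void hw_config(struct hw_conf *hw, int radio_idx, unsigned int rts)
{
	if (radio_idx < 0) {
		for (int i = 0; i < NUM_RADIOS; i++)
			apply_one(hw, i, rts);
	} else {
		apply_one(hw, radio_idx, rts);
	}
}

int main(void)
{
	struct hw_conf hw = { 0 };

	hw_config(&hw, -1, 2347);	/* device-wide, as most callers above do */
	hw_config(&hw, 1, 500);		/* per-radio, as the reconfig loop does */
	return 0;
}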
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2d46d4af60d7..6001c8897d7c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3181,7 +3181,7 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
return;
conf->flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
}
@@ -3193,7 +3193,7 @@ static void ieee80211_change_ps(struct ieee80211_local *local)
ieee80211_enable_ps(local, local->ps_sdata);
} else if (conf->flags & IEEE80211_CONF_PS) {
conf->flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
timer_delete_sync(&local->dynamic_ps_timer);
wiphy_work_cancel(local->hw.wiphy,
&local->dynamic_ps_enable_work);
@@ -3302,7 +3302,7 @@ void ieee80211_dynamic_ps_disable_work(struct wiphy *wiphy,
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
ieee80211_wake_queues_by_reason(&local->hw,
@@ -3377,7 +3377,7 @@ void ieee80211_dynamic_ps_enable_work(struct wiphy *wiphy,
(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
}
@@ -3986,7 +3986,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
*/
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
local->ps_sdata = NULL;
@@ -5399,6 +5399,12 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
bss_conf->epcs_support = false;
}
+ if (elems->s1g_oper &&
+ link->u.mgd.conn.mode == IEEE80211_CONN_MODE_S1G &&
+ elems->s1g_capab)
+ ieee80211_s1g_cap_to_sta_s1g_cap(sdata, elems->s1g_capab,
+ link_sta);
+
bss_conf->twt_broadcast =
ieee80211_twt_bcast_support(sdata, bss_conf, sband, link_sta);
@@ -7340,7 +7346,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
if (local->hw.conf.dynamic_ps_timeout > 0) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local,
+ ieee80211_hw_config(local, -1,
IEEE80211_CONF_CHANGE_PS);
}
ieee80211_send_nullfunc(local, sdata, false);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 2b9abc27462e..13df6321634d 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -39,7 +39,7 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
offchannel_ps_enabled = true;
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_PS);
}
if (!offchannel_ps_enabled ||
@@ -567,6 +567,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
{
struct ieee80211_roc_work *roc, *tmp;
bool queued = false, combine_started = true;
+ struct cfg80211_scan_request *req;
int ret;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -612,9 +613,11 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
roc->mgmt_tx_cookie = *cookie;
}
+ req = wiphy_dereference(local->hw.wiphy, local->scan_req);
+
/* if there's no need to queue, handle it immediately */
if (list_empty(&local->roc_list) &&
- !local->scanning && !ieee80211_is_radar_required(local)) {
+ !local->scanning && !ieee80211_is_radar_required(local, req)) {
/* if not HW assist, just queue & schedule work */
if (!local->ops->remain_on_channel) {
list_add_tail(&roc->list, &local->roc_list);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index a9cc832240a5..5a508d99e84f 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -108,7 +108,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
sdata->u.mgd.powersave &&
!(local->hw.conf.flags & IEEE80211_CONF_PS)) {
local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local,
+ ieee80211_hw_config(local, -1,
IEEE80211_CONF_CHANGE_PS);
}
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index e73431549ce7..8699755081ad 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -231,8 +231,19 @@ static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
skb_queue_tail(&sdata->skb_queue, skb);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
- if (sta)
- sta->deflink.rx_stats.packets++;
+ if (sta) {
+ struct link_sta_info *link_sta_info;
+
+ if (link_id >= 0) {
+ link_sta_info = rcu_dereference(sta->link[link_id]);
+ if (!link_sta_info)
+ return;
+ } else {
+ link_sta_info = &sta->deflink;
+ }
+
+ link_sta_info->rx_stats.packets++;
+ }
}
static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
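[Editor's note] The rx.c hunk above picks the per-link statistics container when a valid link_id is known and falls back to the default link otherwise. The sketch below isolates that selection; struct names are simplified stand-ins and the RCU dereference is replaced by a plain array read.

/* Simplified stand-in for choosing deflink vs. sta->link[link_id];
 * no RCU here, just the control flow. */
#include <stdio.h>

#define MAX_LINKS 15

struct link_stats { unsigned long rx_packets; };

struct sta {
	struct link_stats deflink;
	struct link_stats *link[MAX_LINKS];	/* NULL if link not set up */
};

static struct link_stats *pick_link_stats(struct sta *sta, int link_id)
{
	if (link_id < 0)
		return &sta->deflink;		/* non-MLO or unknown link */
	return sta->link[link_id];		/* may be NULL: caller must check */
}

int main(void)
{
	struct link_stats l1 = { 0 };
	struct sta sta = { .link = { [1] = &l1 } };
	struct link_stats *st;

	st = pick_link_stats(&sta, 1);
	if (st)
		st->rx_packets++;

	st = pick_link_stats(&sta, -1);
	if (st)
		st->rx_packets++;

	printf("link1=%lu deflink=%lu\n", l1.rx_packets, sta.deflink.rx_packets);
	return 0;
}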
diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c
index d4ed0c0a335c..1f68df6e8067 100644
--- a/net/mac80211/s1g.c
+++ b/net/mac80211/s1g.c
@@ -194,3 +194,29 @@ void ieee80211_s1g_status_twt_action(struct ieee80211_sub_if_data *sdata,
break;
}
}
+
+void ieee80211_s1g_cap_to_sta_s1g_cap(struct ieee80211_sub_if_data *sdata,
+ const struct ieee80211_s1g_cap *s1g_cap_ie,
+ struct link_sta_info *link_sta)
+{
+ struct ieee80211_sta_s1g_cap *s1g_cap = &link_sta->pub->s1g_cap;
+
+ memset(s1g_cap, 0, sizeof(*s1g_cap));
+
+ memcpy(s1g_cap->cap, s1g_cap_ie->capab_info, sizeof(s1g_cap->cap));
+ memcpy(s1g_cap->nss_mcs, s1g_cap_ie->supp_mcs_nss,
+ sizeof(s1g_cap->nss_mcs));
+
+ s1g_cap->s1g = true;
+
+ /* Maximum MPDU length is 1 bit for S1G */
+ if (s1g_cap->cap[3] & S1G_CAP3_MAX_MPDU_LEN) {
+ link_sta->pub->agg.max_amsdu_len =
+ IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ } else {
+ link_sta->pub->agg.max_amsdu_len =
+ IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ }
+
+ ieee80211_sta_recalc_aggregates(&link_sta->sta->sta);
+}
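[Editor's note] ieee80211_s1g_cap_to_sta_s1g_cap() above copies the capability and MCS/NSS fields from the IE and maps the single "maximum MPDU length" bit onto one of two A-MSDU limits. A standalone sketch of that mapping follows; the bit mask value is a placeholder chosen for the example (not the kernel define), while 3895 and 7991 follow the VHT length constants referenced above.

/* Map an S1G "max MPDU length" capability bit to an A-MSDU limit.
 * The bit position is a placeholder for illustration only. */
#include <stdio.h>
#include <stdint.h>

#define CAP3_MAX_MPDU_LEN	0x01	/* placeholder mask in capability octet 3 */
#define MAX_MPDU_LEN_3895	3895
#define MAX_MPDU_LEN_7991	7991

static unsigned int s1g_max_amsdu_len(const uint8_t cap[10])
{
	return (cap[3] & CAP3_MAX_MPDU_LEN) ? MAX_MPDU_LEN_7991
					    : MAX_MPDU_LEN_3895;
}

int main(void)
{
	uint8_t cap_short[10] = { 0 };
	uint8_t cap_long[10]  = { 0 };

	cap_long[3] |= CAP3_MAX_MPDU_LEN;

	printf("bit clear -> %u\n", s1g_max_amsdu_len(cap_short));
	printf("bit set   -> %u\n", s1g_max_amsdu_len(cap_long));
	return 0;
}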
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index cd8385ecafd9..9799164a56d9 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -586,7 +586,8 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
return 0;
}
-static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
+static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
@@ -594,7 +595,7 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
lockdep_assert_wiphy(local->hw.wiphy);
- if (!ieee80211_is_radar_required(local))
+ if (!ieee80211_is_radar_required(local, req))
return true;
if (!regulatory_pre_cac_allowed(local->hw.wiphy))
@@ -610,9 +611,10 @@ static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
}
static bool ieee80211_can_scan(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_scan_request *req)
{
- if (!__ieee80211_can_leave_ch(sdata))
+ if (!__ieee80211_can_leave_ch(sdata, req))
return false;
if (!list_empty(&local->roc_list))
@@ -627,15 +629,19 @@ static bool ieee80211_can_scan(struct ieee80211_local *local,
void ieee80211_run_deferred_scan(struct ieee80211_local *local)
{
+ struct cfg80211_scan_request *req;
+
lockdep_assert_wiphy(local->hw.wiphy);
if (!local->scan_req || local->scanning)
return;
+ req = wiphy_dereference(local->hw.wiphy, local->scan_req);
if (!ieee80211_can_scan(local,
rcu_dereference_protected(
local->scan_sdata,
- lockdep_is_held(&local->hw.wiphy->mtx))))
+ lockdep_is_held(&local->hw.wiphy->mtx)),
+ req))
return;
wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
@@ -732,10 +738,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
!(sdata->vif.active_links & BIT(req->tsf_report_link_id)))
return -EINVAL;
- if (!__ieee80211_can_leave_ch(sdata))
+ if (!__ieee80211_can_leave_ch(sdata, req))
return -EBUSY;
- if (!ieee80211_can_scan(local, sdata)) {
+ if (!ieee80211_can_scan(local, sdata, req)) {
/* wait for the work to finish/time out */
rcu_assign_pointer(local->scan_req, req);
rcu_assign_pointer(local->scan_sdata, sdata);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 61583173629e..89cf365b07e6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -355,6 +355,50 @@ static void sta_info_free_link(struct link_sta_info *link_sta)
free_percpu(link_sta->pcpu_rx_stats);
}
+static void sta_accumulate_removed_link_stats(struct sta_info *sta, int link_id)
+{
+ struct link_sta_info *link_sta = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+ struct ieee80211_link_data *link;
+ int ac, tid;
+ u32 thr;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ sta->rem_link_stats.tx_packets +=
+ link_sta->tx_stats.packets[ac];
+ sta->rem_link_stats.tx_bytes += link_sta->tx_stats.bytes[ac];
+ }
+
+ sta->rem_link_stats.rx_packets += link_sta->rx_stats.packets;
+ sta->rem_link_stats.rx_bytes += link_sta->rx_stats.bytes;
+ sta->rem_link_stats.tx_retries += link_sta->status_stats.retry_count;
+ sta->rem_link_stats.tx_failed += link_sta->status_stats.retry_failed;
+ sta->rem_link_stats.rx_dropped_misc += link_sta->rx_stats.dropped;
+
+ thr = sta_get_expected_throughput(sta);
+ if (thr != 0)
+ sta->rem_link_stats.expected_throughput += thr;
+
+ for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
+ sta->rem_link_stats.pertid_stats.rx_msdu +=
+ link_sta->rx_stats.msdu[tid];
+ sta->rem_link_stats.pertid_stats.tx_msdu +=
+ link_sta->tx_stats.msdu[tid];
+ sta->rem_link_stats.pertid_stats.tx_msdu_retries +=
+ link_sta->status_stats.msdu_retries[tid];
+ sta->rem_link_stats.pertid_stats.tx_msdu_failed +=
+ link_sta->status_stats.msdu_failed[tid];
+ }
+
+ if (sta->sdata->vif.type == NL80211_IFTYPE_STATION) {
+ link = wiphy_dereference(sta->sdata->local->hw.wiphy,
+ sta->sdata->link[link_id]);
+ if (link)
+ sta->rem_link_stats.beacon_loss_count +=
+ link->u.mgd.beacon_loss_count;
+ }
+}
+
static void sta_remove_link(struct sta_info *sta, unsigned int link_id,
bool unhash)
{
@@ -377,6 +421,10 @@ static void sta_remove_link(struct sta_info *sta, unsigned int link_id,
alloc = container_of(link_sta, typeof(*alloc), info);
sta->sta.valid_links &= ~BIT(link_id);
+
+ /* store removed link info for accumulated stats consistency */
+ sta_accumulate_removed_link_stats(sta, link_id);
+
RCU_INIT_POINTER(sta->link[link_id], NULL);
RCU_INIT_POINTER(sta->sta.link[link_id], NULL);
if (alloc) {
@@ -1651,7 +1699,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
lockdep_assert_wiphy(local->hw.wiphy);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- unsigned long last_active = ieee80211_sta_last_active(sta);
+ unsigned long last_active = ieee80211_sta_last_active(sta, -1);
if (sdata != sta->sdata)
continue;
@@ -2420,18 +2468,27 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
}
static struct ieee80211_sta_rx_stats *
-sta_get_last_rx_stats(struct sta_info *sta)
+sta_get_last_rx_stats(struct sta_info *sta, int link_id)
{
- struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats;
+ struct ieee80211_sta_rx_stats *stats;
+ struct link_sta_info *link_sta_info;
int cpu;
- if (!sta->deflink.pcpu_rx_stats)
+ if (link_id < 0)
+ link_sta_info = &sta->deflink;
+ else
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
+ stats = &link_sta_info->rx_stats;
+
+ if (!link_sta_info->pcpu_rx_stats)
return stats;
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpustats;
- cpustats = per_cpu_ptr(sta->deflink.pcpu_rx_stats, cpu);
+ cpustats = per_cpu_ptr(link_sta_info->pcpu_rx_stats, cpu);
if (time_after(cpustats->last_rx, stats->last_rx))
stats = cpustats;
@@ -2499,9 +2556,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
}
}
-static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo,
+ int link_id)
{
- u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ u32 rate = READ_ONCE(sta_get_last_rx_stats(sta, link_id)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
return -EINVAL;
@@ -2526,20 +2584,28 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
static void sta_set_tidstats(struct sta_info *sta,
struct cfg80211_tid_stats *tidstats,
- int tid)
+ int tid, int link_id)
{
struct ieee80211_local *local = sta->local;
+ struct link_sta_info *link_sta_info;
int cpu;
+ if (link_id < 0)
+ link_sta_info = &sta->deflink;
+ else
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
- tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->deflink.rx_stats,
- tid);
+ tidstats->rx_msdu +=
+ sta_get_tidstats_msdu(&link_sta_info->rx_stats,
+ tid);
- if (sta->deflink.pcpu_rx_stats) {
+ if (link_sta_info->pcpu_rx_stats) {
for_each_possible_cpu(cpu) {
struct ieee80211_sta_rx_stats *cpurxs;
- cpurxs = per_cpu_ptr(sta->deflink.pcpu_rx_stats,
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
cpu);
tidstats->rx_msdu +=
sta_get_tidstats_msdu(cpurxs, tid);
@@ -2551,22 +2617,24 @@ static void sta_set_tidstats(struct sta_info *sta,
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
- tidstats->tx_msdu = sta->deflink.tx_stats.msdu[tid];
+ tidstats->tx_msdu = link_sta_info->tx_stats.msdu[tid];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
- tidstats->tx_msdu_retries = sta->deflink.status_stats.msdu_retries[tid];
+ tidstats->tx_msdu_retries =
+ link_sta_info->status_stats.msdu_retries[tid];
}
if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
- tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid];
+ tidstats->tx_msdu_failed =
+ link_sta_info->status_stats.msdu_failed[tid];
}
- if (tid < IEEE80211_NUM_TIDS) {
+ if (link_id < 0 && tid < IEEE80211_NUM_TIDS) {
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
@@ -2625,16 +2693,278 @@ static void sta_set_mesh_sinfo(struct sta_info *sta,
}
#endif
+void sta_set_accumulated_removed_links_sinfo(struct sta_info *sta,
+ struct station_info *sinfo)
+{
+ /* Accumulating the removed link statistics. */
+ sinfo->tx_packets = sta->rem_link_stats.tx_packets;
+ sinfo->rx_packets = sta->rem_link_stats.rx_packets;
+ sinfo->tx_bytes = sta->rem_link_stats.tx_bytes;
+ sinfo->rx_bytes = sta->rem_link_stats.rx_bytes;
+ sinfo->tx_retries = sta->rem_link_stats.tx_retries;
+ sinfo->tx_failed = sta->rem_link_stats.tx_failed;
+ sinfo->rx_dropped_misc = sta->rem_link_stats.rx_dropped_misc;
+ sinfo->beacon_loss_count = sta->rem_link_stats.beacon_loss_count;
+ sinfo->expected_throughput = sta->rem_link_stats.expected_throughput;
+
+ if (sinfo->pertid) {
+ sinfo->pertid->rx_msdu =
+ sta->rem_link_stats.pertid_stats.rx_msdu;
+ sinfo->pertid->tx_msdu =
+ sta->rem_link_stats.pertid_stats.tx_msdu;
+ sinfo->pertid->tx_msdu_retries =
+ sta->rem_link_stats.pertid_stats.tx_msdu_retries;
+ sinfo->pertid->tx_msdu_failed =
+ sta->rem_link_stats.pertid_stats.tx_msdu_failed;
+ }
+}
+
+static void sta_set_link_sinfo(struct sta_info *sta,
+ struct link_station_info *link_sinfo,
+ struct ieee80211_link_data *link,
+ bool tidstats)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_sta_rx_stats *last_rxstats;
+ int i, ac, cpu, link_id = link->link_id;
+ struct link_sta_info *link_sta_info;
+ u32 thr = 0;
+
+ last_rxstats = sta_get_last_rx_stats(sta, link_id);
+
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
+ /* do before driver, so beacon filtering drivers have a
+ * chance to e.g. just add the number of filtered beacons
+ * (or just modify the value entirely, of course)
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ link_sinfo->rx_beacon = link->u.mgd.count_beacon_signal;
+
+ ether_addr_copy(link_sinfo->addr, link_sta_info->addr);
+
+ drv_link_sta_statistics(sta->local, sdata,
+ link_sta_info->pub,
+ link_sinfo);
+
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
+ BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ link_sinfo->beacon_loss_count =
+ link->u.mgd.beacon_loss_count;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
+ }
+
+ link_sinfo->inactive_time =
+ jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, link_id));
+
+ if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
+ link_sinfo->tx_bytes = 0;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->tx_bytes +=
+ link_sta_info->tx_stats.bytes[ac];
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
+ link_sinfo->tx_packets = 0;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->tx_packets +=
+ link_sta_info->tx_stats.packets[ac];
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+ }
+
+ if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
+ link_sinfo->rx_bytes +=
+ sta_get_stats_bytes(&link_sta_info->rx_stats);
+
+ if (link_sta_info->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
+ cpu);
+ link_sinfo->rx_bytes +=
+ sta_get_stats_bytes(cpurxs);
+ }
+ }
+
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
+ link_sinfo->rx_packets = link_sta_info->rx_stats.packets;
+ if (link_sta_info->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
+ cpu);
+ link_sinfo->rx_packets += cpurxs->packets;
+ }
+ }
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
+ link_sinfo->tx_retries =
+ link_sta_info->status_stats.retry_count;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
+ link_sinfo->tx_failed =
+ link_sta_info->status_stats.retry_failed;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->rx_duration += sta->airtime[ac].rx_airtime;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ link_sinfo->tx_duration += sta->airtime[ac].tx_airtime;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
+ link_sinfo->airtime_weight = sta->airtime_weight;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
+ }
+
+ link_sinfo->rx_dropped_misc = link_sta_info->rx_stats.dropped;
+ if (link_sta_info->pcpu_rx_stats) {
+ for_each_possible_cpu(cpu) {
+ struct ieee80211_sta_rx_stats *cpurxs;
+
+ cpurxs = per_cpu_ptr(link_sta_info->pcpu_rx_stats,
+ cpu);
+ link_sinfo->rx_dropped_misc += cpurxs->dropped;
+ }
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
+ BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+ link_sinfo->rx_beacon_signal_avg =
+ ieee80211_ave_rssi(&sdata->vif, -1);
+ }
+
+ if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
+ ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
+ link_sinfo->signal = (s8)last_rxstats->last_signal;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ if (!link_sta_info->pcpu_rx_stats &&
+ !(link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
+ link_sinfo->signal_avg =
+ -ewma_signal_read(&link_sta_info->rx_stats_avg.signal);
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+ }
+
+ /* for the average - if pcpu_rx_stats isn't set - rxstats must point to
+ * the sta->rx_stats struct, so the check here is fine with and without
+ * pcpu statistics
+ */
+ if (last_rxstats->chains &&
+ !(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
+ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ if (!link_sta_info->pcpu_rx_stats)
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+
+ link_sinfo->chains = last_rxstats->chains;
+
+ for (i = 0; i < ARRAY_SIZE(link_sinfo->chain_signal); i++) {
+ link_sinfo->chain_signal[i] =
+ last_rxstats->chain_signal_last[i];
+ link_sinfo->chain_signal_avg[i] =
+ -ewma_signal_read(
+ &link_sta_info->rx_stats_avg.chain_signal[i]);
+ }
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) &&
+ ieee80211_rate_valid(&link_sta_info->tx_stats.last_rate)) {
+ sta_set_rate_info_tx(sta, &link_sta_info->tx_stats.last_rate,
+ &link_sinfo->txrate);
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
+ if (sta_set_rate_info_rx(sta, &link_sinfo->rxrate,
+ link_id) == 0)
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
+
+ if (tidstats && !cfg80211_link_sinfo_alloc_tid_stats(link_sinfo,
+ GFP_KERNEL)) {
+ for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
+ sta_set_tidstats(sta, &link_sinfo->pertid[i], i,
+ link_id);
+ }
+
+ link_sinfo->bss_param.flags = 0;
+ if (sdata->vif.bss_conf.use_cts_prot)
+ link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
+ if (sdata->vif.bss_conf.use_short_preamble)
+ link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
+ if (sdata->vif.bss_conf.use_short_slot)
+ link_sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+ link_sinfo->bss_param.dtim_period = link->conf->dtim_period;
+ link_sinfo->bss_param.beacon_interval = link->conf->beacon_int;
+
+ thr = sta_get_expected_throughput(sta);
+
+ if (thr != 0) {
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
+ link_sinfo->expected_throughput = thr;
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
+ link_sta_info->status_stats.ack_signal_filled) {
+ link_sinfo->ack_signal =
+ link_sta_info->status_stats.last_ack_signal;
+ link_sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
+ }
+
+ if (!(link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
+ link_sta_info->status_stats.ack_signal_filled) {
+ link_sinfo->avg_ack_signal =
+ -(s8)ewma_avg_signal_read(
+ &link_sta_info->status_stats.avg_ack_signal);
+ link_sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
+ }
+}
+
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
bool tidstats)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
u32 thr = 0;
- int i, ac, cpu;
+ int i, ac, cpu, link_id;
struct ieee80211_sta_rx_stats *last_rxstats;
- last_rxstats = sta_get_last_rx_stats(sta);
+ last_rxstats = sta_get_last_rx_stats(sta, -1);
sinfo->generation = sdata->local->sta_generation;
@@ -2662,7 +2992,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
sinfo->assoc_at = sta->assoc_at;
sinfo->inactive_time =
- jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
+ jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, -1));
if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
@@ -2751,7 +3081,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
!(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
- sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
+ sinfo->rx_beacon_signal_avg =
+ ieee80211_ave_rssi(&sdata->vif, -1);
}
if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
@@ -2800,13 +3131,13 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) &&
!sta->sta.valid_links) {
- if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
+ if (sta_set_rate_info_rx(sta, &sinfo->rxrate, -1) == 0)
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
- sta_set_tidstats(sta, &sinfo->pertid[i], i);
+ sta_set_tidstats(sta, &sinfo->pertid[i], i, -1);
}
#ifdef CONFIG_MAC80211_MESH
@@ -2868,6 +3199,26 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->filled |=
BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
+
+ if (sta->sta.valid_links) {
+ struct ieee80211_link_data *link;
+ struct link_sta_info *link_sta;
+
+ ether_addr_copy(sinfo->mld_addr, sta->addr);
+ for_each_valid_link(sinfo, link_id) {
+ link_sta = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+ link = wiphy_dereference(sdata->local->hw.wiphy,
+ sdata->link[link_id]);
+
+ if (!link_sta || !sinfo->links[link_id] || !link)
+ continue;
+
+ sinfo->valid_links = sta->sta.valid_links;
+ sta_set_link_sinfo(sta, sinfo->links[link_id],
+ link, tidstats);
+ }
+ }
}
u32 sta_get_expected_throughput(struct sta_info *sta)
@@ -2889,14 +3240,24 @@ u32 sta_get_expected_throughput(struct sta_info *sta)
return thr;
}
-unsigned long ieee80211_sta_last_active(struct sta_info *sta)
+unsigned long ieee80211_sta_last_active(struct sta_info *sta, int link_id)
{
- struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
+ struct ieee80211_sta_rx_stats *stats;
+ struct link_sta_info *link_sta_info;
- if (!sta->deflink.status_stats.last_ack ||
- time_after(stats->last_rx, sta->deflink.status_stats.last_ack))
+ stats = sta_get_last_rx_stats(sta, link_id);
+
+ if (link_id < 0)
+ link_sta_info = &sta->deflink;
+ else
+ link_sta_info = wiphy_dereference(sta->local->hw.wiphy,
+ sta->link[link_id]);
+
+ if (!link_sta_info->status_stats.last_ack ||
+ time_after(stats->last_rx, link_sta_info->status_stats.last_ack))
return stats->last_rx;
- return sta->deflink.status_stats.last_ack;
+
+ return link_sta_info->status_stats.last_ack;
}
int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id)
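[Editor's note] sta_accumulate_removed_link_stats() above folds a departing link's counters into sta->rem_link_stats before the link pointer is cleared, so totals reported later still include traffic from links that no longer exist. Below is a compact userspace model of that "accumulate before removing" step, with the structures trimmed to a few counters and a plain array in place of the RCU-protected pointers.

/* Fold a departing link's counters into an aggregate before freeing it. */
#include <stdio.h>
#include <stdlib.h>

struct link_stats { unsigned long tx_packets, rx_packets; };

struct removed_totals { unsigned long tx_packets, rx_packets; };

struct sta {
	struct link_stats *link[4];
	struct removed_totals removed;
};

static void sta_remove_link(struct sta *sta, int link_id)
{
	struct link_stats *l = sta->link[link_id];

	if (!l)
		return;

	/* accumulate first, so later stat reads stay monotonic */
	sta->removed.tx_packets += l->tx_packets;
	sta->removed.rx_packets += l->rx_packets;

	sta->link[link_id] = NULL;
	free(l);
}

int main(void)
{
	struct sta sta = { 0 };

	sta.link[0] = calloc(1, sizeof(*sta.link[0]));
	sta.link[0]->tx_packets = 10;
	sta.link[0]->rx_packets = 7;

	sta_remove_link(&sta, 0);
	printf("removed totals: tx=%lu rx=%lu\n",
	       sta.removed.tx_packets, sta.removed.rx_packets);
	return 0;
}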
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 7a95d8d34fca..5288d5286651 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -569,6 +569,58 @@ struct link_sta_info {
};
/**
+ * struct ieee80211_sta_removed_link_stats - Removed link sta data
+ *
+ * keep required accumulated removed link data for stats
+ *
+ * @rx_packets: accumulated packets (MSDUs & MMPDUs) received from
+ * this station for removed links
+ * @tx_packets: accumulated packets (MSDUs & MMPDUs) transmitted to
+ * this station for removed links
+ * @rx_bytes: accumulated bytes (size of MPDUs) received from this
+ * station for removed links
+ * @tx_bytes: accumulated bytes (size of MPDUs) transmitted to this
+ * station for removed links
+ * @tx_retries: cumulative retry counts (MPDUs) for removed links
+ * @tx_failed: accumulated number of failed transmissions (MPDUs)
+ * (retries exceeded, no ACK) for removed links
+ * @rx_dropped_misc: accumulated dropped packets for unspecified reasons
+ * from this station for removed links
+ * @beacon_loss_count: Number of times beacon loss event has triggered
+ * from this station for removed links.
+ * @expected_throughput: expected throughput in kbps (including 802.11
+ * headers) towards this station for removed links
+ * @pertid_stats: accumulated per-TID statistics for removed link of
+ * station
+ * @pertid_stats.rx_msdu: accumulated number of received MSDUs towards
+ * this station for removed links.
+ * @pertid_stats.tx_msdu: accumulated number of (attempted) transmitted
+ * MSDUs towards this station for removed links
+ * @pertid_stats.tx_msdu_retries: accumulated number of retries (not
+ * counting the first) for transmitted MSDUs towards this station
+ * for removed links
+ * @pertid_stats.tx_msdu_failed: accumulated number of failed transmitted
+ * MSDUs towards this station for removed links
+ */
+struct ieee80211_sta_removed_link_stats {
+ u32 rx_packets;
+ u32 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u32 tx_retries;
+ u32 tx_failed;
+ u32 rx_dropped_misc;
+ u32 beacon_loss_count;
+ u32 expected_throughput;
+ struct {
+ u64 rx_msdu;
+ u64 tx_msdu;
+ u64 tx_msdu_retries;
+ u64 tx_msdu_failed;
+ } pertid_stats;
+};
+
+/**
* struct sta_info - STA information
*
* This structure collects information about a station that
@@ -644,6 +696,7 @@ struct link_sta_info {
* @deflink address and remaining would be allocated and the address
* would be assigned to link[link_id] where link_id is the id assigned
* by the AP.
+ * @rem_link_stats: accumulated removed link stats
*/
struct sta_info {
/* General information, mostly static */
@@ -718,6 +771,7 @@ struct sta_info {
struct ieee80211_sta_aggregates cur;
struct link_sta_info deflink;
struct link_sta_info __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct ieee80211_sta_removed_link_stats rem_link_stats;
/* keep last! */
struct ieee80211_sta sta;
@@ -922,6 +976,9 @@ void sta_set_rate_info_tx(struct sta_info *sta,
void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
bool tidstats);
+void sta_set_accumulated_removed_links_sinfo(struct sta_info *sta,
+ struct station_info *sinfo);
+
u32 sta_get_expected_throughput(struct sta_info *sta);
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
@@ -936,7 +993,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
-unsigned long ieee80211_sta_last_active(struct sta_info *sta);
+unsigned long ieee80211_sta_last_active(struct sta_info *sta, int link_id);
void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
const u8 *ext_capab,
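[Editor's note] The ieee80211_sta_last_active() prototype above now takes a link_id; in the sta_info.c hunk the body returns whichever of "last frame received" and "last ACK seen" is more recent, using a jiffies-wrap-safe comparison. A minimal standalone sketch of that selection, with the lookup of the stats container omitted:

/* Return the more recent of last-RX time and last-ACK time,
 * using a wrapping-safe comparison. Illustrative only. */
#include <stdio.h>

typedef unsigned long jiffies_t;

/* wrap-safe "a after b" check, same idea as the kernel's time_after() */
static int time_after_j(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

static jiffies_t last_active(jiffies_t last_rx, jiffies_t last_ack)
{
	if (!last_ack || time_after_j(last_rx, last_ack))
		return last_rx;
	return last_ack;
}

int main(void)
{
	printf("%lu\n", last_active(1000, 900));	/* rx is newer */
	printf("%lu\n", last_active(1000, 1200));	/* ack is newer */
	printf("%lu\n", last_active(1000, 0));		/* no ack seen yet */
	return 0;
}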
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 72fad8ea8bb9..0bfbce157486 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -384,12 +384,14 @@ DEFINE_EVENT(local_sdata_addr_evt, drv_remove_interface,
TRACE_EVENT(drv_config,
TP_PROTO(struct ieee80211_local *local,
+ int radio_idx,
u32 changed),
- TP_ARGS(local, changed),
+ TP_ARGS(local, radio_idx, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(int, radio_idx)
__field(u32, changed)
__field(u32, flags)
__field(int, power_level)
@@ -403,6 +405,7 @@ TRACE_EVENT(drv_config,
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->changed = changed;
__entry->flags = local->hw.conf.flags;
__entry->power_level = local->hw.conf.power_level;
@@ -417,8 +420,8 @@ TRACE_EVENT(drv_config,
),
TP_printk(
- LOCAL_PR_FMT " ch:%#x" CHANDEF_PR_FMT,
- LOCAL_PR_ARG, __entry->changed, CHANDEF_PR_ARG
+ LOCAL_PR_FMT " radio_idx:%d ch:%#x" CHANDEF_PR_FMT,
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->changed, CHANDEF_PR_ARG
)
);
@@ -818,34 +821,71 @@ TRACE_EVENT(drv_get_key_seq,
)
);
-DEFINE_EVENT(local_u32_evt, drv_set_frag_threshold,
- TP_PROTO(struct ieee80211_local *local, u32 value),
- TP_ARGS(local, value)
+TRACE_EVENT(drv_set_frag_threshold,
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 value),
+
+ TP_ARGS(local, radio_idx, value),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, radio_idx)
+ __field(u32, value)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " radio_id:%d value:%u",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->value
+ )
);
-DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold,
- TP_PROTO(struct ieee80211_local *local, u32 value),
- TP_ARGS(local, value)
+TRACE_EVENT(drv_set_rts_threshold,
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 value),
+
+ TP_ARGS(local, radio_idx, value),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, radio_idx)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " radio_id:%d value:%u",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->value
+ )
);
TRACE_EVENT(drv_set_coverage_class,
- TP_PROTO(struct ieee80211_local *local, s16 value),
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, s16 value),
- TP_ARGS(local, value),
+ TP_ARGS(local, radio_idx, value),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(int, radio_idx)
__field(s16, value)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->value = value;
),
TP_printk(
- LOCAL_PR_FMT " value:%d",
- LOCAL_PR_ARG, __entry->value
+ LOCAL_PR_FMT " radio_id:%d value:%d",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->value
)
);
@@ -1002,6 +1042,33 @@ DEFINE_EVENT(sta_event, drv_sta_statistics,
TP_ARGS(local, sdata, sta)
);
+TRACE_EVENT(drv_link_sta_statistics,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_sta *link_sta),
+
+ TP_ARGS(local, sdata, link_sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u32, link_id)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_NAMED_ASSIGN(link_sta->sta);
+ __entry->link_id = link_sta->link_id;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " (link %d)",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->link_id
+ )
+);
+
DEFINE_EVENT(sta_event, drv_sta_add,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
@@ -1291,12 +1358,14 @@ TRACE_EVENT(drv_set_antenna,
);
TRACE_EVENT(drv_get_antenna,
- TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
+ TP_PROTO(struct ieee80211_local *local, int radio_idx, u32 tx_ant,
+ u32 rx_ant, int ret),
- TP_ARGS(local, tx_ant, rx_ant, ret),
+ TP_ARGS(local, radio_idx, tx_ant, rx_ant, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(int, radio_idx)
__field(u32, tx_ant)
__field(u32, rx_ant)
__field(int, ret)
@@ -1304,14 +1373,16 @@ TRACE_EVENT(drv_get_antenna,
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->tx_ant = tx_ant;
__entry->rx_ant = rx_ant;
__entry->ret = ret;
),
TP_printk(
- LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d",
- LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret
+ LOCAL_PR_FMT " radio_idx:%d tx_ant:%d rx_ant:%d ret:%d",
+ LOCAL_PR_ARG, __entry->radio_idx, __entry->tx_ant,
+ __entry->rx_ant, __entry->ret
)
);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index d58b80813bdd..6fa883a9250d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1173,7 +1173,8 @@ void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
return;
if (!sta ||
- (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported) ||
+ (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported &&
+ !sta->sta.deflink.s1g_cap.s1g) ||
!sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
skb->protocol == sdata->control_port_protocol)
return;
@@ -1541,7 +1542,7 @@ void ieee80211_txq_purge(struct ieee80211_local *local,
spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}
-void ieee80211_txq_set_params(struct ieee80211_local *local)
+void ieee80211_txq_set_params(struct ieee80211_local *local, int radio_idx)
{
if (local->hw.wiphy->txq_limit)
local->fq.limit = local->hw.wiphy->txq_limit;
@@ -1605,7 +1606,7 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
for (i = 0; i < fq->flows_cnt; i++)
codel_vars_init(&local->cvars[i]);
- ieee80211_txq_set_params(local);
+ ieee80211_txq_set_params(local, -1);
return 0;
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index a125995ed252..ff6c5d5e631d 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1756,6 +1756,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
bool sched_scan_stopped = false;
bool suspended = local->suspended;
bool in_reconfig = false;
+ u32 rts_threshold;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1826,13 +1827,20 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
/* setup fragmentation threshold */
- drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
+ drv_set_frag_threshold(local, -1, hw->wiphy->frag_threshold);
/* setup RTS threshold */
- drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
+ if (hw->wiphy->n_radio > 0) {
+ for (i = 0; i < hw->wiphy->n_radio; i++) {
+ rts_threshold = hw->wiphy->radio_cfg[i].rts_threshold;
+ drv_set_rts_threshold(local, i, rts_threshold);
+ }
+ } else {
+ drv_set_rts_threshold(local, -1, hw->wiphy->rts_threshold);
+ }
/* reset coverage class */
- drv_set_coverage_class(local, hw->wiphy->coverage_class);
+ drv_set_coverage_class(local, -1, hw->wiphy->coverage_class);
ieee80211_led_radio(local, true);
ieee80211_mod_tpt_led_trig(local,
@@ -1890,11 +1898,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
ieee80211_assign_chanctx(local, sdata, &sdata->deflink);
/* reconfigure hardware */
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
- IEEE80211_CONF_CHANGE_MONITOR |
- IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_RETRY_LIMITS |
- IEEE80211_CONF_CHANGE_IDLE);
+ ieee80211_hw_config(local, -1, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
+ IEEE80211_CONF_CHANGE_MONITOR |
+ IEEE80211_CONF_CHANGE_PS |
+ IEEE80211_CONF_CHANGE_RETRY_LIMITS |
+ IEEE80211_CONF_CHANGE_IDLE);
ieee80211_configure_filter(local);
@@ -3265,14 +3273,24 @@ int ieee80211_put_srates_elem(struct sk_buff *skb,
return 0;
}
-int ieee80211_ave_rssi(struct ieee80211_vif *vif)
+int ieee80211_ave_rssi(struct ieee80211_vif *vif, int link_id)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_link_data *link_data;
if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION))
return 0;
- return -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal);
+ if (link_id < 0)
+ link_data = &sdata->deflink;
+ else
+ link_data = wiphy_dereference(sdata->local->hw.wiphy,
+ sdata->link[link_id]);
+
+ if (WARN_ON_ONCE(!link_data))
+ return -99;
+
+ return -ewma_beacon_signal_read(&link_data->u.mgd.ave_beacon_signal);
}
EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
@@ -3953,6 +3971,33 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
return radar_detect;
}
+bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy,
+ struct cfg80211_scan_request *scan_req,
+ int radio_idx)
+{
+ struct ieee80211_channel *chan;
+ int i, chan_radio_idx;
+
+ for (i = 0; i < scan_req->n_channels; i++) {
+ chan = scan_req->channels[i];
+ chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+ /*
+ * The chan_radio_idx should be valid since it's taken from a
+ * valid scan request.
+ * However, if chan_radio_idx is unexpectedly invalid (negative),
+ * we take a conservative approach and assume the scan request
+ * might use the specified radio_idx. Hence, return true.
+ */
+ if (WARN_ON(chan_radio_idx < 0))
+ return true;
+
+ if (chan_radio_idx == radio_idx)
+ return true;
+ }
+
+ return false;
+}
+
static u32
__ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata)
{
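[Editor's note] ieee80211_is_radio_idx_in_scan_req() above walks the scan request's channel list and reports whether any channel maps to the given radio, treating an unexpectedly invalid mapping as a match to stay conservative. The same loop in standalone form, with the channel-to-radio lookup replaced by a stub field rather than the cfg80211 helper:

/* Does this scan touch the given radio? Conservative on bad mappings. */
#include <stdio.h>
#include <stdbool.h>

struct channel { int radio_idx; };	/* negative models an invalid mapping */

static bool scan_uses_radio(const struct channel *chans, int n, int radio_idx)
{
	for (int i = 0; i < n; i++) {
		int idx = chans[i].radio_idx;

		/* unknown mapping: assume the radio may be used */
		if (idx < 0)
			return true;
		if (idx == radio_idx)
			return true;
	}
	return false;
}

int main(void)
{
	struct channel scan[] = { { 0 }, { 0 }, { 1 } };

	printf("radio 1: %d\n", scan_uses_radio(scan, 3, 1));	/* 1 */
	printf("radio 2: %d\n", scan_uses_radio(scan, 3, 2));	/* 0 */
	return 0;
}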
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index edf14c2c2062..e7972e633236 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3503,7 +3503,7 @@ void mptcp_sock_graft(struct sock *sk, struct socket *parent)
write_lock_bh(&sk->sk_callback_lock);
rcu_assign_pointer(sk->sk_wq, &parent->wq);
sk_set_socket(sk, parent);
- sk->sk_uid = SOCK_INODE(parent)->i_uid;
+ WRITE_ONCE(sk->sk_uid, SOCK_INODE(parent)->i_uid);
write_unlock_bh(&sk->sk_callback_lock);
}
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index e76c6de0c784..adee6dcabdc3 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -110,7 +110,7 @@ struct ncsi_channel_version {
u8 update; /* NCSI version update */
char alpha1; /* NCSI version alpha1 */
char alpha2; /* NCSI version alpha2 */
- u8 fw_name[12]; /* Firmware name string */
+ u8 fw_name[12 + 1]; /* Firmware name string */
u32 fw_version; /* Firmware version */
u16 pci_ids[4]; /* PCI identification */
u32 mf_id; /* Manufacture ID */
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 472cc68ad86f..271ec6c3929e 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -775,6 +775,7 @@ static int ncsi_rsp_handler_gvi(struct ncsi_request *nr)
ncv->alpha1 = rsp->alpha1;
ncv->alpha2 = rsp->alpha2;
memcpy(ncv->fw_name, rsp->fw_name, 12);
+ ncv->fw_name[12] = '\0';
ncv->fw_version = ntohl(rsp->fw_version);
for (i = 0; i < ARRAY_SIZE(ncv->pci_ids); i++)
ncv->pci_ids[i] = ntohs(rsp->pci_ids[i]);
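[Editor's note] The two NCSI hunks widen fw_name by one byte and NUL-terminate it after copying the fixed 12-byte field from the response, so the buffer can later be treated as a C string. The sketch below shows why the extra byte matters; the struct layout is reduced to the one field.

/* Copy a fixed-width, not-necessarily-terminated field into a C string. */
#include <stdio.h>
#include <string.h>

#define FW_NAME_LEN 12

struct response { char fw_name[FW_NAME_LEN]; };		/* wire format: no NUL */

struct parsed { char fw_name[FW_NAME_LEN + 1]; };	/* +1 for the NUL */

static void parse(struct parsed *out, const struct response *rsp)
{
	memcpy(out->fw_name, rsp->fw_name, FW_NAME_LEN);
	out->fw_name[FW_NAME_LEN] = '\0';	/* always terminated now */
}

int main(void)
{
	struct response rsp;
	struct parsed p;

	/* a name that uses all 12 bytes, with no terminator on the wire */
	memcpy(rsp.fw_name, "ABCDEFGHIJKL", FW_NAME_LEN);

	parse(&p, &rsp);
	printf("fw: %s\n", p.fw_name);	/* safe to print as a string */
	return 0;
}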
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6a40b8d0350d..a18e2c503da6 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1192,7 +1192,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
continue;
uri = nla_data(sdp_attrs[NFC_SDP_ATTR_URI]);
- if (uri == NULL || *uri == 0)
+ if (*uri == 0)
continue;
tid = local->sdreq_next_tid++;
@@ -1540,10 +1540,6 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
}
apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
- if (!apdu) {
- rc = -EINVAL;
- goto put_dev;
- }
ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
if (!ctx) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3d43f3eae759..f6b1ff883c93 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4783,7 +4783,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
READ_ONCE(po->ifindex),
packet_sock_flag(po, PACKET_SOCK_RUNNING),
atomic_read(&s->sk_rmem_alloc),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(s)),
sock_i_ino(s));
}
diff --git a/net/packet/diag.c b/net/packet/diag.c
index 47f69f3dbf73..6ce1dcc284d9 100644
--- a/net/packet/diag.c
+++ b/net/packet/diag.c
@@ -153,7 +153,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
if ((req->pdiag_show & PACKET_SHOW_INFO) &&
nla_put_u32(skb, PACKET_DIAG_UID,
- from_kuid_munged(user_ns, sock_i_uid(sk))))
+ from_kuid_munged(user_ns, sk_uid(sk))))
goto out_nlmsg_trim;
if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 5ce0b3ee5def..ea4d5e6533db 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -584,7 +584,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
sk->sk_protocol, pn->sobject, pn->dobject,
pn->resource, sk->sk_state,
sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
refcount_read(&sk->sk_refcnt), sk,
atomic_read(&sk->sk_drops));
@@ -755,7 +755,7 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%02X %5u %lu",
(int) (psk - pnres.sk),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk));
}
seq_pad(seq, '\n');
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 8435a20968ef..086a13170e09 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -598,7 +598,7 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
}
if (addr_type & IPV6_ADDR_LINKLOCAL) {
- /* If socket is arleady bound to a link local address,
+ /* If socket is already bound to a link local address,
* the peer address must be on the same link.
*/
if (sin6->sin6_scope_id == 0 ||
diff --git a/net/rds/send.c b/net/rds/send.c
index 09a280110654..42d991bc8543 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -232,7 +232,7 @@ restart:
* If not already working on one, grab the next message.
*
* cp_xmit_rm holds a ref while we're sending this message down
- * the connction. We can use this ref while holding the
+ * the connection. We can use this ref while holding the
* send_sem.. rds_send_reset() is serialized with it.
*/
if (!rm) {
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index d89bd8d0c354..b5c801c629a4 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -298,15 +298,15 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
sin6 = (struct sockaddr_in6 *)&ss;
sin6->sin6_family = PF_INET6;
sin6->sin6_addr = in6addr_any;
- sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT);
+ sin6->sin6_port = htons(RDS_TCP_PORT);
sin6->sin6_scope_id = 0;
sin6->sin6_flowinfo = 0;
addr_len = sizeof(*sin6);
} else {
sin = (struct sockaddr_in *)&ss;
sin->sin_family = PF_INET;
- sin->sin_addr.s_addr = INADDR_ANY;
- sin->sin_port = (__force u16)htons(RDS_TCP_PORT);
+ sin->sin_addr.s_addr = htonl(INADDR_ANY);
+ sin->sin_port = htons(RDS_TCP_PORT);
addr_len = sizeof(*sin);
}
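[Editor's note] The RDS listener hunk switches the IPv4 wildcard address and the port to explicit htonl()/htons() conversions so the sockaddr fields are consistently in network byte order. INADDR_ANY is all-zero bits, so the conversion is a no-op at runtime; the change is about byte-order correctness and annotations. A small runnable userspace equivalent, with the port value a stand-in rather than the real RDS_TCP_PORT:

/* Fill a wildcard IPv4 listen address in network byte order. */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

#define LISTEN_PORT 16385	/* stand-in port for the example */

int main(void)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);	/* not plain INADDR_ANY */
	sin.sin_port = htons(LISTEN_PORT);

	printf("family=%d port(net)=%#x addr(net)=%#x\n",
	       sin.sin_family, (unsigned)sin.sin_port,
	       (unsigned)sin.sin_addr.s_addr);
	return 0;
}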
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 420c66203b17..6b3d0af72c39 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -108,7 +108,7 @@ static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
struct text_match *tm = EM_TEXT_PRIV(m);
struct tcf_em_text conf;
- strncpy(conf.algo, tm->config->ops->name, sizeof(conf.algo) - 1);
+ strscpy(conf.algo, tm->config->ops->name);
conf.from_offset = tm->from_offset;
conf.to_offset = tm->to_offset;
conf.from_layer = tm->from_layer;
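[Editor's note] The em_text and smc_pnet hunks move from strncpy() to strscpy(), which always NUL-terminates and never leaves an unterminated buffer behind. strscpy() is kernel-only; the helper below is a simplified userspace stand-in with the same guarantee, shown next to the strncpy pitfall.

/* Userspace stand-in for a strscpy()-style bounded copy: always
 * NUL-terminates, returns -1 (instead of -E2BIG) on truncation. */
#include <stdio.h>
#include <string.h>

static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (!size)
		return -1;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;		/* truncated */
	}
	memcpy(dst, src, len + 1);	/* includes the NUL */
	return (long)len;
}

int main(void)
{
	char a[8], b[8];

	/* strncpy does not terminate when the source fills the buffer */
	strncpy(a, "12345678", sizeof(a));		/* a ends up with no NUL */
	bounded_copy(b, "12345678", sizeof(b));	/* b is "1234567" + NUL */

	printf("b=\"%s\" (always terminated)\n", b);
	(void)a;
	return 0;
}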
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0c0d2757f6f8..2dc2666988fb 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -756,7 +756,7 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
struct sock *sk2 = ep2->base.sk;
if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
- !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
+ !uid_eq(sk_uid(sk2), sk_uid(sk)) ||
!sk2->sk_reuseport)
continue;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index a9ed2ccab1bd..3336dcfb4515 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -261,9 +261,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6);
- return udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr,
- &fl6->daddr, tclass, ip6_dst_hoplimit(dst),
- label, sctp_sk(sk)->udp_port, t->encap_port, false);
+ udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr, &fl6->daddr,
+ tclass, ip6_dst_hoplimit(dst), label,
+ sctp_sk(sk)->udp_port, t->encap_port, false, 0);
+ return 0;
}
/* Returns the dst cache entry for the given source and destination ip
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index ec00ee75d59a..74bff317e205 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -177,7 +177,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
ep->base.bind_addr.port,
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk));
sctp_seq_dump_local_addrs(seq, &ep->base);
@@ -267,7 +267,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc->assoc_id,
assoc->sndbuf_used,
atomic_read(&assoc->rmem_alloc),
- from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ from_kuid_munged(seq_user_ns(seq), sk_uid(sk)),
sock_i_ino(sk),
epb->bind_addr.port,
assoc->peer.port);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f402f90eb6b6..a5ccada55f2b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1103,7 +1103,8 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
- sctp_sk(sk)->udp_port, t->encap_port, false, false);
+ sctp_sk(sk)->udp_port, t->encap_port, false, false,
+ 0);
return 0;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1e5739858c20..4921416434f9 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8345,8 +8345,8 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
bool reuse = (sk->sk_reuse || sp->reuse);
struct sctp_bind_hashbucket *head; /* hash list */
struct net *net = sock_net(sk);
- kuid_t uid = sock_i_uid(sk);
struct sctp_bind_bucket *pp;
+ kuid_t uid = sk_uid(sk);
unsigned short snum;
int ret;
@@ -8444,7 +8444,7 @@ pp_found:
(reuse && (sk2->sk_reuse || sp2->reuse) &&
sk2->sk_state != SCTP_SS_LISTENING) ||
(sk->sk_reuseport && sk2->sk_reuseport &&
- uid_eq(uid, sock_i_uid(sk2))))
+ uid_eq(uid, sk_uid(sk2))))
continue;
if ((!sk->sk_bound_dev_if || !bound_dev_if2 ||
@@ -9492,8 +9492,8 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
newsk->sk_sndbuf = sk->sk_sndbuf;
newsk->sk_rcvbuf = sk->sk_rcvbuf;
newsk->sk_lingertime = sk->sk_lingertime;
- newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
- newsk->sk_sndtimeo = sk->sk_sndtimeo;
+ newsk->sk_rcvtimeo = READ_ONCE(sk->sk_rcvtimeo);
+ newsk->sk_sndtimeo = READ_ONCE(sk->sk_sndtimeo);
newsk->sk_rxhash = sk->sk_rxhash;
newinet = inet_sk(newsk);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 3760131f1484..8d56e4db63e0 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -486,8 +486,8 @@ static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
{
/* options we don't get control via setsockopt for */
nsk->sk_type = osk->sk_type;
- nsk->sk_sndtimeo = osk->sk_sndtimeo;
- nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
+ nsk->sk_sndtimeo = READ_ONCE(osk->sk_sndtimeo);
+ nsk->sk_rcvtimeo = READ_ONCE(osk->sk_rcvtimeo);
nsk->sk_mark = READ_ONCE(osk->sk_mark);
nsk->sk_priority = READ_ONCE(osk->sk_priority);
nsk->sk_rcvlowat = osk->sk_rcvlowat;
@@ -1585,7 +1585,7 @@ static void smc_connect_work(struct work_struct *work)
{
struct smc_sock *smc = container_of(work, struct smc_sock,
connect_work);
- long timeo = smc->sk.sk_sndtimeo;
+ long timeo = READ_ONCE(smc->sk.sk_sndtimeo);
int rc = 0;
if (!timeo)
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 521f5df80e10..5a4db151fe95 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -688,7 +688,7 @@ out:
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
u8 expected_type, unsigned long timeout)
{
- long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
+ long rcvtimeo = READ_ONCE(smc->clcsock->sk->sk_rcvtimeo);
struct sock *clc_sk = smc->clcsock->sk;
struct smc_clc_msg_hdr *clcm = buf;
struct msghdr msg = {NULL, 0};
@@ -707,7 +707,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
* sizeof(struct smc_clc_msg_hdr)
*/
krflags = MSG_PEEK | MSG_WAITALL;
- clc_sk->sk_rcvtimeo = timeout;
+ WRITE_ONCE(clc_sk->sk_rcvtimeo, timeout);
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1,
sizeof(struct smc_clc_msg_hdr));
len = sock_recvmsg(smc->clcsock, &msg, krflags);
@@ -795,7 +795,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
out:
- clc_sk->sk_rcvtimeo = rcvtimeo;
+ WRITE_ONCE(clc_sk->sk_rcvtimeo, rcvtimeo);
return reason_code;
}
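[Editor's note] Several hunks above (smc_clc, af_smc, sctp, strparser, mptcp) wrap reads and writes of socket fields such as sk_rcvtimeo and sk_uid in READ_ONCE()/WRITE_ONCE() because other contexts may read them locklessly. The sketch below mimics the save/override/restore pattern of smc_clc_wait_msg(); the macro definitions are simplified userspace stand-ins (GCC/Clang __typeof__), not the kernel's.

/* Save, temporarily override and restore a field using once-style
 * accessors. Simplified stand-ins for READ_ONCE/WRITE_ONCE. */
#include <stdio.h>

#define READ_ONCE(x)	(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

struct sock { long sk_rcvtimeo; };

static void wait_msg(struct sock *sk, long timeout)
{
	long saved = READ_ONCE(sk->sk_rcvtimeo);

	WRITE_ONCE(sk->sk_rcvtimeo, timeout);	/* temporary override */
	printf("waiting with timeout %ld\n", READ_ONCE(sk->sk_rcvtimeo));
	/* ... receive ... */
	WRITE_ONCE(sk->sk_rcvtimeo, saved);	/* restore on exit */
}

int main(void)
{
	struct sock sk = { .sk_rcvtimeo = 3000 };

	wait_msg(&sk, 100);
	printf("restored to %ld\n", READ_ONCE(sk.sk_rcvtimeo));
	return 0;
}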
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index ac07b963aede..262746e304dd 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -2100,8 +2100,7 @@ int smc_uncompress_bufsize(u8 compressed)
/* try to reuse a sndbuf or rmb description slot for a certain
* buffer size; if not available, return NULL
*/
-static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
- struct rw_semaphore *lock,
+static struct smc_buf_desc *smc_buf_get_slot(struct rw_semaphore *lock,
struct list_head *buf_list)
{
struct smc_buf_desc *buf_slot;
@@ -2442,7 +2441,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
bufsize = smc_uncompress_bufsize(bufsize_comp);
/* check for reusable slot in the link group */
- buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
+ buf_desc = smc_buf_get_slot(lock, buf_list);
if (buf_desc) {
buf_desc->is_dma_need_sync = 0;
SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, true, bufsize);
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index 6fdb2d96777a..8ed2f6689b01 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -64,7 +64,7 @@ static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
return 1;
- r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+ r->diag_uid = from_kuid_munged(user_ns, sk_uid(sk));
r->diag_inode = sock_i_ino(sk);
return 0;
}
diff --git a/net/smc/smc_loopback.c b/net/smc/smc_loopback.c
index 3c5f64ca4115..0eb00bbefd17 100644
--- a/net/smc/smc_loopback.c
+++ b/net/smc/smc_loopback.c
@@ -251,11 +251,6 @@ static int smc_lo_move_data(struct smcd_dev *smcd, u64 dmb_tok,
return 0;
}
-static int smc_lo_supports_v2(void)
-{
- return SMC_LO_V2_CAPABLE;
-}
-
static void smc_lo_get_local_gid(struct smcd_dev *smcd,
struct smcd_gid *smcd_gid)
{
@@ -288,7 +283,6 @@ static const struct smcd_ops lo_ops = {
.reset_vlan_required = NULL,
.signal_event = NULL,
.move_data = smc_lo_move_data,
- .supports_v2 = smc_lo_supports_v2,
.get_local_gid = smc_lo_get_local_gid,
.get_chid = smc_lo_get_chid,
.get_dev = smc_lo_get_dev,
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index b391c2ef463f..76ad29e31d60 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -370,7 +370,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
goto out_put;
new_pe->type = SMC_PNET_ETH;
memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
- strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
+ strscpy(new_pe->eth_name, eth_name);
rc = -EEXIST;
new_netdev = true;
mutex_lock(&pnettable->lock);
diff --git a/net/socket.c b/net/socket.c
index 9a0e720f0859..682969deaed3 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -592,10 +592,12 @@ static int sockfs_setattr(struct mnt_idmap *idmap,
if (!err && (iattr->ia_valid & ATTR_UID)) {
struct socket *sock = SOCKET_I(d_inode(dentry));
- if (sock->sk)
- sock->sk->sk_uid = iattr->ia_uid;
- else
+ if (sock->sk) {
+ /* Paired with READ_ONCE() in sk_uid() */
+ WRITE_ONCE(sock->sk->sk_uid, iattr->ia_uid);
+ } else {
err = -ENOENT;
+ }
}
return err;
@@ -843,6 +845,52 @@ static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb,
sizeof(ts_pktinfo), &ts_pktinfo);
}
+bool skb_has_tx_timestamp(struct sk_buff *skb, const struct sock *sk)
+{
+ const struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+
+ if (serr->ee.ee_errno != ENOMSG ||
+ serr->ee.ee_origin != SO_EE_ORIGIN_TIMESTAMPING)
+ return false;
+
+ /* software time stamp available and wanted */
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) && skb->tstamp)
+ return true;
+ /* hardware time stamps available and wanted */
+ return (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ skb_hwtstamps(skb)->hwtstamp;
+}
+
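+/*
+ * Extract the TX timestamp carried by @skb into @ts: prefer the software
+ * timestamp when SOF_TIMESTAMPING_SOFTWARE is set, otherwise fall back to
+ * the hardware timestamp, optionally converted to the socket's bound PHC.
+ * Returns the SOF_TIMESTAMPING_TX_* flavour used, or -ENOENT if none.
+ */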
+int skb_get_tx_timestamp(struct sk_buff *skb, struct sock *sk,
+ struct timespec64 *ts)
+{
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t hwtstamp;
+ int if_index = 0;
+
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ ktime_to_timespec64_cond(skb->tstamp, ts))
+ return SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ if (!(tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) ||
+ skb_is_swtx_tstamp(skb, false))
+ return -ENOENT;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
+ hwtstamp = get_timestamp(sk, skb, &if_index);
+ else
+ hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+
+ if (tsflags & SOF_TIMESTAMPING_BIND_PHC)
+ hwtstamp = ptp_convert_timestamp(&hwtstamp,
+ READ_ONCE(sk->sk_bind_phc));
+ if (!ktime_to_timespec64_cond(hwtstamp, ts))
+ return -ENOENT;
+
+ return SOF_TIMESTAMPING_TX_HARDWARE;
+}
+
/*
* called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
*/
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index d946bfb424c7..43b1f558b33d 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -333,7 +333,7 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
struct strparser *strp = (struct strparser *)desc->arg.data;
return __strp_recv(desc, orig_skb, orig_offset, orig_len,
- strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo);
+ strp->sk->sk_rcvbuf, READ_ONCE(strp->sk->sk_rcvtimeo));
}
static int default_read_sock_done(struct strparser *strp, int err)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 7c61d47ea208..e028bf658499 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3642,7 +3642,7 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
nla_put_u32(skb, TIPC_NLA_SOCK_UID,
from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
- sock_i_uid(sk))) ||
+ sk_uid(sk))) ||
nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
tipc_diag_gen_cookie(sk),
TIPC_NLA_SOCK_PAD))
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 258d6aa4f21a..b85ab0fb3b8c 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -172,7 +172,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
struct udp_media_addr *dst, struct dst_cache *cache)
{
struct dst_entry *ndst;
- int ttl, err = 0;
+ int ttl, err;
local_bh_disable();
ndst = dst_cache_get(cache);
@@ -197,7 +197,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
ttl = ip4_dst_hoplimit(&rt->dst);
udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
dst->ipv4.s_addr, 0, ttl, 0, src->port,
- dst->port, false, true);
+ dst->port, false, true, 0);
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (!ndst) {
@@ -217,13 +217,13 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
dst_cache_set_ip6(cache, ndst, &fl6.saddr);
}
ttl = ip6_dst_hoplimit(ndst);
- err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
- &src->ipv6, &dst->ipv6, 0, ttl, 0,
- src->port, dst->port, false);
+ udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
+ &src->ipv6, &dst->ipv6, 0, ttl, 0,
+ src->port, dst->port, false, 0);
#endif
}
local_bh_enable();
- return err;
+ return 0;
tx_error:
local_bh_enable();
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 52b155123985..564c970d97ff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -3697,7 +3697,7 @@ static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
goto unlock;
}
- uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+ uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
meta.seq = seq;
prog = bpf_iter_get_info(&meta, false);
ret = unix_prog_seq_show(prog, &meta, v, uid);
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 79b182d0e62a..ca3473026151 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -106,7 +106,7 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
struct user_namespace *user_ns)
{
- uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+ uid_t uid = from_kuid_munged(user_ns, sk_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 5c3c72df0591..f3cd70757ef2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -995,6 +995,24 @@ int wiphy_register(struct wiphy *wiphy)
wiphy->max_num_akm_suites > CFG80211_MAX_NUM_AKM_SUITES)
return -EINVAL;
+ /* Allocate radio configuration space for multi-radio wiphy */
+ if (wiphy->n_radio > 0) {
+ int idx;
+
+ wiphy->radio_cfg = kcalloc(wiphy->n_radio,
+ sizeof(*wiphy->radio_cfg),
+ GFP_KERNEL);
+ if (!wiphy->radio_cfg)
+ return -ENOMEM;
+ /*
+ * Initialize wiphy radio parameters to IEEE 802.11
+ * MIB default values. RTS threshold is disabled by
+ * default with the special -1 value.
+ */
+ for (idx = 0; idx < wiphy->n_radio; idx++)
+ wiphy->radio_cfg[idx].rts_threshold = (u32)-1;
+ }
+
/* check and set up bitrates */
ieee80211_set_bitrate_flags(wiphy);
@@ -1222,6 +1240,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
void wiphy_free(struct wiphy *wiphy)
{
+ kfree(wiphy->radio_cfg);
put_device(&wiphy->dev);
}
EXPORT_SYMBOL(wiphy_free);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 05d44a443518..29e1ce8aff42 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -1331,7 +1331,8 @@ void cfg80211_mlo_reconf_add_done(struct net_device *dev,
lockdep_assert_wiphy(wiphy);
trace_cfg80211_mlo_reconf_add_done(dev, data->added_links,
- data->buf, data->len);
+ data->buf, data->len,
+ data->driver_initiated);
if (WARN_ON(!wdev->valid_links))
return;
@@ -1361,11 +1362,16 @@ void cfg80211_mlo_reconf_add_done(struct net_device *dev,
wdev->links[link_id].client.current_bss =
bss_from_pub(bss);
+ if (data->driver_initiated)
+ cfg80211_hold_bss(bss_from_pub(bss));
+
memcpy(wdev->links[link_id].addr,
data->links[link_id].addr,
ETH_ALEN);
} else {
- cfg80211_unhold_bss(bss_from_pub(bss));
+ if (!data->driver_initiated)
+ cfg80211_unhold_bss(bss_from_pub(bss));
+
cfg80211_put_bss(wiphy, bss);
}
}
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 85f139016da2..70ca74a75f22 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -854,6 +854,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MLO_RECONF_REM_LINKS] = { .type = NLA_U16 },
[NL80211_ATTR_EPCS] = { .type = NLA_FLAG },
[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS] = { .type = NLA_U16 },
+ [NL80211_ATTR_WIPHY_RADIO_INDEX] = { .type = NLA_U8 },
};
/* policy for the key attributes */
@@ -2446,6 +2447,7 @@ fail:
static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx)
{
const struct wiphy_radio *r = &wiphy->radio[idx];
+ const struct wiphy_radio_cfg *rcfg = &wiphy->radio_cfg[idx];
struct nlattr *radio, *freq;
int i;
@@ -2456,6 +2458,11 @@ static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx)
if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx))
goto nla_put_failure;
+ if (rcfg->rts_threshold &&
+ nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD,
+ rcfg->rts_threshold))
+ goto nla_put_failure;
+
if (r->antenna_mask &&
nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
r->antenna_mask))
@@ -2639,7 +2646,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
u32 tx_ant = 0, rx_ant = 0;
int res;
- res = rdev_get_antenna(rdev, &tx_ant, &rx_ant);
+ res = rdev_get_antenna(rdev, -1, &tx_ant, &rx_ant);
if (!res) {
if (nla_put_u32(msg,
NL80211_ATTR_WIPHY_ANTENNA_TX,
@@ -3608,6 +3615,33 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info)
return __nl80211_set_channel(rdev, netdev, info, link_id);
}
+static int nl80211_set_wiphy_radio(struct genl_info *info,
+ struct cfg80211_registered_device *rdev,
+ int radio_idx)
+{
+ u32 rts_threshold = 0, old_rts, changed = 0;
+ int result;
+
+ if (!rdev->ops->set_wiphy_params)
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) {
+ rts_threshold = nla_get_u32(
+ info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]);
+ changed |= WIPHY_PARAM_RTS_THRESHOLD;
+ }
+
+ old_rts = rdev->wiphy.radio_cfg[radio_idx].rts_threshold;
+
+ rdev->wiphy.radio_cfg[radio_idx].rts_threshold = rts_threshold;
+
+ result = rdev_set_wiphy_params(rdev, radio_idx, changed);
+ if (result)
+ rdev->wiphy.radio_cfg[radio_idx].rts_threshold = old_rts;
+
+ return result;
+}
+
static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = NULL;
@@ -3620,6 +3654,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
u32 frag_threshold = 0, rts_threshold = 0;
u8 coverage_class = 0;
u32 txq_limit = 0, txq_memory_limit = 0, txq_quantum = 0;
+ int radio_idx = -1;
rtnl_lock();
/*
@@ -3670,6 +3705,19 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (result)
return result;
+ if (info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]) {
+ /* Radio idx is not expected for non-multi radio wiphy */
+ if (rdev->wiphy.n_radio <= 0)
+ return -EINVAL;
+
+ radio_idx = nla_get_u8(
+ info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]);
+ if (radio_idx >= rdev->wiphy.n_radio)
+ return -EINVAL;
+
+ return nl80211_set_wiphy_radio(info, rdev, radio_idx);
+ }
+
if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) {
struct ieee80211_txq_params txq_params;
struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1];
@@ -3759,7 +3807,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
mbm = nla_get_u32(info->attrs[idx]);
}
- result = rdev_set_tx_power(rdev, txp_wdev, type, mbm);
+ result = rdev_set_tx_power(rdev, txp_wdev, radio_idx, type,
+ mbm);
if (result)
return result;
}
@@ -3785,7 +3834,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
tx_ant = tx_ant & rdev->wiphy.available_antennas_tx;
rx_ant = rx_ant & rdev->wiphy.available_antennas_rx;
- result = rdev_set_antenna(rdev, tx_ant, rx_ant);
+ result = rdev_set_antenna(rdev, radio_idx, tx_ant, rx_ant);
if (result)
return result;
}
@@ -3879,16 +3928,30 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (changed) {
u8 old_retry_short, old_retry_long;
u32 old_frag_threshold, old_rts_threshold;
- u8 old_coverage_class;
+ u8 old_coverage_class, i;
u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum;
+ u32 *old_radio_rts_threshold = NULL;
if (!rdev->ops->set_wiphy_params)
return -EOPNOTSUPP;
+ if (rdev->wiphy.n_radio) {
+ old_radio_rts_threshold = kcalloc(rdev->wiphy.n_radio,
+ sizeof(u32),
+ GFP_KERNEL);
+ if (!old_radio_rts_threshold)
+ return -ENOMEM;
+ }
+
old_retry_short = rdev->wiphy.retry_short;
old_retry_long = rdev->wiphy.retry_long;
old_frag_threshold = rdev->wiphy.frag_threshold;
old_rts_threshold = rdev->wiphy.rts_threshold;
+ if (old_radio_rts_threshold) {
+ for (i = 0; i < rdev->wiphy.n_radio; i++)
+ old_radio_rts_threshold[i] =
+ rdev->wiphy.radio_cfg[i].rts_threshold;
+ }
old_coverage_class = rdev->wiphy.coverage_class;
old_txq_limit = rdev->wiphy.txq_limit;
old_txq_memory_limit = rdev->wiphy.txq_memory_limit;
@@ -3900,8 +3963,13 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.retry_long = retry_long;
if (changed & WIPHY_PARAM_FRAG_THRESHOLD)
rdev->wiphy.frag_threshold = frag_threshold;
- if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+ if ((changed & WIPHY_PARAM_RTS_THRESHOLD) &&
+ old_radio_rts_threshold) {
rdev->wiphy.rts_threshold = rts_threshold;
+ for (i = 0; i < rdev->wiphy.n_radio; i++)
+ rdev->wiphy.radio_cfg[i].rts_threshold =
+ rdev->wiphy.rts_threshold;
+ }
if (changed & WIPHY_PARAM_COVERAGE_CLASS)
rdev->wiphy.coverage_class = coverage_class;
if (changed & WIPHY_PARAM_TXQ_LIMIT)
@@ -3911,18 +3979,26 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (changed & WIPHY_PARAM_TXQ_QUANTUM)
rdev->wiphy.txq_quantum = txq_quantum;
- result = rdev_set_wiphy_params(rdev, changed);
+ result = rdev_set_wiphy_params(rdev, radio_idx, changed);
if (result) {
rdev->wiphy.retry_short = old_retry_short;
rdev->wiphy.retry_long = old_retry_long;
rdev->wiphy.frag_threshold = old_frag_threshold;
rdev->wiphy.rts_threshold = old_rts_threshold;
+ if (old_radio_rts_threshold) {
+ for (i = 0; i < rdev->wiphy.n_radio; i++)
+ rdev->wiphy.radio_cfg[i].rts_threshold =
+ old_radio_rts_threshold[i];
+ }
rdev->wiphy.coverage_class = old_coverage_class;
rdev->wiphy.txq_limit = old_txq_limit;
rdev->wiphy.txq_memory_limit = old_txq_memory_limit;
rdev->wiphy.txq_quantum = old_txq_quantum;
- return result;
}
+
+ kfree(old_radio_rts_threshold);
+ return result;
}
return 0;
@@ -4012,7 +4088,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (rdev->ops->get_tx_power && !wdev->valid_links) {
int dbm, ret;
- ret = rdev_get_tx_power(rdev, wdev, 0, &dbm);
+ ret = rdev_get_tx_power(rdev, wdev, -1, 0, &dbm);
if (ret == 0 &&
nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
DBM_TO_MBM(dbm)))
@@ -4084,7 +4160,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (rdev->ops->get_tx_power) {
int dbm, ret;
- ret = rdev_get_tx_power(rdev, wdev, link_id, &dbm);
+ ret = rdev_get_tx_power(rdev, wdev, -1, link_id, &dbm);
if (ret == 0 &&
nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
DBM_TO_MBM(dbm)))
@@ -6728,6 +6804,185 @@ static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal,
return true;
}
+static int nl80211_fill_link_station(struct sk_buff *msg,
+ struct cfg80211_registered_device *rdev,
+ struct link_station_info *link_sinfo)
+{
+ struct nlattr *bss_param, *link_sinfoattr;
+
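+/* Emit one NL80211_STA_INFO_* attribute if its bit is set in ->filled */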
+#define PUT_LINK_SINFO(attr, memb, type) do { \
+ BUILD_BUG_ON(sizeof(type) == sizeof(u64)); \
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \
+ nla_put_ ## type(msg, NL80211_STA_INFO_ ## attr, \
+ link_sinfo->memb)) \
+ goto nla_put_failure; \
+ } while (0)
+#define PUT_LINK_SINFO_U64(attr, memb) do { \
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_ ## attr) && \
+ nla_put_u64_64bit(msg, NL80211_STA_INFO_ ## attr, \
+ link_sinfo->memb, NL80211_STA_INFO_PAD)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ link_sinfoattr = nla_nest_start_noflag(msg, NL80211_ATTR_STA_INFO);
+ if (!link_sinfoattr)
+ goto nla_put_failure;
+
+ PUT_LINK_SINFO(INACTIVE_TIME, inactive_time, u32);
+
+ if (link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) &&
+ nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
+ (u32)link_sinfo->rx_bytes))
+ goto nla_put_failure;
+
+ if (link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES64)) &&
+ nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
+ (u32)link_sinfo->tx_bytes))
+ goto nla_put_failure;
+
+ PUT_LINK_SINFO_U64(RX_BYTES64, rx_bytes);
+ PUT_LINK_SINFO_U64(TX_BYTES64, tx_bytes);
+ PUT_LINK_SINFO_U64(RX_DURATION, rx_duration);
+ PUT_LINK_SINFO_U64(TX_DURATION, tx_duration);
+
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
+ PUT_LINK_SINFO(AIRTIME_WEIGHT, airtime_weight, u16);
+
+ switch (rdev->wiphy.signal_type) {
+ case CFG80211_SIGNAL_TYPE_MBM:
+ PUT_LINK_SINFO(SIGNAL, signal, u8);
+ PUT_LINK_SINFO(SIGNAL_AVG, signal_avg, u8);
+ break;
+ default:
+ break;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) {
+ if (!nl80211_put_signal(msg, link_sinfo->chains,
+ link_sinfo->chain_signal,
+ NL80211_STA_INFO_CHAIN_SIGNAL))
+ goto nla_put_failure;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) {
+ if (!nl80211_put_signal(msg, link_sinfo->chains,
+ link_sinfo->chain_signal_avg,
+ NL80211_STA_INFO_CHAIN_SIGNAL_AVG))
+ goto nla_put_failure;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) {
+ if (!nl80211_put_sta_rate(msg, &link_sinfo->txrate,
+ NL80211_STA_INFO_TX_BITRATE))
+ goto nla_put_failure;
+ }
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) {
+ if (!nl80211_put_sta_rate(msg, &link_sinfo->rxrate,
+ NL80211_STA_INFO_RX_BITRATE))
+ goto nla_put_failure;
+ }
+
+ PUT_LINK_SINFO(RX_PACKETS, rx_packets, u32);
+ PUT_LINK_SINFO(TX_PACKETS, tx_packets, u32);
+ PUT_LINK_SINFO(TX_RETRIES, tx_retries, u32);
+ PUT_LINK_SINFO(TX_FAILED, tx_failed, u32);
+ PUT_LINK_SINFO(EXPECTED_THROUGHPUT, expected_throughput, u32);
+ PUT_LINK_SINFO(BEACON_LOSS, beacon_loss_count, u32);
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) {
+ bss_param = nla_nest_start_noflag(msg,
+ NL80211_STA_INFO_BSS_PARAM);
+ if (!bss_param)
+ goto nla_put_failure;
+
+ if (((link_sinfo->bss_param.flags &
+ BSS_PARAM_FLAGS_CTS_PROT) &&
+ nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
+ ((link_sinfo->bss_param.flags &
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
+ nla_put_flag(msg,
+ NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
+ ((link_sinfo->bss_param.flags &
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
+ nla_put_flag(msg,
+ NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
+ nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
+ link_sinfo->bss_param.dtim_period) ||
+ nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
+ link_sinfo->bss_param.beacon_interval))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, bss_param);
+ }
+
+ PUT_LINK_SINFO_U64(RX_DROP_MISC, rx_dropped_misc);
+ PUT_LINK_SINFO_U64(BEACON_RX, rx_beacon);
+ PUT_LINK_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
+ PUT_LINK_SINFO(RX_MPDUS, rx_mpdu_count, u32);
+ PUT_LINK_SINFO(FCS_ERROR_COUNT, fcs_err_count, u32);
+ if (wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT)) {
+ PUT_LINK_SINFO(ACK_SIGNAL, ack_signal, u8);
+ PUT_LINK_SINFO(ACK_SIGNAL_AVG, avg_ack_signal, s8);
+ }
+
+#undef PUT_LINK_SINFO
+#undef PUT_LINK_SINFO_U64
+
+ if (link_sinfo->pertid) {
+ struct nlattr *tidsattr;
+ int tid;
+
+ tidsattr = nla_nest_start_noflag(msg,
+ NL80211_STA_INFO_TID_STATS);
+ if (!tidsattr)
+ goto nla_put_failure;
+
+ for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) {
+ struct cfg80211_tid_stats *tidstats;
+ struct nlattr *tidattr;
+
+ tidstats = &link_sinfo->pertid[tid];
+
+ if (!tidstats->filled)
+ continue;
+
+ tidattr = nla_nest_start_noflag(msg, tid + 1);
+ if (!tidattr)
+ goto nla_put_failure;
+
+#define PUT_TIDVAL_U64(attr, memb) do { \
+ if (tidstats->filled & BIT(NL80211_TID_STATS_ ## attr) && \
+ nla_put_u64_64bit(msg, NL80211_TID_STATS_ ## attr, \
+ tidstats->memb, NL80211_TID_STATS_PAD)) \
+ goto nla_put_failure; \
+ } while (0)
+
+ PUT_TIDVAL_U64(RX_MSDU, rx_msdu);
+ PUT_TIDVAL_U64(TX_MSDU, tx_msdu);
+ PUT_TIDVAL_U64(TX_MSDU_RETRIES, tx_msdu_retries);
+ PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed);
+
+#undef PUT_TIDVAL_U64
+ if ((tidstats->filled &
+ BIT(NL80211_TID_STATS_TXQ_STATS)) &&
+ !nl80211_put_txq_stats(msg, &tidstats->txq_stats,
+ NL80211_TID_STATS_TXQ_STATS))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, tidattr);
+ }
+
+ nla_nest_end(msg, tidsattr);
+ }
+
+ nla_nest_end(msg, link_sinfoattr);
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
u32 seq, int flags,
struct cfg80211_registered_device *rdev,
@@ -6736,6 +6991,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
{
void *hdr;
struct nlattr *sinfoattr, *bss_param;
+ struct link_station_info *link_sinfo;
+ struct nlattr *links, *link;
+ int link_id;
hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
if (!hdr) {
@@ -6950,6 +7208,40 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
goto nla_put_failure;
}
+ if (sinfo->valid_links) {
+ links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS);
+ if (!links)
+ goto nla_put_failure;
+
+ for_each_valid_link(sinfo, link_id) {
+ link_sinfo = sinfo->links[link_id];
+
+ if (WARN_ON_ONCE(!link_sinfo))
+ continue;
+
+ if (!is_valid_ether_addr(link_sinfo->addr))
+ continue;
+
+ link = nla_nest_start(msg, link_id + 1);
+ if (!link)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID,
+ link_id))
+ goto nla_put_failure;
+
+ if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
+ link_sinfo->addr))
+ goto nla_put_failure;
+
+ if (nl80211_fill_link_station(msg, rdev, link_sinfo))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, link);
+ }
+ nla_nest_end(msg, links);
+ }
+
cfg80211_sinfo_release_content(sinfo);
genlmsg_end(msg, hdr);
return 0;
@@ -6960,6 +7252,194 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
return -EMSGSIZE;
}
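+/*
+ * Fold per-link statistics into the MLD-level station info: counters are
+ * summed across links, signal values take the best link, and rates and
+ * inactive_time follow the most recently active link.
+ */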
+static void cfg80211_sta_set_mld_sinfo(struct station_info *sinfo)
+{
+ struct link_station_info *link_sinfo;
+ int link_id, init = 0;
+ u32 link_inactive_time;
+
+ sinfo->signal = -99;
+
+ for_each_valid_link(sinfo, link_id) {
+ link_sinfo = sinfo->links[link_id];
+ if (!link_sinfo)
+ continue;
+
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
+ sinfo->tx_packets += link_sinfo->tx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+ }
+
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
+ sinfo->rx_packets += link_sinfo->rx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
+ }
+
+ if (link_sinfo->filled &
+ (BIT_ULL(NL80211_STA_INFO_TX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_TX_BYTES64))) {
+ sinfo->tx_bytes += link_sinfo->tx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES);
+ }
+
+ if (link_sinfo->filled &
+ (BIT_ULL(NL80211_STA_INFO_RX_BYTES) |
+ BIT_ULL(NL80211_STA_INFO_RX_BYTES64))) {
+ sinfo->rx_bytes += link_sinfo->rx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_RETRIES)) {
+ sinfo->tx_retries += link_sinfo->tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ }
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED)) {
+ sinfo->tx_failed += link_sinfo->tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC)) {
+ sinfo->rx_dropped_misc += link_sinfo->rx_dropped_misc;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_BEACON_LOSS)) {
+ sinfo->beacon_loss_count +=
+ link_sinfo->beacon_loss_count;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) {
+ sinfo->expected_throughput +=
+ link_sinfo->expected_throughput;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
+ }
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_MPDUS)) {
+ sinfo->rx_mpdu_count += link_sinfo->rx_mpdu_count;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_MPDUS);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_FCS_ERROR_COUNT)) {
+ sinfo->fcs_err_count += link_sinfo->fcs_err_count;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_FCS_ERROR_COUNT);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_BEACON_RX)) {
+ sinfo->rx_beacon += link_sinfo->rx_beacon;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
+ }
+
+ /* Update MLO signal, signal_avg as best among links */
+ if ((link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL)) &&
+ link_sinfo->signal > sinfo->signal) {
+ sinfo->signal = link_sinfo->signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) &&
+ link_sinfo->signal_avg > sinfo->signal_avg) {
+ sinfo->signal_avg = link_sinfo->signal_avg;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+
+ /* Update MLO inactive_time and bss_param based on the
+ * lowest value of the corresponding field across links.
+ */
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME)) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ link_inactive_time = link_sinfo->inactive_time;
+ sinfo->inactive_time = link_sinfo->inactive_time;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME);
+ }
+
+ if (link_sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM) &&
+ (!init ||
+ sinfo->bss_param.dtim_period >
+ link_sinfo->bss_param.dtim_period)) {
+ sinfo->bss_param.dtim_period =
+ link_sinfo->bss_param.dtim_period;
+ sinfo->bss_param.beacon_interval =
+ link_sinfo->bss_param.beacon_interval;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
+ }
+
+ /* Update MLO rates from the most recently active link */
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->txrate = link_sinfo->txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+ if ((link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->rxrate = link_sinfo->rxrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
+
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_TX_DURATION) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->tx_duration += link_sinfo->tx_duration;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+ }
+ if (link_sinfo->filled &
+ BIT_ULL(NL80211_STA_INFO_RX_DURATION) &&
+ (!init ||
+ link_inactive_time > link_sinfo->inactive_time)) {
+ sinfo->rx_duration += link_sinfo->rx_duration;
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+ }
+ init++;
+
+ /* pertid stats accumulate for rx/tx fields */
+ if (sinfo->pertid) {
+ sinfo->pertid->rx_msdu +=
+ link_sinfo->pertid->rx_msdu;
+ sinfo->pertid->tx_msdu +=
+ link_sinfo->pertid->tx_msdu;
+ sinfo->pertid->tx_msdu_retries +=
+ link_sinfo->pertid->tx_msdu_retries;
+ sinfo->pertid->tx_msdu_failed +=
+ link_sinfo->pertid->tx_msdu_failed;
+
+ sinfo->pertid->filled |=
+ BIT(NL80211_TID_STATS_RX_MSDU) |
+ BIT(NL80211_TID_STATS_TX_MSDU) |
+ BIT(NL80211_TID_STATS_TX_MSDU_RETRIES) |
+ BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
+ }
+ }
+
+ /* Reset sinfo->filled bits to exclude fields which don't make
+ * much sense at the MLO level.
+ */
+ sinfo->filled &= ~BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->filled &= ~BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+}
+
static int nl80211_dump_station(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -6968,7 +7448,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
struct wireless_dev *wdev;
u8 mac_addr[ETH_ALEN];
int sta_idx = cb->args[2];
- int err;
+ int err, i;
err = nl80211_prepare_wdev_dump(cb, &rdev, &wdev, NULL);
if (err)
@@ -6988,6 +7468,16 @@ static int nl80211_dump_station(struct sk_buff *skb,
while (1) {
memset(&sinfo, 0, sizeof(sinfo));
+
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ sinfo.links[i] =
+ kzalloc(sizeof(*sinfo.links[0]), GFP_KERNEL);
+ if (!sinfo.links[i]) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ }
+
err = rdev_dump_station(rdev, wdev->netdev, sta_idx,
mac_addr, &sinfo);
if (err == -ENOENT)
@@ -6995,6 +7485,9 @@ static int nl80211_dump_station(struct sk_buff *skb,
if (err)
goto out_err;
+ if (sinfo.valid_links)
+ cfg80211_sta_set_mld_sinfo(&sinfo);
+
if (nl80211_send_station(skb, NL80211_CMD_NEW_STATION,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -7009,6 +7502,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
cb->args[2] = sta_idx;
err = skb->len;
out_err:
+ cfg80211_sinfo_release_content(&sinfo);
wiphy_unlock(&rdev->wiphy);
return err;
@@ -7021,7 +7515,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
struct station_info sinfo;
struct sk_buff *msg;
u8 *mac_addr = NULL;
- int err;
+ int err, i;
memset(&sinfo, 0, sizeof(sinfo));
@@ -7033,9 +7527,19 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->get_station)
return -EOPNOTSUPP;
+ for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ sinfo.links[i] = kzalloc(sizeof(*sinfo.links[0]), GFP_KERNEL);
+ if (!sinfo.links[i]) {
+ cfg80211_sinfo_release_content(&sinfo);
+ return -ENOMEM;
+ }
+ }
+
err = rdev_get_station(rdev, dev, mac_addr, &sinfo);
- if (err)
+ if (err) {
+ cfg80211_sinfo_release_content(&sinfo);
return err;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
@@ -7043,6 +7547,9 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
}
+ if (sinfo.valid_links)
+ cfg80211_sta_set_mld_sinfo(&sinfo);
+
if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION,
info->snd_portid, info->snd_seq, 0,
rdev, dev, mac_addr, &sinfo) < 0) {
@@ -7349,6 +7856,10 @@ static int nl80211_set_station_tdls(struct genl_info *info,
}
}
+ if (info->attrs[NL80211_ATTR_S1G_CAPABILITY])
+ params->link_sta_params.s1g_capa =
+ nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]);
+
err = nl80211_parse_sta_channel_info(info, params);
if (err)
return err;
@@ -7675,6 +8186,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.link_sta_params.he_6ghz_capa =
nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
+ if (info->attrs[NL80211_ATTR_S1G_CAPABILITY])
+ params.link_sta_params.s1g_capa =
+ nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]);
+
if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
params.link_sta_params.opmode_notif_used = true;
params.link_sta_params.opmode_notif =
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 9f4783c2354c..803b39c26587 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -577,35 +577,40 @@ static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev,
}
static inline int
-rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
+rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, int radio_idx,
+ u32 changed)
{
int ret = -EOPNOTSUPP;
- trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
+ trace_rdev_set_wiphy_params(&rdev->wiphy, radio_idx, changed);
if (rdev->ops->set_wiphy_params)
- ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
+ ret = rdev->ops->set_wiphy_params(&rdev->wiphy, radio_idx,
+ changed);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm)
+ struct wireless_dev *wdev, int radio_idx,
+ enum nl80211_tx_power_setting type,
+ int mbm)
{
int ret;
- trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm);
- ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm);
+ trace_rdev_set_tx_power(&rdev->wiphy, wdev, radio_idx, type, mbm);
+ ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, radio_idx, type,
+ mbm);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, unsigned int link_id,
- int *dbm)
+ struct wireless_dev *wdev, int radio_idx,
+ unsigned int link_id, int *dbm)
{
int ret;
- trace_rdev_get_tx_power(&rdev->wiphy, wdev, link_id);
- ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, link_id, dbm);
+ trace_rdev_get_tx_power(&rdev->wiphy, wdev, radio_idx, link_id);
+ ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, radio_idx, link_id,
+ dbm);
trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm);
return ret;
}
@@ -857,21 +862,21 @@ rdev_update_mgmt_frame_registrations(struct cfg80211_registered_device *rdev,
}
static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev,
- u32 tx_ant, u32 rx_ant)
+ int radio_idx, u32 tx_ant, u32 rx_ant)
{
int ret;
- trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant);
- ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
+ trace_rdev_set_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant);
+ ret = rdev->ops->set_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev,
- u32 *tx_ant, u32 *rx_ant)
+ int radio_idx, u32 *tx_ant, u32 *rx_ant)
{
int ret;
- trace_rdev_get_antenna(&rdev->wiphy);
- ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant);
+ trace_rdev_get_antenna(&rdev->wiphy, radio_idx);
+ ret = rdev->ops->get_antenna(&rdev->wiphy, radio_idx, tx_ant, rx_ant);
if (ret)
trace_rdev_return_int(&rdev->wiphy, ret);
else
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 4ed9fada4ec0..7e43ab9de923 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -406,9 +406,19 @@ DEFINE_EVENT(wiphy_only_evt, rdev_return_void,
TP_ARGS(wiphy)
);
-DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna,
- TP_PROTO(struct wiphy *wiphy),
- TP_ARGS(wiphy)
+TRACE_EVENT(rdev_get_antenna,
+ TP_PROTO(struct wiphy *wiphy, int radio_idx),
+ TP_ARGS(wiphy, radio_idx),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ __field(int, radio_idx)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ ),
+ TP_printk(WIPHY_PR_FMT ", radio_idx: %d",
+ WIPHY_PR_ARG, __entry->radio_idx)
);
DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll,
@@ -1678,18 +1688,20 @@ TRACE_EVENT(rdev_join_ocb,
);
TRACE_EVENT(rdev_set_wiphy_params,
- TP_PROTO(struct wiphy *wiphy, u32 changed),
- TP_ARGS(wiphy, changed),
+ TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 changed),
+ TP_ARGS(wiphy, radio_idx, changed),
TP_STRUCT__entry(
WIPHY_ENTRY
+ __field(int, radio_idx)
__field(u32, changed)
),
TP_fast_assign(
WIPHY_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->changed = changed;
),
- TP_printk(WIPHY_PR_FMT ", changed: %u",
- WIPHY_PR_ARG, __entry->changed)
+ TP_printk(WIPHY_PR_FMT ", radio_idx: %d, changed: %u",
+ WIPHY_PR_ARG, __entry->radio_idx, __entry->changed)
);
DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
@@ -1710,30 +1722,51 @@ DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id)
);
-DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_tx_power,
+TRACE_EVENT(rdev_get_tx_power,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
- unsigned int link_id),
- TP_ARGS(wiphy, wdev, link_id)
+ int radio_idx, unsigned int link_id),
+ TP_ARGS(wiphy, wdev, radio_idx, link_id),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(int, radio_idx)
+ __field(unsigned int, link_id)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->radio_idx = radio_idx;
+ __entry->link_id = link_id;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
+ ", radio_idx: %d, link_id: %u",
+ WIPHY_PR_ARG, WDEV_PR_ARG,
+ __entry->radio_idx, __entry->link_id)
);
TRACE_EVENT(rdev_set_tx_power,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm),
- TP_ARGS(wiphy, wdev, type, mbm),
+ int radio_idx, enum nl80211_tx_power_setting type,
+ int mbm),
+ TP_ARGS(wiphy, wdev, radio_idx, type, mbm),
TP_STRUCT__entry(
WIPHY_ENTRY
WDEV_ENTRY
+ __field(int, radio_idx)
__field(enum nl80211_tx_power_setting, type)
__field(int, mbm)
),
TP_fast_assign(
WIPHY_ASSIGN;
WDEV_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->type = type;
__entry->mbm = mbm;
),
- TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d",
- WIPHY_PR_ARG, WDEV_PR_ARG,__entry->type, __entry->mbm)
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
+ ", radio_idx: %d, type: %u, mbm: %d",
+ WIPHY_PR_ARG, WDEV_PR_ARG,
+ __entry->radio_idx, __entry->type, __entry->mbm)
);
TRACE_EVENT(rdev_return_int_int,
@@ -1866,26 +1899,24 @@ TRACE_EVENT(rdev_return_void_tx_rx,
__entry->rx_max)
);
-DECLARE_EVENT_CLASS(tx_rx_evt,
- TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, tx, rx),
+TRACE_EVENT(rdev_set_antenna,
+ TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 tx, u32 rx),
+ TP_ARGS(wiphy, radio_idx, tx, rx),
TP_STRUCT__entry(
WIPHY_ENTRY
+ __field(int, radio_idx)
__field(u32, tx)
__field(u32, rx)
),
TP_fast_assign(
WIPHY_ASSIGN;
+ __entry->radio_idx = radio_idx;
__entry->tx = tx;
__entry->rx = rx;
),
- TP_printk(WIPHY_PR_FMT ", tx: %u, rx: %u ",
- WIPHY_PR_ARG, __entry->tx, __entry->rx)
-);
-
-DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
- TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
- TP_ARGS(wiphy, tx, rx)
+ TP_printk(WIPHY_PR_FMT ", radio_idx: %d, tx: %u, rx: %u ",
+ WIPHY_PR_ARG, __entry->radio_idx,
+ __entry->tx, __entry->rx)
);
DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
@@ -4126,20 +4157,22 @@ TRACE_EVENT(cfg80211_links_removed,
TRACE_EVENT(cfg80211_mlo_reconf_add_done,
TP_PROTO(struct net_device *netdev, u16 link_mask,
- const u8 *buf, size_t len),
- TP_ARGS(netdev, link_mask, buf, len),
+ const u8 *buf, size_t len, bool driver_initiated),
+ TP_ARGS(netdev, link_mask, buf, len, driver_initiated),
TP_STRUCT__entry(
NETDEV_ENTRY
__field(u16, link_mask)
__dynamic_array(u8, buf, len)
+ __field(bool, driver_initiated)
),
TP_fast_assign(
NETDEV_ASSIGN;
__entry->link_mask = link_mask;
memcpy(__get_dynamic_array(buf), buf, len);
+ __entry->driver_initiated = driver_initiated;
),
- TP_printk(NETDEV_PR_FMT ", link_mask:0x%x",
- NETDEV_PR_ARG, __entry->link_mask)
+ TP_printk(NETDEV_PR_FMT ", link_mask:0x%x, driver_initiated:%d",
+ NETDEV_PR_ARG, __entry->link_mask, __entry->driver_initiated)
);
TRACE_EVENT(rdev_assoc_ml_reconf,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ed868c0f7ca8..5aff11c35303 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2516,6 +2516,30 @@ int cfg80211_check_combinations(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_check_combinations);
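+/*
+ * Map a channel to the index of the wiphy radio whose frequency range
+ * covers it; returns -ENOENT if no radio matches, -EINVAL without a channel.
+ */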
+int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
+ const struct ieee80211_channel *chan)
+{
+ const struct wiphy_radio *radio;
+ int i, j;
+ u32 freq;
+
+ if (!chan)
+ return -EINVAL;
+
+ freq = ieee80211_channel_to_khz(chan);
+ for (i = 0; i < wiphy->n_radio; i++) {
+ radio = &wiphy->radio[i];
+ for (j = 0; j < radio->n_freq_range; j++) {
+ if (freq >= radio->freq_range[j].start_freq &&
+ freq < radio->freq_range[j].end_freq)
+ return i;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(cfg80211_get_radio_idx_by_chan);
+
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
u32 *mask)
@@ -2626,6 +2650,18 @@ bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
return false;
}
+int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo,
+ gfp_t gfp)
+{
+ link_sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1,
+ sizeof(*link_sinfo->pertid), gfp);
+ if (!link_sinfo->pertid)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(cfg80211_link_sinfo_alloc_tid_stats);
+
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp)
{
sinfo->pertid = kcalloc(IEEE80211_NUM_TIDS + 1,
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index a74b1afc594e..1241fda78a68 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -263,7 +263,7 @@ int cfg80211_wext_siwrts(struct net_device *dev,
else
wdev->wiphy->rts_threshold = rts->value;
- err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD);
+ err = rdev_set_wiphy_params(rdev, -1, WIPHY_PARAM_RTS_THRESHOLD);
if (err)
wdev->wiphy->rts_threshold = orts;
return err;
@@ -304,7 +304,7 @@ int cfg80211_wext_siwfrag(struct net_device *dev,
wdev->wiphy->frag_threshold = frag->value & ~0x1;
}
- err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD);
+ err = rdev_set_wiphy_params(rdev, -1, WIPHY_PARAM_FRAG_THRESHOLD);
if (err)
wdev->wiphy->frag_threshold = ofrag;
return err;
@@ -355,7 +355,7 @@ static int cfg80211_wext_siwretry(struct net_device *dev,
changed |= WIPHY_PARAM_RETRY_SHORT;
}
- err = rdev_set_wiphy_params(rdev, changed);
+ err = rdev_set_wiphy_params(rdev, -1, changed);
if (err) {
wdev->wiphy->retry_short = oshort;
wdev->wiphy->retry_long = olong;
@@ -890,7 +890,7 @@ static int cfg80211_wext_siwtxpower(struct net_device *dev,
guard(wiphy)(&rdev->wiphy);
- return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm));
+ return rdev_set_tx_power(rdev, wdev, -1, type, DBM_TO_MBM(dbm));
}
static int cfg80211_wext_giwtxpower(struct net_device *dev,
@@ -910,7 +910,7 @@ static int cfg80211_wext_giwtxpower(struct net_device *dev,
return -EOPNOTSUPP;
scoped_guard(wiphy, &rdev->wiphy) {
- err = rdev_get_tx_power(rdev, wdev, 0, &val);
+ err = rdev_get_tx_power(rdev, wdev, -1, 0, &val);
}
if (err)
return err;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 1f8ae9f4a3f1..655d1e0ae25f 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -891,7 +891,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock,
if (sk->sk_state != TCP_LISTEN)
goto out2;
- rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
+ rc = x25_wait_for_data(sk, READ_ONCE(sk->sk_rcvtimeo));
if (rc)
goto out2;
skb = skb_dequeue(&sk->sk_receive_queue);
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 09dcea0cbbed..0e0bca031c03 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -119,7 +119,7 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
if ((req->xdiag_show & XDP_SHOW_INFO) &&
nla_put_u32(nlskb, XDP_DIAG_UID,
- from_kuid_munged(user_ns, sock_i_uid(sk))))
+ from_kuid_munged(user_ns, sk_uid(sk))))
goto out_nlmsg_trim;
if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 664f7b7a622c..f597734d83cc 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3741,6 +3741,18 @@ sub process {
}
}
+# Check for RGMII phy-mode with delay on PCB
+ if ($realfile =~ /\.(dts|dtsi|dtso)$/ &&
+ $line =~ /^\+\s*(phy-mode|phy-connection-type)\s*=\s*"/ &&
+ !ctx_has_comment($first_line, $linenr)) {
+ my $prop = $1;
+ my $mode = get_quoted_string($line, $rawline);
+ if ($mode =~ /^"rgmii(?:|-rxid|-txid)"$/) {
+ WARN("UNCOMMENTED_RGMII_MODE",
+ "$prop $mode without comment -- delays on the PCB should be described, otherwise use \"rgmii-id\"\n" . $herecurr);
+ }
+ }
+
# check for using SPDX license tag at beginning of files
if ($realline == $checklicenseline) {
if ($rawline =~ /^[ \+]\s*\#\!\s*\//) {
diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile
index be780bcb73a3..bd309b2d3909 100644
--- a/tools/testing/selftests/drivers/net/Makefile
+++ b/tools/testing/selftests/drivers/net/Makefile
@@ -12,6 +12,7 @@ TEST_GEN_FILES := \
TEST_PROGS := \
napi_id.py \
netcons_basic.sh \
+ netcons_cmdline.sh \
netcons_fragmented_msg.sh \
netcons_overflow.sh \
netcons_sysdata.sh \
diff --git a/tools/testing/selftests/drivers/net/hw/devmem.py b/tools/testing/selftests/drivers/net/hw/devmem.py
index 7947650210a0..baa2f24240ba 100755
--- a/tools/testing/selftests/drivers/net/hw/devmem.py
+++ b/tools/testing/selftests/drivers/net/hw/devmem.py
@@ -51,15 +51,14 @@ def check_tx(cfg) -> None:
@ksft_disruptive
def check_tx_chunks(cfg) -> None:
- cfg.require_ipver("6")
require_devmem(cfg)
port = rand_port()
- listen_cmd = f"socat -U - TCP6-LISTEN:{port}"
+ listen_cmd = f"socat -U - TCP{cfg.addr_ipver}-LISTEN:{port}"
with bkg(listen_cmd, exit_wait=True) as socat:
wait_port_listen(port)
- cmd(f"echo -e \"hello\\nworld\"| {cfg.bin_remote} -f {cfg.ifname} -s {cfg.addr_v['6']} -p {port} -z 3", host=cfg.remote, shell=True)
+ cmd(f"echo -e \"hello\\nworld\"| {cfg.bin_remote} -f {cfg.ifname} -s {cfg.addr} -p {port} -z 3", host=cfg.remote, shell=True)
ksft_eq(socat.stdout.strip(), "hello\nworld")
diff --git a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
index b582885786f5..56ff11074b55 100644
--- a/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/hw/lib/py/__init__.py
@@ -7,8 +7,25 @@ KSFT_DIR = (Path(__file__).parent / "../../../../..").resolve()
try:
sys.path.append(KSFT_DIR.as_posix())
+
from net.lib.py import *
from drivers.net.lib.py import *
+
+ # Import one by one to avoid pylint false positives
+ from net.lib.py import EthtoolFamily, NetdevFamily, NetshaperFamily, \
+ NlError, RtnlFamily
+ from net.lib.py import CmdExitFailure
+ from net.lib.py import bkg, cmd, defer, ethtool, fd_read_timeout, ip, \
+ rand_port, tool, wait_port_listen
+ from net.lib.py import fd_read_timeout
+ from net.lib.py import KsftSkipEx, KsftFailEx, KsftXfailEx
+ from net.lib.py import ksft_disruptive, ksft_exit, ksft_pr, ksft_run, \
+ ksft_setup
+ from net.lib.py import ksft_eq, ksft_ge, ksft_in, ksft_is, ksft_lt, \
+ ksft_ne, ksft_not_in, ksft_raises, ksft_true
+ from net.lib.py import NetNSEnter
+ from drivers.net.lib.py import GenerateTraffic
+ from drivers.net.lib.py import NetDrvEnv, NetDrvEpEnv
except ModuleNotFoundError as e:
ksft_pr("Failed importing `net` library from kernel sources")
ksft_pr(str(e))
diff --git a/tools/testing/selftests/drivers/net/hw/ncdevmem.c b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
index 02e4d3d7ded2..cc9b40d9c5d5 100644
--- a/tools/testing/selftests/drivers/net/hw/ncdevmem.c
+++ b/tools/testing/selftests/drivers/net/hw/ncdevmem.c
@@ -852,7 +852,6 @@ static int do_client(struct memory_buffer *mem)
ssize_t line_size = 0;
struct cmsghdr *cmsg;
char *line = NULL;
- unsigned long mid;
size_t len = 0;
int socket_fd;
__u32 ddmabuf;
diff --git a/tools/testing/selftests/drivers/net/hw/rss_api.py b/tools/testing/selftests/drivers/net/hw/rss_api.py
new file mode 100755
index 000000000000..db0f723a674b
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/rss_api.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""
+API level tests for RSS (mostly Netlink vs IOCTL).
+"""
+
+import glob
+from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_is, ksft_ne
+from lib.py import KsftSkipEx, KsftFailEx
+from lib.py import defer, ethtool
+from lib.py import EthtoolFamily
+from lib.py import NetDrvEnv
+
+
+def _ethtool_create(cfg, act, opts):
+ output = ethtool(f"{act} {cfg.ifname} {opts}").stdout
+ # Output will be something like: "New RSS context is 1" or
+ # "Added rule with ID 7", we want the integer from the end
+ return int(output.split()[-1])
+
+
+def test_rxfh_indir_ntf(cfg):
+ """
+ Check that Netlink notifications are generated when RSS indirection
+ table was modified.
+ """
+
+ qcnt = len(glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*"))
+ if qcnt < 2:
+ raise KsftSkipEx(f"Local has only {qcnt} queues")
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ ethtool(f"--disable-netlink -X {cfg.ifname} weight 0 1")
+ reset = defer(ethtool, f"-X {cfg.ifname} default")
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_eq(set(ntf["msg"]["indir"]), {1})
+
+ reset.exec()
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received after reset")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_is(ntf["msg"].get("context"), None)
+ ksft_ne(set(ntf["msg"]["indir"]), {1})
+
+
+def test_rxfh_indir_ctx_ntf(cfg):
+ """
+ Check that Netlink notifications are generated when RSS indirection
+ table was modified on an additional RSS context.
+ """
+
+ qcnt = len(glob.glob(f"/sys/class/net/{cfg.ifname}/queues/rx-*"))
+ if qcnt < 2:
+ raise KsftSkipEx(f"Local has only {qcnt} queues")
+
+ ctx_id = _ethtool_create(cfg, "-X", "context new")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+ ethnl = EthtoolFamily()
+ ethnl.ntf_subscribe("monitor")
+
+ ethtool(f"--disable-netlink -X {cfg.ifname} context {ctx_id} weight 0 1")
+
+ ntf = next(ethnl.poll_ntf(duration=0.2), None)
+ if ntf is None:
+ raise KsftFailEx("No notification received")
+ ksft_eq(ntf["name"], "rss-ntf")
+ ksft_eq(ntf["msg"].get("context"), ctx_id)
+ ksft_eq(set(ntf["msg"]["indir"]), {1})
+
+
+def main() -> None:
+ """ Ksft boiler plate main """
+
+ with NetDrvEnv(__file__, nsim_test=False) as cfg:
+ ksft_run(globs=globals(), case_pfx={"test_"}, args=(cfg, ))
+ ksft_exit()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/drivers/net/lib/py/__init__.py b/tools/testing/selftests/drivers/net/lib/py/__init__.py
index 401e70f7f136..9ed1d8f70524 100644
--- a/tools/testing/selftests/drivers/net/lib/py/__init__.py
+++ b/tools/testing/selftests/drivers/net/lib/py/__init__.py
@@ -7,7 +7,21 @@ KSFT_DIR = (Path(__file__).parent / "../../../..").resolve()
try:
sys.path.append(KSFT_DIR.as_posix())
+
from net.lib.py import *
+
+ # Import one by one to avoid pylint false positives
+ from net.lib.py import EthtoolFamily, NetdevFamily, NetshaperFamily, \
+ NlError, RtnlFamily
+ from net.lib.py import CmdExitFailure
+ from net.lib.py import bkg, cmd, defer, ethtool, fd_read_timeout, ip, \
+ rand_port, tool, wait_port_listen
+ from net.lib.py import fd_read_timeout
+ from net.lib.py import KsftSkipEx, KsftFailEx, KsftXfailEx
+ from net.lib.py import ksft_disruptive, ksft_exit, ksft_pr, ksft_run, \
+ ksft_setup
+ from net.lib.py import ksft_eq, ksft_ge, ksft_in, ksft_is, ksft_lt, \
+ ksft_ne, ksft_not_in, ksft_raises, ksft_true
except ModuleNotFoundError as e:
ksft_pr("Failed importing `net` library from kernel sources")
ksft_pr(str(e))
diff --git a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
index 29b01b8e2215..3fcf85a34596 100644
--- a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
+++ b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh
@@ -95,6 +95,8 @@ function set_network() {
}
function create_dynamic_target() {
+ local FORMAT=${1:-"extended"}
+
DSTMAC=$(ip netns exec "${NAMESPACE}" \
ip link show "${DSTIF}" | awk '/ether/ {print $2}')
@@ -106,9 +108,30 @@ function create_dynamic_target() {
echo "${DSTMAC}" > "${NETCONS_PATH}"/remote_mac
echo "${SRCIF}" > "${NETCONS_PATH}"/dev_name
+ if [ "${FORMAT}" == "basic" ]
+ then
+ # Basic target does not support release
+ echo 0 > "${NETCONS_PATH}"/release
+ echo 0 > "${NETCONS_PATH}"/extended
+ elif [ "${FORMAT}" == "extended" ]
+ then
+ echo 1 > "${NETCONS_PATH}"/extended
+ fi
+
echo 1 > "${NETCONS_PATH}"/enabled
}
+# Generate the command line argument for netconsole, following the format:
+# netconsole=[+][src-port]@[src-ip]/[<dev>],[tgt-port]@<tgt-ip>/[tgt-macaddr]
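+# e.g. netconsole="+1514@192.0.2.1/eth0,6666@192.0.2.2/aa:bb:cc:dd:ee:ff"
+# (the addresses, interface and MAC above are purely illustrative)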
+function create_cmdline_str() {
+ DSTMAC=$(ip netns exec "${NAMESPACE}" \
+ ip link show "${DSTIF}" | awk '/ether/ {print $2}')
+ SRCPORT="1514"
+ TGTPORT="6666"
+
+ echo "netconsole=\"+${SRCPORT}@${SRCIP}/${SRCIF},${TGTPORT}@${DSTIP}/${DSTMAC}\""
+}
+
# Do not append the release to the header of the message
function disable_release_append() {
echo 0 > "${NETCONS_PATH}"/enabled
@@ -116,16 +139,9 @@ function disable_release_append() {
echo 1 > "${NETCONS_PATH}"/enabled
}
-function cleanup() {
+function do_cleanup() {
local NSIM_DEV_SYS_DEL="/sys/bus/netdevsim/del_device"
- # delete netconsole dynamic reconfiguration
- echo 0 > "${NETCONS_PATH}"/enabled
- # Remove all the keys that got created during the selftest
- find "${NETCONS_PATH}/userdata/" -mindepth 1 -type d -delete
- # Remove the configfs entry
- rmdir "${NETCONS_PATH}"
-
# Delete netdevsim devices
echo "$NSIM_DEV_2_ID" > "$NSIM_DEV_SYS_DEL"
echo "$NSIM_DEV_1_ID" > "$NSIM_DEV_SYS_DEL"
@@ -137,6 +153,17 @@ function cleanup() {
echo "${DEFAULT_PRINTK_VALUES}" > /proc/sys/kernel/printk
}
+function cleanup() {
+ # delete netconsole dynamic reconfiguration
+ echo 0 > "${NETCONS_PATH}"/enabled
+ # Remove all the keys that got created during the selftest
+ find "${NETCONS_PATH}/userdata/" -mindepth 1 -type d -delete
+ # Remove the configfs entry
+ rmdir "${NETCONS_PATH}"
+
+ do_cleanup
+}
+
function set_user_data() {
if [[ ! -d "${NETCONS_PATH}""/userdata" ]]
then
@@ -157,13 +184,10 @@ function listen_port_and_save_to() {
socat UDP-LISTEN:"${PORT}",fork "${OUTPUT}"
}
-function validate_result() {
+# Only validate that the message arrived properly
+function validate_msg() {
local TMPFILENAME="$1"
- # TMPFILENAME will contain something like:
- # 6.11.1-0_fbk0_rc13_509_g30d75cea12f7,13,1822,115075213798,-;netconsole selftest: netcons_gtJHM
- # key=value
-
# Check if the file exists
if [ ! -f "$TMPFILENAME" ]; then
echo "FAIL: File was not generated." >&2
@@ -175,17 +199,32 @@ function validate_result() {
cat "${TMPFILENAME}" >&2
exit "${ksft_fail}"
fi
+}
- if ! grep -q "${USERDATA_KEY}=${USERDATA_VALUE}" "${TMPFILENAME}"; then
- echo "FAIL: ${USERDATA_KEY}=${USERDATA_VALUE} not found in ${TMPFILENAME}" >&2
- cat "${TMPFILENAME}" >&2
- exit "${ksft_fail}"
+# Validate the message and userdata
+function validate_result() {
+ local TMPFILENAME="$1"
+
+ # TMPFILENAME will contain something like:
+ # 6.11.1-0_fbk0_rc13_509_g30d75cea12f7,13,1822,115075213798,-;netconsole selftest: netcons_gtJHM
+ # key=value
+
+ validate_msg "${TMPFILENAME}"
+
+ # userdata is not supported on basic format target,
+ # thus, do not validate it.
+ if [ "${FORMAT}" != "basic" ];
+ then
+ if ! grep -q "${USERDATA_KEY}=${USERDATA_VALUE}" "${TMPFILENAME}"; then
+ echo "FAIL: ${USERDATA_KEY}=${USERDATA_VALUE} not found in ${TMPFILENAME}" >&2
+ cat "${TMPFILENAME}" >&2
+ exit "${ksft_fail}"
+ fi
fi
# Delete the file once it is validated, otherwise keep it
# for debugging purposes
rm "${TMPFILENAME}"
- exit "${ksft_pass}"
}
function check_for_dependencies() {
@@ -246,3 +285,12 @@ function pkill_socat() {
pkill -f "${PROCESS_NAME}"
set -e
}
+
+# Check if netconsole was compiled as a module, otherwise exit
+function check_netconsole_module() {
+ if modinfo netconsole | grep filename: | grep -q builtin
+ then
+ echo "SKIP: netconsole should be compiled as a module" >&2
+ exit "${ksft_skip}"
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/netcons_basic.sh b/tools/testing/selftests/drivers/net/netcons_basic.sh
index fe765da498e8..40a6ac6191b8 100755
--- a/tools/testing/selftests/drivers/net/netcons_basic.sh
+++ b/tools/testing/selftests/drivers/net/netcons_basic.sh
@@ -32,21 +32,35 @@ check_for_dependencies
echo "6 5" > /proc/sys/kernel/printk
# Remove the namespace, interfaces and netconsole target on exit
trap cleanup EXIT
-# Create one namespace and two interfaces
-set_network
-# Create a dynamic target for netconsole
-create_dynamic_target
-# Set userdata "key" with the "value" value
-set_user_data
-# Listed for netconsole port inside the namespace and destination interface
-listen_port_and_save_to "${OUTPUT_FILE}" &
-# Wait for socat to start and listen to the port.
-wait_local_port_listen "${NAMESPACE}" "${PORT}" udp
-# Send the message
-echo "${MSG}: ${TARGET}" > /dev/kmsg
-# Wait until socat saves the file to disk
-busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}"
-
-# Make sure the message was received in the dst part
-# and exit
-validate_result "${OUTPUT_FILE}"
+
+# Run the test twice, with different format modes
+for FORMAT in "basic" "extended"
+do
+ echo "Running with target mode: ${FORMAT}"
+ # Create one namespace and two interfaces
+ set_network
+ # Create a dynamic target for netconsole
+ create_dynamic_target "${FORMAT}"
+ # Only set userdata for extended format
+ if [ "$FORMAT" == "extended" ]
+ then
+ # Set userdata "key" with the "value" value
+ set_user_data
+ fi
+ # Listen for the netconsole port inside the namespace on the destination interface
+ listen_port_and_save_to "${OUTPUT_FILE}" &
+ # Wait for socat to start and listen to the port.
+ wait_local_port_listen "${NAMESPACE}" "${PORT}" udp
+ # Send the message
+ echo "${MSG}: ${TARGET}" > /dev/kmsg
+ # Wait until socat saves the file to disk
+ busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}"
+
+ # Make sure the message was received in the dst part
+ # and exit
+ validate_result "${OUTPUT_FILE}" "${FORMAT}"
+ cleanup
+done
+
+trap - EXIT
+exit "${ksft_pass}"
diff --git a/tools/testing/selftests/drivers/net/netcons_cmdline.sh b/tools/testing/selftests/drivers/net/netcons_cmdline.sh
new file mode 100755
index 000000000000..ad2fb8b1c463
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netcons_cmdline.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This selftest exercises netconsole's cmdline arguments. It loads netconsole
+# from the cmdline instead of using dynamic reconfiguration, which covers
+# parsing the long netconsole= line and the full flow through
+# init_netconsole().
+#
+# Author: Breno Leitao <leitao@debian.org>
+
+set -euo pipefail
+
+SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")
+
+source "${SCRIPTDIR}"/lib/sh/lib_netcons.sh
+
+check_netconsole_module
+
+modprobe netdevsim 2> /dev/null || true
+rmmod netconsole 2> /dev/null || true
+
+# The content of kmsg will be saved to the following file
+OUTPUT_FILE="/tmp/${TARGET}"
+
+# Check for basic system dependency and exit if not found
+check_for_dependencies
+# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5)
+echo "6 5" > /proc/sys/kernel/printk
+# Remove the namespace and network interfaces
+trap do_cleanup EXIT
+# Create one namespace and two interfaces
+set_network
+# Create the command line for netconsole, with the configuration from the
+# function above
+CMDLINE="$(create_cmdline_str)"
+
+# Load the module, with the cmdline set
+modprobe netconsole "${CMDLINE}"
+
+# Listen on the netconsole port inside the namespace, on the destination interface
+listen_port_and_save_to "${OUTPUT_FILE}" &
+# Wait for socat to start and listen to the port.
+wait_local_port_listen "${NAMESPACE}" "${PORT}" udp
+# Send the message
+echo "${MSG}: ${TARGET}" > /dev/kmsg
+# Wait until socat saves the file to disk
+busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}"
+# Make sure the message was received in the dst part
+# and exit
+validate_msg "${OUTPUT_FILE}"
+
+exit "${ksft_pass}"
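
The netconsole= string built by create_cmdline_str() follows the format documented in Documentation/networking/netconsole.rst, [+][src-port]@[src-ip]/[<dev>],[tgt-port]@<tgt-ip>/[tgt-macaddr], where a leading '+' requests the extended message format. A hedged sketch with placeholder values (the helper derives the real ones from the test namespace):

    # Sketch only; the interface, addresses and MAC are placeholders.
    modprobe netconsole \
        netconsole="+6665@192.0.2.1/eth0,6666@192.0.2.2/00:11:22:33:44:55"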
diff --git a/tools/testing/selftests/drivers/net/netcons_sysdata.sh b/tools/testing/selftests/drivers/net/netcons_sysdata.sh
index a737e377bf08..baf69031089e 100755
--- a/tools/testing/selftests/drivers/net/netcons_sysdata.sh
+++ b/tools/testing/selftests/drivers/net/netcons_sysdata.sh
@@ -53,6 +53,17 @@ function set_release() {
echo 1 > "${NETCONS_PATH}/userdata/release_enabled"
}
+# Enable the msgid to be appended to sysdata
+function set_msgid() {
+ if [[ ! -f "${NETCONS_PATH}/userdata/msgid_enabled" ]]
+ then
+ echo "Not able to enable msgid sysdata append. Configfs not available in ${NETCONS_PATH}/userdata/msgid_enabled" >&2
+ exit "${ksft_skip}"
+ fi
+
+ echo 1 > "${NETCONS_PATH}/userdata/msgid_enabled"
+}
+
# Disable the sysdata cpu_nr feature
function unset_cpu_nr() {
echo 0 > "${NETCONS_PATH}/userdata/cpu_nr_enabled"
@@ -67,6 +78,10 @@ function unset_release() {
echo 0 > "${NETCONS_PATH}/userdata/release_enabled"
}
+function unset_msgid() {
+ echo 0 > "${NETCONS_PATH}/userdata/msgid_enabled"
+}
+
# Test if MSG contains sysdata
function validate_sysdata() {
# OUTPUT_FILE will contain something like:
@@ -74,6 +89,7 @@ function validate_sysdata() {
# userdatakey=userdatavalue
# cpu=X
# taskname=<taskname>
+ # msgid=<id>
# Echo is what this test uses to create the message. See runtest()
# function
@@ -104,6 +120,12 @@ function validate_sysdata() {
exit "${ksft_fail}"
fi
+ if ! grep -q "msgid=[0-9]\+$" "${OUTPUT_FILE}"; then
+ echo "FAIL: 'msgid=<id>' not found in ${OUTPUT_FILE}" >&2
+ cat "${OUTPUT_FILE}" >&2
+ exit "${ksft_fail}"
+ fi
+
rm "${OUTPUT_FILE}"
pkill_socat
}
@@ -155,6 +177,12 @@ function validate_no_sysdata() {
exit "${ksft_fail}"
fi
+ if grep -q "msgid=" "${OUTPUT_FILE}"; then
+ echo "FAIL: 'msgid=' found in ${OUTPUT_FILE}" >&2
+ cat "${OUTPUT_FILE}" >&2
+ exit "${ksft_fail}"
+ fi
+
rm "${OUTPUT_FILE}"
}
@@ -206,6 +234,7 @@ set_cpu_nr
# Enable taskname to be appended to sysdata
set_taskname
set_release
+set_msgid
runtest
# Make sure the message was received in the dst part
# and exit
@@ -235,6 +264,7 @@ MSG="Test #3 from CPU${CPU}"
unset_cpu_nr
unset_taskname
unset_release
+unset_msgid
runtest
# At this time, cpu= shouldn't be present in the msg
validate_no_sysdata
diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
index 92c2f0376c08..4c859ecdad94 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
@@ -266,7 +266,6 @@ for port in 0 1; do
echo $NSIM_ID > /sys/bus/netdevsim/new_device
else
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
echo 1 > $NSIM_DEV_SYS/new_port
fi
NSIM_NETDEV=`get_netdev_name old_netdevs`
@@ -350,23 +349,11 @@ old_netdevs=$(ls /sys/class/net)
port=0
echo $NSIM_ID > /sys/bus/netdevsim/new_device
echo 0 > $NSIM_DEV_SYS/del_port
-echo 1000 > $NSIM_DEV_DFS/udp_ports_sleep
echo 0 > $NSIM_DEV_SYS/new_port
NSIM_NETDEV=`get_netdev_name old_netdevs`
msg="create VxLANs"
-exp0=( 0 0 0 0 ) # sleep is longer than out wait
-new_vxlan vxlan0 10000 $NSIM_NETDEV
-
-modprobe -r vxlan
-modprobe -r udp_tunnel
-
-msg="remove tunnels"
-exp0=( 0 0 0 0 )
-check_tables
-
-msg="create VxLANs"
-exp0=( 0 0 0 0 ) # sleep is longer than out wait
+exp0=( `mke 10000 1` 0 0 0 )
new_vxlan vxlan0 10000 $NSIM_NETDEV
exp0=( 0 0 0 0 )
@@ -428,7 +415,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -486,7 +472,6 @@ echo 1 > $NSIM_DEV_DFS/udp_ports_sync_all
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -543,7 +528,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -573,7 +557,6 @@ echo 1 > $NSIM_DEV_DFS/udp_ports_ipv4_only
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -634,7 +617,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -690,7 +672,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -750,7 +731,6 @@ echo 0 > $NSIM_DEV_SYS/del_port
for port in 0 1; do
if [ $port -ne 0 ]; then
echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
- echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
fi
echo $port > $NSIM_DEV_SYS/new_port
@@ -809,7 +789,6 @@ echo $NSIM_ID > /sys/bus/netdevsim/new_device
echo 0 > $NSIM_DEV_SYS/del_port
echo 0 > $NSIM_DEV_DFS/udp_ports_open_only
-echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
echo 1 > $NSIM_DEV_DFS/udp_ports_shared
old_netdevs=$(ls /sys/class/net)
diff --git a/tools/testing/selftests/drivers/net/stats.py b/tools/testing/selftests/drivers/net/stats.py
index efcc1e10575b..c2bb5d3f1ca1 100755
--- a/tools/testing/selftests/drivers/net/stats.py
+++ b/tools/testing/selftests/drivers/net/stats.py
@@ -1,12 +1,16 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
+"""
+Tests related to standard netdevice statistics.
+"""
+
import errno
import subprocess
import time
from lib.py import ksft_run, ksft_exit, ksft_pr
from lib.py import ksft_ge, ksft_eq, ksft_is, ksft_in, ksft_lt, ksft_true, ksft_raises
-from lib.py import KsftSkipEx, KsftXfailEx
+from lib.py import KsftSkipEx, KsftFailEx
from lib.py import ksft_disruptive
from lib.py import EthtoolFamily, NetdevFamily, RtnlFamily, NlError
from lib.py import NetDrvEnv
@@ -18,13 +22,16 @@ rtnl = RtnlFamily()
def check_pause(cfg) -> None:
- global ethnl
+ """
+ Check that drivers which support Pause config also report standard
+ pause stats.
+ """
try:
ethnl.pause_get({"header": {"dev-index": cfg.ifindex}})
except NlError as e:
if e.error == errno.EOPNOTSUPP:
- raise KsftXfailEx("pause not supported by the device")
+ raise KsftSkipEx("pause not supported by the device") from e
raise
data = ethnl.pause_get({"header": {"dev-index": cfg.ifindex,
@@ -33,13 +40,16 @@ def check_pause(cfg) -> None:
def check_fec(cfg) -> None:
- global ethnl
+ """
+ Check that drivers which support FEC config also report standard
+ FEC stats.
+ """
try:
ethnl.fec_get({"header": {"dev-index": cfg.ifindex}})
except NlError as e:
if e.error == errno.EOPNOTSUPP:
- raise KsftXfailEx("FEC not supported by the device")
+ raise KsftSkipEx("FEC not supported by the device") from e
raise
data = ethnl.fec_get({"header": {"dev-index": cfg.ifindex,
@@ -48,15 +58,17 @@ def check_fec(cfg) -> None:
def pkt_byte_sum(cfg) -> None:
- global netfam, rtnl
+ """
+ Check that qstat and interface stats match in value.
+ """
def get_qstat(test):
- global netfam
stats = netfam.qstats_get({}, dump=True)
if stats:
for qs in stats:
if qs["ifindex"]== test.ifindex:
return qs
+ return None
qstat = get_qstat(cfg)
if qstat is None:
@@ -77,15 +89,14 @@ def pkt_byte_sum(cfg) -> None:
for _ in range(10):
rtstat = rtnl.getlink({"ifi-index": cfg.ifindex})['stats64']
if stat_cmp(rtstat, qstat) < 0:
- raise Exception("RTNL stats are lower, fetched later")
+ raise KsftFailEx("RTNL stats are lower, fetched later")
qstat = get_qstat(cfg)
if stat_cmp(rtstat, qstat) > 0:
- raise Exception("Qstats are lower, fetched later")
+ raise KsftFailEx("Qstats are lower, fetched later")
def qstat_by_ifindex(cfg) -> None:
- global netfam
- global rtnl
+ """ Qstats Netlink API tests - querying by ifindex. """
# Construct a map ifindex -> [dump, by-index, dump]
ifindexes = {}
@@ -93,7 +104,7 @@ def qstat_by_ifindex(cfg) -> None:
for entry in stats:
ifindexes[entry['ifindex']] = [entry, None, None]
- for ifindex in ifindexes.keys():
+ for ifindex in ifindexes:
entry = netfam.qstats_get({"ifindex": ifindex}, dump=True)
ksft_eq(len(entry), 1)
ifindexes[entry[0]['ifindex']][1] = entry[0]
@@ -145,7 +156,7 @@ def qstat_by_ifindex(cfg) -> None:
# Try to get stats for lowest unused ifindex but not 0
devs = rtnl.getlink({}, dump=True)
- all_ifindexes = set([dev["ifi-index"] for dev in devs])
+ all_ifindexes = set(dev["ifi-index"] for dev in devs)
lowest = 2
while lowest in all_ifindexes:
lowest += 1
@@ -158,18 +169,20 @@ def qstat_by_ifindex(cfg) -> None:
@ksft_disruptive
def check_down(cfg) -> None:
+ """ Test statistics (interface and qstat) are not impacted by ifdown """
+
try:
qstat = netfam.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
except NlError as e:
if e.error == errno.EOPNOTSUPP:
- raise KsftSkipEx("qstats not supported by the device")
+ raise KsftSkipEx("qstats not supported by the device") from e
raise
ip(f"link set dev {cfg.dev['ifname']} down")
defer(ip, f"link set dev {cfg.dev['ifname']} up")
qstat2 = netfam.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0]
- for k, v in qstat.items():
+ for k in qstat:
ksft_ge(qstat2[k], qstat[k], comment=f"{k} went backwards on device down")
# exercise per-queue API to make sure that "device down" state
@@ -263,6 +276,8 @@ def procfs_downup_hammer(cfg) -> None:
def main() -> None:
+ """ Ksft boilerplate main """
+
with NetDrvEnv(__file__, queue_count=100) as cfg:
ksft_run([check_pause, check_fec, pkt_byte_sum, qstat_by_ifindex,
check_down, procfs_hammer, procfs_downup_hammer],
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 332f387615d7..227f9e067d25 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -41,6 +41,7 @@ TEST_PROGS += netns-name.sh
TEST_PROGS += link_netns.py
TEST_PROGS += nl_netdev.py
TEST_PROGS += rtnetlink.py
+TEST_PROGS += rtnetlink_notification.sh
TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
diff --git a/tools/testing/selftests/net/bench/Makefile b/tools/testing/selftests/net/bench/Makefile
new file mode 100644
index 000000000000..2546c45e42f7
--- /dev/null
+++ b/tools/testing/selftests/net/bench/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_MODS_DIR := page_pool
+
+TEST_PROGS += test_bench_page_pool.sh
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/net/bench/page_pool/Makefile b/tools/testing/selftests/net/bench/page_pool/Makefile
new file mode 100644
index 000000000000..0549a16ba275
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/Makefile
@@ -0,0 +1,17 @@
+BENCH_PAGE_POOL_SIMPLE_TEST_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST)))))
+KDIR ?= /lib/modules/$(shell uname -r)/build
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+endif
+
+obj-m += bench_page_pool.o
+bench_page_pool-y += bench_page_pool_simple.o time_bench.o
+
+all:
+ +$(Q)make -C $(KDIR) M=$(BENCH_PAGE_POOL_SIMPLE_TEST_DIR) modules
+
+clean:
+ +$(Q)make -C $(KDIR) M=$(BENCH_PAGE_POOL_SIMPLE_TEST_DIR) clean
diff --git a/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c b/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c
new file mode 100644
index 000000000000..f183d5e30dc6
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/bench_page_pool_simple.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Benchmark module for page_pool.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include <linux/version.h>
+#include <net/page_pool/helpers.h>
+
+#include <linux/interrupt.h>
+#include <linux/limits.h>
+
+#include "time_bench.h"
+
+static int verbose = 1;
+#define MY_POOL_SIZE 1024
+
+static void _page_pool_put_page(struct page_pool *pool, struct page *page,
+ bool allow_direct)
+{
+ page_pool_put_page(pool, page, -1, allow_direct);
+}
+
+/* Makes tests selectable. Useful for perf-record to analyze a single test.
+ * Hint: Bash shells support writing binary numbers like: $((2#101010))
+ *
+ * # modprobe bench_page_pool_simple run_flags=$((2#100))
+ */
+static unsigned long run_flags = 0xFFFFFFFF;
+module_param(run_flags, ulong, 0);
+MODULE_PARM_DESC(run_flags, "Limit which bench tests run");
+
+/* Count the bit number from the enum */
+enum benchmark_bit {
+ bit_run_bench_baseline,
+ bit_run_bench_no_softirq01,
+ bit_run_bench_no_softirq02,
+ bit_run_bench_no_softirq03,
+};
+
+#define bit(b) (1 << (b))
+#define enabled(b) ((run_flags & (bit(b))))
+
+/* notice time_bench is limited to U32_MAX nr loops */
+static unsigned long loops = 10000000;
+module_param(loops, ulong, 0);
+MODULE_PARM_DESC(loops, "Specify loops bench will run");
+
+/* Timing at the nanosec level, we need to know the overhead
+ * introduced by the for loop itself
+ */
+static int time_bench_for_loop(struct time_bench_record *rec, void *data)
+{
+ uint64_t loops_cnt = 0;
+ int i;
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ loops_cnt++;
+ barrier(); /* keep the compiler from optimizing out this loop */
+ }
+ time_bench_stop(rec, loops_cnt);
+ return loops_cnt;
+}
+
+static int time_bench_atomic_inc(struct time_bench_record *rec, void *data)
+{
+ uint64_t loops_cnt = 0;
+ atomic_t cnt;
+ int i;
+
+ atomic_set(&cnt, 0);
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ atomic_inc(&cnt);
+ barrier(); /* keep the compiler from optimizing out this loop */
+ }
+ loops_cnt = atomic_read(&cnt);
+ time_bench_stop(rec, loops_cnt);
+ return loops_cnt;
+}
+
+/* The ptr_ring in page_pool uses a spinlock. We need to know the minimum
+ * overhead of taking+releasing a spinlock, to know the cycles that can be saved
+ * by e.g. amortizing this via bulking.
+ */
+static int time_bench_lock(struct time_bench_record *rec, void *data)
+{
+ uint64_t loops_cnt = 0;
+ spinlock_t lock;
+ int i;
+
+ spin_lock_init(&lock);
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ spin_lock(&lock);
+ loops_cnt++;
+ barrier(); /* keep the compiler from optimizing out this loop */
+ spin_unlock(&lock);
+ }
+ time_bench_stop(rec, loops_cnt);
+ return loops_cnt;
+}
+
+/* Helper for filling some pages into the ptr_ring */
+static void pp_fill_ptr_ring(struct page_pool *pp, int elems)
+{
+ /* GFP_ATOMIC needed when running under softirq */
+ gfp_t gfp_mask = GFP_ATOMIC;
+ struct page **array;
+ int i;
+
+ array = kcalloc(elems, sizeof(struct page *), gfp_mask);
+
+ for (i = 0; i < elems; i++)
+ array[i] = page_pool_alloc_pages(pp, gfp_mask);
+ for (i = 0; i < elems; i++)
+ _page_pool_put_page(pp, array[i], false);
+
+ kfree(array);
+}
+
+enum test_type { type_fast_path, type_ptr_ring, type_page_allocator };
+
+/* Depends on compile optimizing this function */
+static int time_bench_page_pool(struct time_bench_record *rec, void *data,
+ enum test_type type, const char *func)
+{
+ uint64_t loops_cnt = 0;
+ gfp_t gfp_mask = GFP_ATOMIC; /* GFP_ATOMIC is not really needed */
+ int i, err;
+
+ struct page_pool *pp;
+ struct page *page;
+
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = 0,
+ .pool_size = MY_POOL_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = NULL, /* Only use for DMA mapping */
+ .dma_dir = DMA_BIDIRECTIONAL,
+ };
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp)) {
+ err = PTR_ERR(pp);
+ pr_warn("%s: Error(%d) creating page_pool\n", func, err);
+ goto out;
+ }
+ pp_fill_ptr_ring(pp, 64);
+
+ if (in_serving_softirq())
+ pr_warn("%s(): in_serving_softirq fast-path\n", func);
+ else
+ pr_warn("%s(): Cannot use page_pool fast-path\n", func);
+
+ time_bench_start(rec);
+ /** Loop to measure **/
+ for (i = 0; i < rec->loops; i++) {
+ /* Common fast-path alloc that depend on in_serving_softirq() */
+ page = page_pool_alloc_pages(pp, gfp_mask);
+ if (!page)
+ break;
+ loops_cnt++;
+ barrier(); /* keep the compiler from optimizing out this loop */
+
+ /* The benchmark's purpose is to test different return paths.
+ * The compiler should inline and optimize the other function calls out.
+ */
+ if (type == type_fast_path) {
+ /* Fast-path recycling e.g. XDP_DROP use-case */
+ page_pool_recycle_direct(pp, page);
+
+ } else if (type == type_ptr_ring) {
+ /* Normal return path */
+ _page_pool_put_page(pp, page, false);
+
+ } else if (type == type_page_allocator) {
+ /* Test the case where pages are not recycled, but instead
+ * returned back into the system's page allocator
+ */
+ get_page(page); /* cause no-recycling */
+ _page_pool_put_page(pp, page, false);
+ put_page(page);
+ } else {
+ BUILD_BUG();
+ }
+ }
+ time_bench_stop(rec, loops_cnt);
+out:
+ page_pool_destroy(pp);
+ return loops_cnt;
+}
+
+static int time_bench_page_pool01_fast_path(struct time_bench_record *rec,
+ void *data)
+{
+ return time_bench_page_pool(rec, data, type_fast_path, __func__);
+}
+
+static int time_bench_page_pool02_ptr_ring(struct time_bench_record *rec,
+ void *data)
+{
+ return time_bench_page_pool(rec, data, type_ptr_ring, __func__);
+}
+
+static int time_bench_page_pool03_slow(struct time_bench_record *rec,
+ void *data)
+{
+ return time_bench_page_pool(rec, data, type_page_allocator, __func__);
+}
+
+static int run_benchmark_tests(void)
+{
+ uint32_t nr_loops = loops;
+
+ /* Baseline tests */
+ if (enabled(bit_run_bench_baseline)) {
+ time_bench_loop(nr_loops * 10, 0, "for_loop", NULL,
+ time_bench_for_loop);
+ time_bench_loop(nr_loops * 10, 0, "atomic_inc", NULL,
+ time_bench_atomic_inc);
+ time_bench_loop(nr_loops, 0, "lock", NULL, time_bench_lock);
+ }
+
+ /* This test cannot activate correct code path, due to no-softirq ctx */
+ if (enabled(bit_run_bench_no_softirq01))
+ time_bench_loop(nr_loops, 0, "no-softirq-page_pool01", NULL,
+ time_bench_page_pool01_fast_path);
+ if (enabled(bit_run_bench_no_softirq02))
+ time_bench_loop(nr_loops, 0, "no-softirq-page_pool02", NULL,
+ time_bench_page_pool02_ptr_ring);
+ if (enabled(bit_run_bench_no_softirq03))
+ time_bench_loop(nr_loops, 0, "no-softirq-page_pool03", NULL,
+ time_bench_page_pool03_slow);
+
+ return 0;
+}
+
+static int __init bench_page_pool_simple_module_init(void)
+{
+ if (verbose)
+ pr_info("Loaded\n");
+
+ if (loops > U32_MAX) {
+ pr_err("Module param loops(%lu) exceeded U32_MAX(%u)\n", loops,
+ U32_MAX);
+ return -ECHRNG;
+ }
+
+ run_benchmark_tests();
+
+ return 0;
+}
+module_init(bench_page_pool_simple_module_init);
+
+static void __exit bench_page_pool_simple_module_exit(void)
+{
+ if (verbose)
+ pr_info("Unloaded\n");
+}
+module_exit(bench_page_pool_simple_module_exit);
+
+MODULE_DESCRIPTION("Benchmark of page_pool simple cases");
+MODULE_AUTHOR("Jesper Dangaard Brouer <netoptimizer@brouer.com>");
+MODULE_LICENSE("GPL");
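
A short usage sketch for the module above, using only the parameters it defines (loops, run_flags) and the "Per elem:" lines printed by time_bench_loop(); exact paths and numbers depend on the build and system:

    # Sketch only: run just the fast-path test (bit 1 of run_flags).
    insmod ./bench_page_pool.ko loops=5000000 run_flags=$((2#0010))
    dmesg | grep "Per elem:"
    rmmod bench_page_pool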
diff --git a/tools/testing/selftests/net/bench/page_pool/time_bench.c b/tools/testing/selftests/net/bench/page_pool/time_bench.c
new file mode 100644
index 000000000000..073bb36ec5f2
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/time_bench.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Benchmarking code execution time inside the kernel
+ *
+ * Copyright (C) 2014, Red Hat, Inc., Jesper Dangaard Brouer
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/time.h>
+
+#include <linux/perf_event.h> /* perf_event_create_kernel_counter() */
+
+/* For concurrency testing */
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+
+#include "time_bench.h"
+
+static int verbose = 1;
+
+/** TSC (Time-Stamp Counter) based **
+ * See: linux/time_bench.h
+ * tsc_start_clock() and tsc_stop_clock()
+ */
+
+/** Wall-clock based **
+ */
+
+/** PMU (Performance Monitor Unit) based **
+ */
+#define PERF_FORMAT \
+ (PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | \
+ PERF_FORMAT_TOTAL_TIME_RUNNING)
+
+struct raw_perf_event {
+ uint64_t config; /* event */
+ uint64_t config1; /* umask */
+ struct perf_event *save;
+ char *desc;
+};
+
+/* If HT is enabled, a maximum of 4 events (5 if one is instructions
+ * retired) can be specified; if HT is disabled, a maximum of 8 (9 if
+ * one is instructions retired) can be specified.
+ *
+ * From Table 19-1. Architectural Performance Events
+ * Architectures Software Developer’s Manual Volume 3: System Programming
+ * Guide
+ */
+struct raw_perf_event perf_events[] = {
+ { 0x3c, 0x00, NULL, "Unhalted CPU Cycles" },
+ { 0xc0, 0x00, NULL, "Instruction Retired" }
+};
+
+#define NUM_EVTS (ARRAY_SIZE(perf_events))
+
+/* WARNING: PMU config is currently broken!
+ */
+bool time_bench_PMU_config(bool enable)
+{
+ int i;
+ struct perf_event_attr perf_conf;
+ struct perf_event *perf_event;
+ int cpu;
+
+ preempt_disable();
+ cpu = smp_processor_id();
+ pr_info("DEBUG: cpu:%d\n", cpu);
+ preempt_enable();
+
+ memset(&perf_conf, 0, sizeof(struct perf_event_attr));
+ perf_conf.type = PERF_TYPE_RAW;
+ perf_conf.size = sizeof(struct perf_event_attr);
+ perf_conf.read_format = PERF_FORMAT;
+ perf_conf.pinned = 1;
+ perf_conf.exclude_user = 1; /* No userspace events */
+ perf_conf.exclude_kernel = 0; /* Only kernel events */
+
+ for (i = 0; i < NUM_EVTS; i++) {
+ perf_conf.disabled = enable;
+ //perf_conf.disabled = (i == 0) ? 1 : 0;
+ perf_conf.config = perf_events[i].config;
+ perf_conf.config1 = perf_events[i].config1;
+ if (verbose)
+ pr_info("%s() enable PMU counter: %s\n",
+ __func__, perf_events[i].desc);
+ perf_event = perf_event_create_kernel_counter(&perf_conf, cpu,
+ NULL /* task */,
+ NULL /* overflow_handler*/,
+ NULL /* context */);
+ if (perf_event) {
+ perf_events[i].save = perf_event;
+ pr_info("%s():DEBUG perf_event success\n", __func__);
+
+ perf_event_enable(perf_event);
+ } else {
+ pr_info("%s():DEBUG perf_event is NULL\n", __func__);
+ }
+ }
+
+ return true;
+}
+
+/** Generic functions **
+ */
+
+/* Calculate stats, store results in record */
+bool time_bench_calc_stats(struct time_bench_record *rec)
+{
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+ uint64_t ns_per_call_tmp_rem = 0;
+ uint32_t ns_per_call_remainder = 0;
+ uint64_t pmc_ipc_tmp_rem = 0;
+ uint32_t pmc_ipc_remainder = 0;
+ uint32_t pmc_ipc_div = 0;
+ uint32_t invoked_cnt_precision = 0;
+ uint32_t invoked_cnt = 0; /* 32-bit due to div_u64_rem() */
+
+ if (rec->flags & TIME_BENCH_LOOP) {
+ if (rec->invoked_cnt < 1000) {
+ pr_err("ERR: need more(>1000) loops(%llu) for timing\n",
+ rec->invoked_cnt);
+ return false;
+ }
+ if (rec->invoked_cnt > ((1ULL << 32) - 1)) {
+ /* div_u64_rem() only supports a 32-bit divisor */
+ pr_err("ERR: Invoke cnt(%llu) too big overflow 32bit\n",
+ rec->invoked_cnt);
+ return false;
+ }
+ invoked_cnt = (uint32_t)rec->invoked_cnt;
+ }
+
+ /* TSC (Time-Stamp Counter) records */
+ if (rec->flags & TIME_BENCH_TSC) {
+ rec->tsc_interval = rec->tsc_stop - rec->tsc_start;
+ if (rec->tsc_interval == 0) {
+ pr_err("ABORT: timing took ZERO TSC time\n");
+ return false;
+ }
+ /* Calculate stats */
+ if (rec->flags & TIME_BENCH_LOOP)
+ rec->tsc_cycles = rec->tsc_interval / invoked_cnt;
+ else
+ rec->tsc_cycles = rec->tsc_interval;
+ }
+
+ /* Wall-clock time calc */
+ if (rec->flags & TIME_BENCH_WALLCLOCK) {
+ rec->time_start = rec->ts_start.tv_nsec +
+ (NANOSEC_PER_SEC * rec->ts_start.tv_sec);
+ rec->time_stop = rec->ts_stop.tv_nsec +
+ (NANOSEC_PER_SEC * rec->ts_stop.tv_sec);
+ rec->time_interval = rec->time_stop - rec->time_start;
+ if (rec->time_interval == 0) {
+ pr_err("ABORT: timing took ZERO wallclock time\n");
+ return false;
+ }
+ /* Calculate stats */
+ /*** Division in the kernel is tricky ***/
+ /* Orig: time_sec = (time_interval / NANOSEC_PER_SEC); */
+ /* remainder only correct because NANOSEC_PER_SEC is 10^9 */
+ rec->time_sec = div_u64_rem(rec->time_interval, NANOSEC_PER_SEC,
+ &rec->time_sec_remainder);
+ //TODO: use existing struct timespec records instead of div?
+
+ if (rec->flags & TIME_BENCH_LOOP) {
+ /*** Division in the kernel is tricky ***/
+ /* Orig: ns = ((double)time_interval / invoked_cnt); */
+ /* First get quotient */
+ rec->ns_per_call_quotient =
+ div_u64_rem(rec->time_interval, invoked_cnt,
+ &ns_per_call_remainder);
+ /* Now get decimals .xxx precision (incorrect roundup)*/
+ ns_per_call_tmp_rem = ns_per_call_remainder;
+ invoked_cnt_precision = invoked_cnt / 1000;
+ if (invoked_cnt_precision > 0) {
+ rec->ns_per_call_decimal =
+ div_u64_rem(ns_per_call_tmp_rem,
+ invoked_cnt_precision,
+ &ns_per_call_remainder);
+ }
+ }
+ }
+
+ /* Performance Monitor Unit (PMU) counters */
+ if (rec->flags & TIME_BENCH_PMU) {
+ //FIXME: Overflow handling???
+ rec->pmc_inst = rec->pmc_inst_stop - rec->pmc_inst_start;
+ rec->pmc_clk = rec->pmc_clk_stop - rec->pmc_clk_start;
+
+ /* Calc Instruction Per Cycle (IPC) */
+ /* First get quotient */
+ rec->pmc_ipc_quotient = div_u64_rem(rec->pmc_inst, rec->pmc_clk,
+ &pmc_ipc_remainder);
+ /* Now get decimals .xxx precision (incorrect roundup)*/
+ pmc_ipc_tmp_rem = pmc_ipc_remainder;
+ pmc_ipc_div = rec->pmc_clk / 1000;
+ if (pmc_ipc_div > 0) {
+ rec->pmc_ipc_decimal = div_u64_rem(pmc_ipc_tmp_rem,
+ pmc_ipc_div,
+ &pmc_ipc_remainder);
+ }
+ }
+
+ return true;
+}
+
+/* Generic function for invoking a loop function and calculating
+ * execution time stats. The function being called/timed is assumed
+ * to perform a tight loop, and update the timing record struct.
+ */
+bool time_bench_loop(uint32_t loops, int step, char *txt, void *data,
+ int (*func)(struct time_bench_record *record, void *data))
+{
+ struct time_bench_record rec;
+
+ /* Setup record */
+ memset(&rec, 0, sizeof(rec)); /* zero func might not update all */
+ rec.version_abi = 1;
+ rec.loops = loops;
+ rec.step = step;
+ rec.flags = (TIME_BENCH_LOOP | TIME_BENCH_TSC | TIME_BENCH_WALLCLOCK);
+
+ /*** Loop function being timed ***/
+ if (!func(&rec, data)) {
+ pr_err("ABORT: function being timed failed\n");
+ return false;
+ }
+
+ if (rec.invoked_cnt < loops)
+ pr_warn("WARNING: Invoke count(%llu) smaller than loops(%d)\n",
+ rec.invoked_cnt, loops);
+
+ /* Calculate stats */
+ time_bench_calc_stats(&rec);
+
+ pr_info("Type:%s Per elem: %llu cycles(tsc) %llu.%03llu ns (step:%d) - (measurement period time:%llu.%09u sec time_interval:%llu) - (invoke count:%llu tsc_interval:%llu)\n",
+ txt, rec.tsc_cycles, rec.ns_per_call_quotient,
+ rec.ns_per_call_decimal, rec.step, rec.time_sec,
+ rec.time_sec_remainder, rec.time_interval, rec.invoked_cnt,
+ rec.tsc_interval);
+ if (rec.flags & TIME_BENCH_PMU)
+ pr_info("Type:%s PMU inst/clock%llu/%llu = %llu.%03llu IPC (inst per cycle)\n",
+ txt, rec.pmc_inst, rec.pmc_clk, rec.pmc_ipc_quotient,
+ rec.pmc_ipc_decimal);
+ return true;
+}
+
+/* Function getting invoked by kthread */
+static int invoke_test_on_cpu_func(void *private)
+{
+ struct time_bench_cpu *cpu = private;
+ struct time_bench_sync *sync = cpu->sync;
+ cpumask_t newmask = CPU_MASK_NONE;
+ void *data = cpu->data;
+
+ /* Restrict CPU */
+ cpumask_set_cpu(cpu->rec.cpu, &newmask);
+ set_cpus_allowed_ptr(current, &newmask);
+
+ /* Synchronize start of concurrency test */
+ atomic_inc(&sync->nr_tests_running);
+ wait_for_completion(&sync->start_event);
+
+ /* Start benchmark function */
+ if (!cpu->bench_func(&cpu->rec, data)) {
+ pr_err("ERROR: function being timed failed on CPU:%d(%d)\n",
+ cpu->rec.cpu, smp_processor_id());
+ } else {
+ if (verbose)
+ pr_info("SUCCESS: ran on CPU:%d(%d)\n", cpu->rec.cpu,
+ smp_processor_id());
+ }
+ cpu->did_bench_run = true;
+
+ /* End test */
+ atomic_dec(&sync->nr_tests_running);
+ /* Wait for kthread_stop() telling us to stop */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+void time_bench_print_stats_cpumask(const char *desc,
+ struct time_bench_cpu *cpu_tasks,
+ const struct cpumask *mask)
+{
+ uint64_t average = 0;
+ int cpu;
+ int step = 0;
+ struct sum {
+ uint64_t tsc_cycles;
+ int records;
+ } sum = { 0 };
+
+ /* Get stats */
+ for_each_cpu(cpu, mask) {
+ struct time_bench_cpu *c = &cpu_tasks[cpu];
+ struct time_bench_record *rec = &c->rec;
+
+ /* Calculate stats */
+ time_bench_calc_stats(rec);
+
+ pr_info("Type:%s CPU(%d) %llu cycles(tsc) %llu.%03llu ns (step:%d) - (measurement period time:%llu.%09u sec time_interval:%llu) - (invoke count:%llu tsc_interval:%llu)\n",
+ desc, cpu, rec->tsc_cycles, rec->ns_per_call_quotient,
+ rec->ns_per_call_decimal, rec->step, rec->time_sec,
+ rec->time_sec_remainder, rec->time_interval,
+ rec->invoked_cnt, rec->tsc_interval);
+
+ /* Collect average */
+ sum.records++;
+ sum.tsc_cycles += rec->tsc_cycles;
+ step = rec->step;
+ }
+
+ if (sum.records) /* avoid div-by-zero */
+ average = sum.tsc_cycles / sum.records;
+ pr_info("Sum Type:%s Average: %llu cycles(tsc) CPUs:%d step:%d\n", desc,
+ average, sum.records, step);
+}
+
+void time_bench_run_concurrent(uint32_t loops, int step, void *data,
+ const struct cpumask *mask, /* Support masking out some CPUs */
+ struct time_bench_sync *sync,
+ struct time_bench_cpu *cpu_tasks,
+ int (*func)(struct time_bench_record *record, void *data))
+{
+ int cpu, running = 0;
+
+ if (verbose) // DEBUG
+ pr_warn("%s() Started on CPU:%d\n", __func__,
+ smp_processor_id());
+
+ /* Reset sync conditions */
+ atomic_set(&sync->nr_tests_running, 0);
+ init_completion(&sync->start_event);
+
+ /* Spawn off jobs on all CPUs */
+ for_each_cpu(cpu, mask) {
+ struct time_bench_cpu *c = &cpu_tasks[cpu];
+
+ running++;
+ c->sync = sync; /* Send sync variable along */
+ c->data = data; /* Send opaque along */
+
+ /* Init benchmark record */
+ memset(&c->rec, 0, sizeof(struct time_bench_record));
+ c->rec.version_abi = 1;
+ c->rec.loops = loops;
+ c->rec.step = step;
+ c->rec.flags = (TIME_BENCH_LOOP | TIME_BENCH_TSC |
+ TIME_BENCH_WALLCLOCK);
+ c->rec.cpu = cpu;
+ c->bench_func = func;
+ c->task = kthread_run(invoke_test_on_cpu_func, c,
+ "time_bench%d", cpu);
+ if (IS_ERR(c->task)) {
+ pr_err("%s(): Failed to start test func\n", __func__);
+ return; /* Argh, what about cleanup?! */
+ }
+ }
+
+ /* Wait until all processes are running */
+ while (atomic_read(&sync->nr_tests_running) < running) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+ /* Kick off all CPU concurrently on completion event */
+ complete_all(&sync->start_event);
+
+ /* Wait for CPUs to finish */
+ while (atomic_read(&sync->nr_tests_running)) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+
+ /* Stop the kthreads */
+ for_each_cpu(cpu, mask) {
+ struct time_bench_cpu *c = &cpu_tasks[cpu];
+
+ kthread_stop(c->task);
+ }
+
+ if (verbose) // DEBUG - often finishes on another CPU
+ pr_warn("%s() Finished on CPU:%d\n", __func__,
+ smp_processor_id());
+}
diff --git a/tools/testing/selftests/net/bench/page_pool/time_bench.h b/tools/testing/selftests/net/bench/page_pool/time_bench.h
new file mode 100644
index 000000000000..e113fcf341dc
--- /dev/null
+++ b/tools/testing/selftests/net/bench/page_pool/time_bench.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Benchmarking code execution time inside the kernel
+ *
+ * Copyright (C) 2014, Red Hat, Inc., Jesper Dangaard Brouer
+ * for licensing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_TIME_BENCH_H
+#define _LINUX_TIME_BENCH_H
+
+/* Main structure used for recording a benchmark run */
+struct time_bench_record {
+ uint32_t version_abi;
+ uint32_t loops; /* Requested loop invocations */
+ uint32_t step; /* option for e.g. bulk invocations */
+
+ uint32_t flags; /* Measurements types enabled */
+#define TIME_BENCH_LOOP BIT(0)
+#define TIME_BENCH_TSC BIT(1)
+#define TIME_BENCH_WALLCLOCK BIT(2)
+#define TIME_BENCH_PMU BIT(3)
+
+ uint32_t cpu; /* Used when embedded in time_bench_cpu */
+
+ /* Records */
+ uint64_t invoked_cnt; /* Returned actual invocations */
+ uint64_t tsc_start;
+ uint64_t tsc_stop;
+ struct timespec64 ts_start;
+ struct timespec64 ts_stop;
+ /* PMU counters for instruction and cycles
+ * instructions counter including pipelined instructions
+ */
+ uint64_t pmc_inst_start;
+ uint64_t pmc_inst_stop;
+ /* CPU unhalted clock counter */
+ uint64_t pmc_clk_start;
+ uint64_t pmc_clk_stop;
+
+ /* Result records */
+ uint64_t tsc_interval;
+ uint64_t time_start, time_stop, time_interval; /* in nanosec */
+ uint64_t pmc_inst, pmc_clk;
+
+ /* Derived result records */
+ uint64_t tsc_cycles; // +decimal?
+ uint64_t ns_per_call_quotient, ns_per_call_decimal;
+ uint64_t time_sec;
+ uint32_t time_sec_remainder;
+ uint64_t pmc_ipc_quotient, pmc_ipc_decimal; /* inst per cycle */
+};
+
+/* For synchronizing parallel CPUs to run concurrently */
+struct time_bench_sync {
+ atomic_t nr_tests_running;
+ struct completion start_event;
+};
+
+/* Keep track of CPUs executing our bench function.
+ *
+ * Embed a time_bench_record for storing info per cpu
+ */
+struct time_bench_cpu {
+ struct time_bench_record rec;
+ struct time_bench_sync *sync; /* back ptr */
+ struct task_struct *task;
+ /* "data" opaque could have been placed in time_bench_sync,
+ * but to avoid any false sharing, place it per CPU
+ */
+ void *data;
+ /* Support masking out some CPUs; mark if it ran */
+ bool did_bench_run;
+ /* int cpu; // note CPU stored in time_bench_record */
+ int (*bench_func)(struct time_bench_record *record, void *data);
+};
+
+/*
+ * Below TSC assembler code is not compatible with other archs, and
+ * can also fail on guests if cpu-flags are not correct.
+ *
+ * The way TSC reading is used here (many iterations) does not require
+ * accuracy as high as described below (in Intel Doc #324264).
+ *
+ * Consider changing to use get_cycles() (#include <asm/timex.h>).
+ */
+
+/** TSC (Time-Stamp Counter) based **
+ * Recommend reading, to understand details of reading TSC accurately:
+ * Intel Doc #324264, "How to Benchmark Code Execution Times on Intel"
+ *
+ * Consider getting exclusive ownership of CPU by using:
+ * unsigned long flags;
+ * preempt_disable();
+ * raw_local_irq_save(flags);
+ * _your_code_
+ * raw_local_irq_restore(flags);
+ * preempt_enable();
+ *
+ * Clobbered registers: "%rax", "%rbx", "%rcx", "%rdx"
+ * RDTSC only change "%rax" and "%rdx" but
+ * CPUID clears the high 32-bits of all (rax/rbx/rcx/rdx)
+ */
+static __always_inline uint64_t tsc_start_clock(void)
+{
+ /* See: Intel Doc #324264 */
+ unsigned int hi, lo;
+
+ asm volatile("CPUID\n\t"
+ "RDTSC\n\t"
+ "mov %%edx, %0\n\t"
+ "mov %%eax, %1\n\t"
+ : "=r"(hi), "=r"(lo)::"%rax", "%rbx", "%rcx", "%rdx");
+ //FIXME: on 32bit use clobbered %eax + %edx
+ return ((uint64_t)lo) | (((uint64_t)hi) << 32);
+}
+
+static __always_inline uint64_t tsc_stop_clock(void)
+{
+ /* See: Intel Doc #324264 */
+ unsigned int hi, lo;
+
+ asm volatile("RDTSCP\n\t"
+ "mov %%edx, %0\n\t"
+ "mov %%eax, %1\n\t"
+ "CPUID\n\t"
+ : "=r"(hi), "=r"(lo)::"%rax", "%rbx", "%rcx", "%rdx");
+ return ((uint64_t)lo) | (((uint64_t)hi) << 32);
+}
+
+/** Wall-clock based **
+ *
+ * use: getnstimeofday()
+ * getnstimeofday(&rec->ts_start);
+ * getnstimeofday(&rec->ts_stop);
+ *
+ * API changed see: Documentation/core-api/timekeeping.rst
+ * https://www.kernel.org/doc/html/latest/core-api/timekeeping.html#c.getnstimeofday
+ *
+ * We should instead use ktime_get_real_ts64(), which is a direct
+ * replacement, but consider using monotonic time (ktime_get_ts64())
+ * and/or a ktime_t based interface (ktime_get()/ktime_get_real()).
+ */
+
+/** PMU (Performance Monitor Unit) based **
+ *
+ * Needed for calculating: Instructions Per Cycle (IPC)
+ * - The IPC number tells how efficiently the CPU pipeline was used
+ */
+//lookup: perf_event_create_kernel_counter()
+
+bool time_bench_PMU_config(bool enable);
+
+/* Raw reading via rdpmc() using fixed counters
+ *
+ * From: https://github.com/andikleen/simple-pmu
+ */
+enum {
+ FIXED_SELECT = (1U << 30), /* == 0x40000000 */
+ FIXED_INST_RETIRED_ANY = 0,
+ FIXED_CPU_CLK_UNHALTED_CORE = 1,
+ FIXED_CPU_CLK_UNHALTED_REF = 2,
+};
+
+static __always_inline unsigned long long p_rdpmc(unsigned int in)
+{
+ unsigned int d, a;
+
+ asm volatile("rdpmc" : "=d"(d), "=a"(a) : "c"(in) : "memory");
+ return ((unsigned long long)d << 32) | a;
+}
+
+/* These PMU counters need to be enabled, but I don't have the
+ * configuration code implemented. My current hack is running:
+ * sudo perf stat -e cycles:k -e instructions:k insmod lib/ring_queue_test.ko
+ */
+/* Reading all pipelined instructions */
+static __always_inline unsigned long long pmc_inst(void)
+{
+ return p_rdpmc(FIXED_SELECT | FIXED_INST_RETIRED_ANY);
+}
+
+/* Reading CPU clock cycles */
+static __always_inline unsigned long long pmc_clk(void)
+{
+ return p_rdpmc(FIXED_SELECT | FIXED_CPU_CLK_UNHALTED_CORE);
+}
+
+/* Raw reading via MSR rdmsr() is likely wrong
+ * FIXME: How can I know which raw MSR registers are configured for what?
+ */
+#define MSR_IA32_PCM0 0x400000C1 /* PERFCTR0 */
+#define MSR_IA32_PCM1 0x400000C2 /* PERFCTR1 */
+#define MSR_IA32_PCM2 0x400000C3
+static inline uint64_t msr_inst(unsigned long long *msr_result)
+{
+ return rdmsrq_safe(MSR_IA32_PCM0, msr_result);
+}
+
+/** Generic functions **
+ */
+bool time_bench_loop(uint32_t loops, int step, char *txt, void *data,
+ int (*func)(struct time_bench_record *rec, void *data));
+bool time_bench_calc_stats(struct time_bench_record *rec);
+
+void time_bench_run_concurrent(uint32_t loops, int step, void *data,
+ const struct cpumask *mask, /* Support masking out some CPUs */
+ struct time_bench_sync *sync, struct time_bench_cpu *cpu_tasks,
+ int (*func)(struct time_bench_record *record, void *data));
+void time_bench_print_stats_cpumask(const char *desc,
+ struct time_bench_cpu *cpu_tasks,
+ const struct cpumask *mask);
+
+//FIXME: use rec->flags to select measurement, should be MACRO
+static __always_inline void time_bench_start(struct time_bench_record *rec)
+{
+ //getnstimeofday(&rec->ts_start);
+ ktime_get_real_ts64(&rec->ts_start);
+ if (rec->flags & TIME_BENCH_PMU) {
+ rec->pmc_inst_start = pmc_inst();
+ rec->pmc_clk_start = pmc_clk();
+ }
+ rec->tsc_start = tsc_start_clock();
+}
+
+static __always_inline void time_bench_stop(struct time_bench_record *rec,
+ uint64_t invoked_cnt)
+{
+ rec->tsc_stop = tsc_stop_clock();
+ if (rec->flags & TIME_BENCH_PMU) {
+ rec->pmc_inst_stop = pmc_inst();
+ rec->pmc_clk_stop = pmc_clk();
+ }
+ //getnstimeofday(&rec->ts_stop);
+ ktime_get_real_ts64(&rec->ts_stop);
+ rec->invoked_cnt = invoked_cnt;
+}
+
+#endif /* _LINUX_TIME_BENCH_H */
diff --git a/tools/testing/selftests/net/bench/test_bench_page_pool.sh b/tools/testing/selftests/net/bench/test_bench_page_pool.sh
new file mode 100755
index 000000000000..7b8b18cfedce
--- /dev/null
+++ b/tools/testing/selftests/net/bench/test_bench_page_pool.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+
+set -e
+
+DRIVER="./page_pool/bench_page_pool.ko"
+result=""
+
+function run_test()
+{
+ rmmod "bench_page_pool.ko" || true
+ insmod $DRIVER > /dev/null 2>&1
+ result=$(dmesg | tail -10)
+ echo "$result"
+
+ echo
+ echo "Fast path results:"
+ echo "${result}" | grep -o -E "no-softirq-page_pool01 Per elem: ([0-9]+) cycles\(tsc\) ([0-9]+\.[0-9]+) ns"
+
+ echo
+ echo "ptr_ring results:"
+ echo "${result}" | grep -o -E "no-softirq-page_pool02 Per elem: ([0-9]+) cycles\(tsc\) ([0-9]+\.[0-9]+) ns"
+
+ echo
+ echo "slow path results:"
+ echo "${result}" | grep -o -E "no-softirq-page_pool03 Per elem: ([0-9]+) cycles\(tsc\) ([0-9]+\.[0-9]+) ns"
+}
+
+run_test
+
+exit 0
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index 00bde7b6f39e..d7bb2e80e88c 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -102,6 +102,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \
vxlan_bridge_1d_port_8472.sh \
vxlan_bridge_1d.sh \
vxlan_bridge_1q_ipv6.sh \
+ vxlan_bridge_1q_mc_ul.sh \
vxlan_bridge_1q_port_8472_ipv6.sh \
vxlan_bridge_1q_port_8472.sh \
vxlan_bridge_1q.sh \
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 508f3c700d71..83ee6a07e072 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -37,6 +37,7 @@ declare -A NETIFS=(
: "${TEAMD:=teamd}"
: "${MCD:=smcrouted}"
: "${MC_CLI:=smcroutectl}"
+: "${MCD_TABLE_NAME:=selftests}"
# Constants for netdevice bring-up:
# Default time in seconds to wait for an interface to come up before giving up
@@ -1757,6 +1758,51 @@ mc_send()
msend -g $groups -I $if_name -c 1 > /dev/null 2>&1
}
+adf_mcd_start()
+{
+ local ifs=("$@")
+
+ local table_name="$MCD_TABLE_NAME"
+ local smcroutedir
+ local pid
+ local if
+ local i
+
+ check_command "$MCD" || return 1
+ check_command "$MC_CLI" || return 1
+
+ smcroutedir=$(mktemp -d)
+ defer rm -rf "$smcroutedir"
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ echo "phyint ${NETIFS[p$i]} enable" >> \
+ "$smcroutedir/$table_name.conf"
+ done
+
+ for if in "${ifs[@]}"; do
+ if ! ip_link_has_flag "$if" MULTICAST; then
+ ip link set dev "$if" multicast on
+ defer ip link set dev "$if" multicast off
+ fi
+
+ echo "phyint $if enable" >> \
+ "$smcroutedir/$table_name.conf"
+ done
+
+ "$MCD" -N -I "$table_name" -f "$smcroutedir/$table_name.conf" \
+ -P "$smcroutedir/$table_name.pid"
+ busywait "$BUSYWAIT_TIMEOUT" test -e "$smcroutedir/$table_name.pid"
+ pid=$(cat "$smcroutedir/$table_name.pid")
+ defer kill_process "$pid"
+}
+
+mc_cli()
+{
+ local table_name="$MCD_TABLE_NAME"
+
+ "$MC_CLI" -I "$table_name" "$@"
+}
+
start_ip_monitor()
{
local mtype=$1; shift
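
A minimal usage sketch of the two helpers added above, as called from a test that sources lib.sh (the interface names and addresses below are placeholders; router_multicast.sh further down shows the real call sites):

    adf_mcd_start || exit "$EXIT_STATUS"   # start smcrouted; teardown is deferred
    mc_cli add swp1 198.51.100.2 225.1.2.3 swp2      # install an (S,G) route
    mc_cli remove swp1 198.51.100.2 225.1.2.3 swp2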
diff --git a/tools/testing/selftests/net/forwarding/router_multicast.sh b/tools/testing/selftests/net/forwarding/router_multicast.sh
index 5a58b1ec8aef..83e52abdbc2e 100755
--- a/tools/testing/selftests/net/forwarding/router_multicast.sh
+++ b/tools/testing/selftests/net/forwarding/router_multicast.sh
@@ -33,10 +33,6 @@ NUM_NETIFS=6
source lib.sh
source tc_common.sh
-require_command $MCD
-require_command $MC_CLI
-table_name=selftests
-
h1_create()
{
simple_if_init $h1 198.51.100.2/28 2001:db8:1::2/64
@@ -149,25 +145,6 @@ router_destroy()
ip link set dev $rp1 down
}
-start_mcd()
-{
- SMCROUTEDIR="$(mktemp -d)"
-
- for ((i = 1; i <= $NUM_NETIFS; ++i)); do
- echo "phyint ${NETIFS[p$i]} enable" >> \
- $SMCROUTEDIR/$table_name.conf
- done
-
- $MCD -N -I $table_name -f $SMCROUTEDIR/$table_name.conf \
- -P $SMCROUTEDIR/$table_name.pid
-}
-
-kill_mcd()
-{
- pkill $MCD
- rm -rf $SMCROUTEDIR
-}
-
setup_prepare()
{
h1=${NETIFS[p1]}
@@ -179,7 +156,7 @@ setup_prepare()
rp3=${NETIFS[p5]}
h3=${NETIFS[p6]}
- start_mcd
+ adf_mcd_start || exit "$EXIT_STATUS"
vrf_prepare
@@ -206,7 +183,7 @@ cleanup()
vrf_cleanup
- kill_mcd
+ defer_scopes_cleanup
}
create_mcast_sg()
@@ -214,9 +191,9 @@ create_mcast_sg()
local if_name=$1; shift
local s_addr=$1; shift
local mcast=$1; shift
- local dest_ifs=${@}
+ local dest_ifs=("${@}")
- $MC_CLI -I $table_name add $if_name $s_addr $mcast $dest_ifs
+ mc_cli add "$if_name" "$s_addr" "$mcast" "${dest_ifs[@]}"
}
delete_mcast_sg()
@@ -224,9 +201,9 @@ delete_mcast_sg()
local if_name=$1; shift
local s_addr=$1; shift
local mcast=$1; shift
- local dest_ifs=${@}
+ local dest_ifs=("${@}")
- $MC_CLI -I $table_name remove $if_name $s_addr $mcast $dest_ifs
+ mc_cli remove "$if_name" "$s_addr" "$mcast" "${dest_ifs[@]}"
}
mcast_v4()
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh
new file mode 100755
index 000000000000..7ec58b6b1128
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_mc_ul.sh
@@ -0,0 +1,771 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------------------------+
+# | + $h1.10 + $h1.20 |
+# | | 192.0.2.1/28 | 2001:db8:1::1/64 |
+# | \________ ________/ |
+# | \ / |
+# | + $h1 H1 (vrf) |
+# +-----------|-----------------------------+
+# |
+# +-----------|----------------------------------------------------------------+
+# | +---------|--------------------------------------+ SWITCH (main vrf) |
+# | | + $swp1 BR1 (802.1q) | |
+# | | vid 10 20 | |
+# | | | |
+# | | + vx10 (vxlan) + vx20 (vxlan) | + lo10 (dummy) |
+# | | local 192.0.2.100 local 2001:db8:4::1 | 192.0.2.100/28 |
+# | | group 233.252.0.1 group ff0e::1:2:3 | 2001:db8:4::1/64 |
+# | | id 1000 id 2000 | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | +------------------------------------------------+ |
+# | |
+# | + $swp2 $swp3 + |
+# | | 192.0.2.33/28 192.0.2.65/28 | |
+# | | 2001:db8:2::1/64 2001:db8:3::1/64 | |
+# | | | |
+# +---|--------------------------------------------------------------------|---+
+# | |
+# +---|--------------------------------+ +--------------------------------|---+
+# | | H2 (vrf) | | H3 (vrf) | |
+# | +-|----------------------------+ | | +-----------------------------|-+ |
+# | | + $h2 BR2 (802.1d) | | | | BR3 (802.1d) $h3 + | |
+# | | | | | | | |
+# | | + v1$h2 (veth) | | | | v1$h3 (veth) + | |
+# | +-|----------------------------+ | | +-----------------------------|-+ |
+# | | | | | |
+# +---|--------------------------------+ +--------------------------------|---+
+# | |
+# +---|--------------------------------+ +--------------------------------|---+
+# | + v2$h2 (veth) NS2 (netns) | | NS3 (netns) v2$h3 (veth) + |
+# | 192.0.2.34/28 | | 192.0.2.66/28 |
+# | 2001:db8:2::2/64 | | 2001:db8:3::2/64 |
+# | | | |
+# | +--------------------------------+ | | +--------------------------------+ |
+# | | BR1 (802.1q) | | | | BR1 (802.1q) | |
+# | | + vx10 (vxlan) | | | | + vx10 (vxlan) | |
+# | | local 192.0.2.34 | | | | local 192.0.2.50 | |
+# | | group 233.252.0.1 dev v2$h2 | | | | group 233.252.0.1 dev v2$h3 | |
+# | | id 1000 dstport $VXPORT | | | | id 1000 dstport $VXPORT | |
+# | | vid 10 pvid untagged | | | | vid 10 pvid untagged | |
+# | | | | | | | |
+# | | + vx20 (vxlan) | | | | + vx20 (vxlan) | |
+# | | local 2001:db8:2::2 | | | | local 2001:db8:3::2 | |
+# | | group ff0e::1:2:3 dev v2$h2 | | | | group ff0e::1:2:3 dev v2$h3 | |
+# | | id 2000 dstport $VXPORT | | | | id 2000 dstport $VXPORT | |
+# | | vid 20 pvid untagged | | | | vid 20 pvid untagged | |
+# | | | | | | | |
+# | | + w1 (veth) | | | | + w1 (veth) | |
+# | | | vid 10 20 | | | | | vid 10 20 | |
+# | +--|-----------------------------+ | | +--|-----------------------------+ |
+# | | | | | |
+# | +--|-----------------------------+ | | +--|-----------------------------+ |
+# | | + w2 (veth) VW2 (vrf) | | | | + w2 (veth) VW2 (vrf) | |
+# | | |\ | | | | |\ | |
+# | | | + w2.10 | | | | | + w2.10 | |
+# | | | 192.0.2.3/28 | | | | | 192.0.2.4/28 | |
+# | | | | | | | | | |
+# | | + w2.20 | | | | + w2.20 | |
+# | | 2001:db8:1::3/64 | | | | 2001:db8:1::4/64 | |
+# | +--------------------------------+ | | +--------------------------------+ |
+# +------------------------------------+ +------------------------------------+
+#
+#shellcheck disable=SC2317 # SC doesn't see our uses of functions.
+
+: "${VXPORT:=4789}"
+export VXPORT
+
+: "${GROUP4:=233.252.0.1}"
+export GROUP4
+
+: "${GROUP6:=ff0e::1:2:3}"
+export GROUP6
+
+: "${IPMR:=lo10}"
+
+ALL_TESTS="
+ ipv4_nomcroute
+ ipv4_mcroute
+ ipv4_mcroute_changelink
+ ipv4_mcroute_starg
+ ipv4_mcroute_noroute
+ ipv4_mcroute_fdb
+ ipv4_mcroute_fdb_oif0
+ ipv4_mcroute_fdb_oif0_sep
+
+ ipv6_nomcroute
+ ipv6_mcroute
+ ipv6_mcroute_changelink
+ ipv6_mcroute_starg
+ ipv6_mcroute_noroute
+ ipv6_mcroute_fdb
+ ipv6_mcroute_fdb_oif0
+
+ ipv4_nomcroute_rx
+ ipv4_mcroute_rx
+ ipv4_mcroute_starg_rx
+ ipv4_mcroute_fdb_oif0_sep_rx
+ ipv4_mcroute_fdb_sep_rx
+
+ ipv6_nomcroute_rx
+ ipv6_mcroute_rx
+ ipv6_mcroute_starg_rx
+ ipv6_mcroute_fdb_sep_rx
+"
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init "$h1"
+ defer simple_if_fini "$h1"
+
+ ip_link_add "$h1.10" master "v$h1" link "$h1" type vlan id 10
+ ip_link_set_up "$h1.10"
+ ip_addr_add "$h1.10" 192.0.2.1/28
+
+ ip_link_add "$h1.20" master "v$h1" link "$h1" type vlan id 20
+ ip_link_set_up "$h1.20"
+ ip_addr_add "$h1.20" 2001:db8:1::1/64
+}
+
+install_capture()
+{
+ local dev=$1; shift
+
+ tc qdisc add dev "$dev" clsact
+ defer tc qdisc del dev "$dev" clsact
+
+ tc filter add dev "$dev" ingress proto ip pref 104 \
+ flower skip_hw ip_proto udp dst_port "$VXPORT" \
+ action pass
+ defer tc filter del dev "$dev" ingress proto ip pref 104
+
+ tc filter add dev "$dev" ingress proto ipv6 pref 106 \
+ flower skip_hw ip_proto udp dst_port "$VXPORT" \
+ action pass
+ defer tc filter del dev "$dev" ingress proto ipv6 pref 106
+}
+
+h2_create()
+{
+ # $h2
+ ip_link_set_up "$h2"
+
+ # H2
+ vrf_create "v$h2"
+ defer vrf_destroy "v$h2"
+
+ ip_link_set_up "v$h2"
+
+ # br2
+ ip_link_add br2 type bridge vlan_filtering 0 mcast_snooping 0
+ ip_link_set_master br2 "v$h2"
+ ip_link_set_up br2
+
+ # $h2
+ ip_link_set_master "$h2" br2
+ install_capture "$h2"
+
+ # v1$h2
+ ip_link_set_up "v1$h2"
+ ip_link_set_master "v1$h2" br2
+}
+
+h3_create()
+{
+ # $h3
+ ip_link_set_up "$h3"
+
+ # H3
+ vrf_create "v$h3"
+ defer vrf_destroy "v$h3"
+
+ ip_link_set_up "v$h3"
+
+ # br3
+ ip_link_add br3 type bridge vlan_filtering 0 mcast_snooping 0
+ ip_link_set_master br3 "v$h3"
+ ip_link_set_up br3
+
+ # $h3
+ ip_link_set_master "$h3" br3
+ install_capture "$h3"
+
+ # v1$h3
+ ip_link_set_up "v1$h3"
+ ip_link_set_master "v1$h3" br3
+}
+
+switch_create()
+{
+ local swp1_mac
+
+ # br1
+ swp1_mac=$(mac_get "$swp1")
+ ip_link_add br1 type bridge vlan_filtering 1 \
+ vlan_default_pvid 0 mcast_snooping 0
+ ip_link_set_addr br1 "$swp1_mac"
+ ip_link_set_up br1
+
+ # A dummy to force the IPv6 OIF=0 test to install a suitable MC route on
+ # $IPMR to be deterministic. Also used for the IPv6 RX!=TX ping test.
+ ip_link_add "X$IPMR" up type dummy
+
+ # IPMR
+ ip_link_add "$IPMR" up type dummy
+ ip_addr_add "$IPMR" 192.0.2.100/28
+ ip_addr_add "$IPMR" 2001:db8:4::1/64
+
+ # $swp1
+ ip_link_set_up "$swp1"
+ ip_link_set_master "$swp1" br1
+ bridge_vlan_add vid 10 dev "$swp1"
+ bridge_vlan_add vid 20 dev "$swp1"
+
+ # $swp2
+ ip_link_set_up "$swp2"
+ ip_addr_add "$swp2" 192.0.2.33/28
+ ip_addr_add "$swp2" 2001:db8:2::1/64
+
+ # $swp3
+ ip_link_set_up "$swp3"
+ ip_addr_add "$swp3" 192.0.2.65/28
+ ip_addr_add "$swp3" 2001:db8:3::1/64
+}
+
+vx_create()
+{
+ local name=$1; shift
+ local vid=$1; shift
+
+ ip_link_add "$name" up type vxlan dstport "$VXPORT" \
+ nolearning noudpcsum tos inherit ttl 16 \
+ "$@"
+ ip_link_set_master "$name" br1
+ bridge_vlan_add vid "$vid" dev "$name" pvid untagged
+}
+export -f vx_create
+
+vx_wait()
+{
+ # Wait for all the ARP, IGMP etc. noise to settle down so that the
+ # tunnel is clear for measurements.
+ sleep 10
+}
+
+vx10_create()
+{
+ vx_create vx10 10 id 1000 "$@"
+}
+export -f vx10_create
+
+vx20_create()
+{
+ vx_create vx20 20 id 2000 "$@"
+}
+export -f vx20_create
+
+vx10_create_wait()
+{
+ vx10_create "$@"
+ vx_wait
+}
+
+vx20_create_wait()
+{
+ vx20_create "$@"
+ vx_wait
+}
+
+ns_init_common()
+{
+ local ns=$1; shift
+ local if_in=$1; shift
+ local ipv4_in=$1; shift
+ local ipv6_in=$1; shift
+ local ipv4_host=$1; shift
+ local ipv6_host=$1; shift
+
+ # v2$h2 / v2$h3
+ ip_link_set_up "$if_in"
+ ip_addr_add "$if_in" "$ipv4_in"
+ ip_addr_add "$if_in" "$ipv6_in"
+
+ # br1
+ ip_link_add br1 type bridge vlan_filtering 1 \
+ vlan_default_pvid 0 mcast_snooping 0
+ ip_link_set_up br1
+
+ # vx10, vx20
+ vx10_create local "${ipv4_in%/*}" group "$GROUP4" dev "$if_in"
+ vx20_create local "${ipv6_in%/*}" group "$GROUP6" dev "$if_in"
+
+ # w1
+ ip_link_add w1 type veth peer name w2
+ ip_link_set_master w1 br1
+ ip_link_set_up w1
+ bridge_vlan_add vid 10 dev w1
+ bridge_vlan_add vid 20 dev w1
+
+ # w2
+ simple_if_init w2
+ defer simple_if_fini w2
+
+ # w2.10
+ ip_link_add w2.10 master vw2 link w2 type vlan id 10
+ ip_link_set_up w2.10
+ ip_addr_add w2.10 "$ipv4_host"
+
+ # w2.20
+ ip_link_add w2.20 master vw2 link w2 type vlan id 20
+ ip_link_set_up w2.20
+ ip_addr_add w2.20 "$ipv6_host"
+}
+export -f ns_init_common
+
+ns2_create()
+{
+ # NS2
+ ip netns add ns2
+ defer ip netns del ns2
+
+ # v2$h2
+ ip link set dev "v2$h2" netns ns2
+ defer ip -n ns2 link set dev "v2$h2" netns 1
+
+ in_ns ns2 \
+ ns_init_common ns2 "v2$h2" \
+ 192.0.2.34/28 2001:db8:2::2/64 \
+ 192.0.2.3/28 2001:db8:1::3/64
+}
+
+ns3_create()
+{
+ # NS3
+ ip netns add ns3
+ defer ip netns del ns3
+
+ # v2$h3
+ ip link set dev "v2$h3" netns ns3
+ defer ip -n ns3 link set dev "v2$h3" netns 1
+
+ ip -n ns3 link set dev "v2$h3" up
+
+ in_ns ns3 \
+ ns_init_common ns3 "v2$h3" \
+ 192.0.2.66/28 2001:db8:3::2/64 \
+ 192.0.2.4/28 2001:db8:1::4/64
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ vrf_prepare
+ defer vrf_cleanup
+
+ forwarding_enable
+ defer forwarding_restore
+
+ ip_link_add "v1$h2" type veth peer name "v2$h2"
+ ip_link_add "v1$h3" type veth peer name "v2$h3"
+
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+ ns2_create
+ ns3_create
+}
+
+adf_install_broken_sg()
+{
+ adf_mcd_start "$IPMR" || exit "$EXIT_STATUS"
+
+ mc_cli add "$swp2" 192.0.2.100 "$GROUP4" "$swp1" "$swp3"
+ defer mc_cli remove "$swp2" 192.0.2.100 "$GROUP4" "$swp1" "$swp3"
+
+ mc_cli add "$swp2" 2001:db8:4::1 "$GROUP6" "$swp1" "$swp3"
+ defer mc_cli remove "$swp2" 2001:db8:4::1 "$GROUP6" "$swp1" "$swp3"
+}
+
+adf_install_rx()
+{
+ mc_cli add "$swp2" 0.0.0.0 "$GROUP4" "$IPMR"
+ defer mc_cli remove "$swp2" 0.0.0.0 "$GROUP4" "$IPMR"
+
+ mc_cli add "$swp3" 0.0.0.0 "$GROUP4" "$IPMR"
+ defer mc_cli remove "$swp3" 0.0.0.0 "$GROUP4" "$IPMR"
+
+ mc_cli add "$swp2" :: "$GROUP6" "$IPMR"
+ defer mc_cli remove "$swp2" :: "$GROUP6" "$IPMR"
+
+ mc_cli add "$swp3" :: "$GROUP6" "$IPMR"
+ defer mc_cli remove "$swp3" :: "$GROUP6" "$IPMR"
+}
+
+adf_install_sg()
+{
+ adf_mcd_start "$IPMR" || exit "$EXIT_STATUS"
+
+ mc_cli add "$IPMR" 192.0.2.100 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" 192.0.2.100 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add "$IPMR" 2001:db8:4::1 "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" 2001:db8:4::1 "$GROUP6" "$swp2" "$swp3"
+
+ adf_install_rx
+}
+
+adf_install_sg_sep()
+{
+ adf_mcd_start lo || exit "$EXIT_STATUS"
+
+ mc_cli add lo 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove lo 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add lo 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove lo 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+}
+
+adf_install_sg_sep_rx()
+{
+ local lo=$1; shift
+
+ adf_mcd_start "$IPMR" "$lo" || exit "$EXIT_STATUS"
+
+ mc_cli add "$lo" 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove "$lo" 192.0.2.120 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add "$lo" 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove "$lo" 2001:db8:5::1 "$GROUP6" "$swp2" "$swp3"
+
+ adf_install_rx
+}
+
+adf_install_starg()
+{
+ adf_mcd_start "$IPMR" || exit "$EXIT_STATUS"
+
+ mc_cli add "$IPMR" 0.0.0.0 "$GROUP4" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" 0.0.0.0 "$GROUP4" "$swp2" "$swp3"
+
+ mc_cli add "$IPMR" :: "$GROUP6" "$swp2" "$swp3"
+ defer mc_cli remove "$IPMR" :: "$GROUP6" "$swp2" "$swp3"
+
+ adf_install_rx
+}
+
+do_packets_v4()
+{
+ local mac
+
+ mac=$(mac_get "$h2")
+ "$MZ" "$h1" -Q 10 -c 10 -d 100msec -p 64 -a own -b "$mac" \
+ -A 192.0.2.1 -B 192.0.2.2 -t udp sp=1234,dp=2345 -q
+}
+
+do_packets_v6()
+{
+ local mac
+
+ mac=$(mac_get "$h2")
+ "$MZ" -6 "$h1" -Q 20 -c 10 -d 100msec -p 64 -a own -b "$mac" \
+ -A 2001:db8:1::1 -B 2001:db8:1::2 -t udp sp=1234,dp=2345 -q
+}
+
+do_test()
+{
+ local ipv=$1; shift
+ local expect_h2=$1; shift
+ local expect_h3=$1; shift
+ local what=$1; shift
+
+ local pref=$((100 + ipv))
+ local t0_h2
+ local t0_h3
+ local t1_h2
+ local t1_h3
+ local d_h2
+ local d_h3
+
+ RET=0
+
+ t0_h2=$(tc_rule_stats_get "$h2" "$pref" ingress)
+ t0_h3=$(tc_rule_stats_get "$h3" "$pref" ingress)
+
+ "do_packets_v$ipv"
+ sleep 1
+
+ t1_h2=$(tc_rule_stats_get "$h2" "$pref" ingress)
+ t1_h3=$(tc_rule_stats_get "$h3" "$pref" ingress)
+
+ d_h2=$((t1_h2 - t0_h2))
+ d_h3=$((t1_h3 - t0_h3))
+
+ ((d_h2 == expect_h2))
+ check_err $? "Expected $expect_h2 packets on H2, got $d_h2"
+
+ ((d_h3 == expect_h3))
+ check_err $? "Expected $expect_h3 packets on H3, got $d_h3"
+
+ log_test "VXLAN MC flood $what"
+}
+
+ipv4_do_test_rx()
+{
+ local h3_should_fail=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ping_do "$h1.10" 192.0.2.3
+ check_err $? "H2 should respond"
+
+ ping_do "$h1.10" 192.0.2.4
+ check_err_fail "$h3_should_fail" $? "H3 responds"
+
+ log_test "VXLAN MC flood $what"
+}
+
+ipv6_do_test_rx()
+{
+ local h3_should_fail=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ ping6_do "$h1.20" 2001:db8:1::3
+ check_err $? "H2 should respond"
+
+ ping6_do "$h1.20" 2001:db8:1::4
+ check_err_fail "$h3_should_fail" $? "H3 responds"
+
+ log_test "VXLAN MC flood $what"
+}
+
+ipv4_nomcroute()
+{
+ # Install a misleading (S,G) rule to attempt to trick the system into
+ # pushing the packets elsewhere.
+ adf_install_broken_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$swp2"
+ do_test 4 10 0 "IPv4 nomcroute"
+}
+
+ipv6_nomcroute()
+{
+ # As for IPv4, install a misleading (S,G) rule.
+ adf_install_broken_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$swp2"
+ do_test 6 10 0 "IPv6 nomcroute"
+}
+
+ipv4_nomcroute_rx()
+{
+ vx10_create local 192.0.2.100 group "$GROUP4" dev "$swp2"
+ ipv4_do_test_rx 1 "IPv4 nomcroute ping"
+}
+
+ipv6_nomcroute_rx()
+{
+ vx20_create local 2001:db8:4::1 group "$GROUP6" dev "$swp2"
+ ipv6_do_test_rx 1 "IPv6 nomcroute ping"
+}
+
+ipv4_mcroute()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ do_test 4 10 10 "IPv4 mcroute"
+}
+
+ipv6_mcroute()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ do_test 6 10 10 "IPv6 mcroute"
+}
+
+ipv4_mcroute_rx()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ ipv4_do_test_rx 0 "IPv4 mcroute ping"
+}
+
+ipv6_mcroute_rx()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ ipv6_do_test_rx 0 "IPv6 mcroute ping"
+}
+
+ipv4_mcroute_changelink()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR"
+ ip link set dev vx10 type vxlan mcroute
+ sleep 1
+ do_test 4 10 10 "IPv4 mcroute changelink"
+}
+
+ipv6_mcroute_changelink()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ ip link set dev vx20 type vxlan mcroute
+ sleep 1
+ do_test 6 10 10 "IPv6 mcroute changelink"
+}
+
+ipv4_mcroute_starg()
+{
+ adf_install_starg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ do_test 4 10 10 "IPv4 mcroute (*,G)"
+}
+
+ipv6_mcroute_starg()
+{
+ adf_install_starg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ do_test 6 10 10 "IPv6 mcroute (*,G)"
+}
+
+ipv4_mcroute_starg_rx()
+{
+ adf_install_starg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ ipv4_do_test_rx 0 "IPv4 mcroute (*,G) ping"
+}
+
+ipv6_mcroute_starg_rx()
+{
+ adf_install_starg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ ipv6_do_test_rx 0 "IPv6 mcroute (*,G) ping"
+}
+
+ipv4_mcroute_noroute()
+{
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ do_test 4 0 0 "IPv4 mcroute, no route"
+}
+
+ipv6_mcroute_noroute()
+{
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ do_test 6 0 0 "IPv6 mcroute, no route"
+}
+
+ipv4_mcroute_fdb()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 dev "$IPMR" mcroute
+ bridge fdb add dev vx10 \
+ 00:00:00:00:00:00 self static dst "$GROUP4" via "$IPMR"
+ do_test 4 10 10 "IPv4 mcroute FDB"
+}
+
+ipv6_mcroute_fdb()
+{
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 dev "$IPMR" mcroute
+ bridge -6 fdb add dev vx20 \
+ 00:00:00:00:00:00 self static dst "$GROUP6" via "$IPMR"
+ do_test 6 10 10 "IPv6 mcroute FDB"
+}
+
+# Use the FDB to configure VXLAN so that oif=0 for the purposes of the FIB lookup.
+ipv4_mcroute_fdb_oif0()
+{
+ adf_install_sg
+ vx10_create_wait local 192.0.2.100 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add dev vx10 00:00:00:00:00:00 self static dst "$GROUP4"
+ do_test 4 10 10 "IPv4 mcroute oif=0"
+}
+
+ipv6_mcroute_fdb_oif0()
+{
+ # The IPv6 tunnel lookup does not fall back to selection by source
+ # address. Instead it just does a FIB match, and that would find one of
+ # the several ff00::/8 multicast routes -- each device has one. In order
+ # to reliably force the $IPMR device, add a /128 route for the
+ # destination group address.
+ ip -6 route add table local multicast "$GROUP6/128" dev "$IPMR"
+ defer ip -6 route del table local multicast "$GROUP6/128" dev "$IPMR"
+
+ adf_install_sg
+ vx20_create_wait local 2001:db8:4::1 group "$GROUP6" dev "$IPMR" mcroute
+ bridge -6 fdb del dev vx20 00:00:00:00:00:00
+ bridge -6 fdb add dev vx20 00:00:00:00:00:00 self static dst "$GROUP6"
+ do_test 6 10 10 "IPv6 mcroute oif=0"
+}
+
+# As in the oif=0 test above, but have the FIB lookup resolve to the loopback
+# device instead of $IPMR. This doesn't work with IPv6 -- a MC route on lo
+# would be marked as RTF_REJECT.
+ipv4_mcroute_fdb_oif0_sep()
+{
+ adf_install_sg_sep
+
+ ip_addr_add lo 192.0.2.120/28
+ vx10_create_wait local 192.0.2.120 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add dev vx10 00:00:00:00:00:00 self static dst "$GROUP4"
+ do_test 4 10 10 "IPv4 mcroute TX!=RX oif=0"
+}
+
+ipv4_mcroute_fdb_oif0_sep_rx()
+{
+ adf_install_sg_sep_rx lo
+
+ ip_addr_add lo 192.0.2.120/28
+ vx10_create_wait local 192.0.2.120 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add dev vx10 00:00:00:00:00:00 self static dst "$GROUP4"
+ ipv4_do_test_rx 0 "IPv4 mcroute TX!=RX oif=0 ping"
+}
+
+ipv4_mcroute_fdb_sep_rx()
+{
+ adf_install_sg_sep_rx lo
+
+ ip_addr_add lo 192.0.2.120/28
+ vx10_create_wait local 192.0.2.120 group "$GROUP4" dev "$IPMR" mcroute
+ bridge fdb del dev vx10 00:00:00:00:00:00
+ bridge fdb add \
+ dev vx10 00:00:00:00:00:00 self static dst "$GROUP4" via lo
+ ipv4_do_test_rx 0 "IPv4 mcroute TX!=RX ping"
+}
+
+ipv6_mcroute_fdb_sep_rx()
+{
+ adf_install_sg_sep_rx "X$IPMR"
+
+ ip_addr_add "X$IPMR" 2001:db8:5::1/64
+ vx20_create_wait local 2001:db8:5::1 group "$GROUP6" dev "$IPMR" mcroute
+ bridge -6 fdb del dev vx20 00:00:00:00:00:00
+ bridge -6 fdb add dev vx20 00:00:00:00:00:00 \
+ self static dst "$GROUP6" via "X$IPMR"
+ ipv6_do_test_rx 0 "IPv6 mcroute TX!=RX ping"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait "$NUM_NETIFS"
+tests_run
+
+exit "$EXIT_STATUS"
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index 006fdadcc4b9..ff0dbe23e8e0 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -547,13 +547,19 @@ ip_link_set_addr()
defer ip link set dev "$name" address "$old_addr"
}
-ip_link_is_up()
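+# Check whether the named link currently has the given flag set, e.g.:
+#
+#	ip_link_has_flag swp1 LOWER_UP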
+ip_link_has_flag()
{
local name=$1; shift
+ local flag=$1; shift
local state=$(ip -j link show "$name" |
- jq -r '(.[].flags[] | select(. == "UP")) // "DOWN"')
- [[ $state == "UP" ]]
+ jq --arg flag "$flag" 'any(.[].flags.[]; . == $flag)')
+ [[ $state == true ]]
+}
+
+ip_link_is_up()
+{
+ ip_link_has_flag "$1" UP
}
ip_link_set_up()
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index cd8a58097448..1f5227f3d64d 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -385,7 +385,7 @@ static int get_bind_to_device(int sd, char *name, size_t len)
name[0] = '\0';
rc = getsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, name, &optlen);
if (rc < 0)
- log_err_errno("setsockopt(SO_BINDTODEVICE)");
+ log_err_errno("getsockopt(SO_BINDTODEVICE)");
return rc;
}
@@ -535,7 +535,7 @@ static int set_freebind(int sd, int version)
break;
case AF_INET6:
if (setsockopt(sd, SOL_IPV6, IPV6_FREEBIND, &one, sizeof(one))) {
- log_err_errno("setsockopt(IPV6_FREEBIND");
+ log_err_errno("setsockopt(IPV6_FREEBIND)");
rc = -1;
}
break;
@@ -812,7 +812,7 @@ static int convert_addr(struct sock_args *args, const char *_str,
sep++;
if (str_to_uint(sep, 1, pfx_len_max,
&args->prefix_len) != 0) {
- fprintf(stderr, "Invalid port\n");
+ fprintf(stderr, "Invalid prefix length\n");
return 1;
}
} else {
@@ -1272,7 +1272,7 @@ static int msg_loop(int client, int sd, void *addr, socklen_t alen,
}
}
- nfds = interactive ? MAX(fileno(stdin), sd) + 1 : sd + 1;
+ nfds = interactive ? MAX(fileno(stdin), sd) + 1 : sd + 1;
while (1) {
FD_ZERO(&rfds);
FD_SET(sd, &rfds);
@@ -1492,7 +1492,7 @@ static int lsock_init(struct sock_args *args)
sd = socket(args->version, args->type, args->protocol);
if (sd < 0) {
log_err_errno("Error opening socket");
- return -1;
+ return -1;
}
if (set_reuseaddr(sd) != 0)
@@ -1912,7 +1912,7 @@ static int ipc_parent(int cpid, int fd, struct sock_args *args)
* waiting to be told when to continue
*/
if (read(fd, &buf, sizeof(buf)) <= 0) {
- log_err_errno("Failed to read IPC status from status");
+ log_err_errno("Failed to read IPC status from pipe");
return 1;
}
if (!buf) {
diff --git a/tools/testing/selftests/net/nl_netdev.py b/tools/testing/selftests/net/nl_netdev.py
index beaee5e4e2aa..c9109627a741 100755
--- a/tools/testing/selftests/net/nl_netdev.py
+++ b/tools/testing/selftests/net/nl_netdev.py
@@ -2,8 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
import time
+from os import system
from lib.py import ksft_run, ksft_exit, ksft_pr
-from lib.py import ksft_eq, ksft_ge, ksft_busy_wait
+from lib.py import ksft_eq, ksft_ge, ksft_ne, ksft_busy_wait
from lib.py import NetdevFamily, NetdevSimDev, ip
@@ -34,6 +35,39 @@ def napi_list_check(nf) -> None:
ksft_eq(len(napis), 100,
comment=f"queue count after reset queue {q} mode {i}")
+def dev_set_threaded(nf) -> None:
+ """
+ Test that verifies various cases of napi threaded
+ set and unset at device level using sysfs.
+ """
+ with NetdevSimDev(queue_count=2) as nsimdev:
+ nsim = nsimdev.nsims[0]
+
+ ip(f"link set dev {nsim.ifname} up")
+
+ napis = nf.napi_get({'ifindex': nsim.ifindex}, dump=True)
+ ksft_eq(len(napis), 2)
+
+ napi0_id = napis[0]['id']
+ napi1_id = napis[1]['id']
+
+ # set threaded
+ system(f"echo 1 > /sys/class/net/{nsim.ifname}/threaded")
+
+ # check napi threaded is set for both napis
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_ne(napi0.get('pid'), None)
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_ne(napi1.get('pid'), None)
+
+ # unset threaded
+ system(f"echo 0 > /sys/class/net/{nsim.ifname}/threaded")
+
+ # check napi threaded is unset for both napis
+ napi0 = nf.napi_get({'id': napi0_id})
+ ksft_eq(napi0.get('pid'), None)
+ napi1 = nf.napi_get({'id': napi1_id})
+ ksft_eq(napi1.get('pid'), None)
def nsim_rxq_reset_down(nf) -> None:
"""
@@ -122,7 +156,7 @@ def page_pool_check(nf) -> None:
def main() -> None:
nf = NetdevFamily()
ksft_run([empty_check, lo_check, page_pool_check, napi_list_check,
- nsim_rxq_reset_down],
+ dev_set_threaded, nsim_rxq_reset_down],
args=(nf, ))
ksft_exit()
diff --git a/tools/testing/selftests/net/packetdrill/ksft_runner.sh b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
index ef8b25a606d8..c5b01e1bd4c7 100755
--- a/tools/testing/selftests/net/packetdrill/ksft_runner.sh
+++ b/tools/testing/selftests/net/packetdrill/ksft_runner.sh
@@ -39,11 +39,15 @@ if [[ -n "${KSFT_MACHINE_SLOW}" ]]; then
# xfail tests that are known flaky with dbg config, not fixable.
# still run them for coverage (and expect 100% pass without dbg).
declare -ar xfail_list=(
+ "tcp_blocking_blocking-connect.pkt"
+ "tcp_blocking_blocking-read.pkt"
"tcp_eor_no-coalesce-retrans.pkt"
"tcp_fast_recovery_prr-ss.*.pkt"
+ "tcp_sack_sack-route-refresh-ip-tos.pkt"
"tcp_slow_start_slow-start-after-win-update.pkt"
"tcp_timestamping.*.pkt"
"tcp_user_timeout_user-timeout-probe.pkt"
+ "tcp_zerocopy_cl.*.pkt"
"tcp_zerocopy_epoll_.*.pkt"
"tcp_tcp_info_tcp-info-.*-limited.pkt"
)
diff --git a/tools/testing/selftests/net/rtnetlink_notification.sh b/tools/testing/selftests/net/rtnetlink_notification.sh
new file mode 100755
index 000000000000..3f9780232bd6
--- /dev/null
+++ b/tools/testing/selftests/net/rtnetlink_notification.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test checks rtnetlink notification callpaths and tries to get as much
+# coverage as possible.
+#
+# set -e
+
+ALL_TESTS="
+ kci_test_mcast_addr_notification
+ kci_test_anycast_addr_notification
+"
+
+source lib.sh
+test_dev="test-dummy1"
+
+kci_test_mcast_addr_notification()
+{
+ RET=0
+ local tmpfile
+ local monitor_pid
+ local match_result
+
+ tmpfile=$(mktemp)
+ defer rm "$tmpfile"
+
+ ip monitor maddr > "$tmpfile" &
+ monitor_pid=$!
+ defer kill_process "$monitor_pid"
+
+ sleep 1
+
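+ # If this iproute2 does not understand "maddr", the monitor exits right
+ # away and its /proc entry disappears, so skip the test.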
+ if [ ! -e "/proc/$monitor_pid" ]; then
+ RET=$ksft_skip
+ log_test "mcast addr notification: iproute2 too old"
+ return $RET
+ fi
+
+ ip link add name "$test_dev" type dummy
+ check_err $? "failed to add dummy interface"
+ ip link set "$test_dev" up
+ check_err $? "failed to set dummy interface up"
+ ip link del dev "$test_dev"
+ check_err $? "Failed to delete dummy interface"
+ sleep 1
+
+ # There should be 4 line matches as follows.
+ # 13: test-dummy1    inet6 mcast ff02::1 scope global 
+ # 13: test-dummy1    inet mcast 224.0.0.1 scope global 
+ # Deleted 13: test-dummy1    inet mcast 224.0.0.1 scope global 
+ # Deleted 13: test-dummy1    inet6 mcast ff02::1 scope global 
+ match_result=$(grep -cE "$test_dev.*(224.0.0.1|ff02::1)" "$tmpfile")
+ if [ "$match_result" -ne 4 ]; then
+ RET=$ksft_fail
+ fi
+ log_test "mcast addr notification: Expected 4 matches, got $match_result"
+ return $RET
+}
+
+kci_test_anycast_addr_notification()
+{
+ RET=0
+ local tmpfile
+ local monitor_pid
+ local match_result
+
+ tmpfile=$(mktemp)
+ defer rm "$tmpfile"
+
+ ip monitor acaddress > "$tmpfile" &
+ monitor_pid=$!
+ defer kill_process "$monitor_pid"
+ sleep 1
+
+ if [ ! -e "/proc/$monitor_pid" ]; then
+ RET=$ksft_skip
+ log_test "anycast addr notification: iproute2 too old"
+ return "$RET"
+ fi
+
+ ip link add name "$test_dev" type dummy
+ check_err $? "failed to add dummy interface"
+ ip link set "$test_dev" up
+ check_err $? "failed to set dummy interface up"
+ sysctl -qw net.ipv6.conf."$test_dev".forwarding=1
+ ip link del dev "$test_dev"
+ check_err $? "Failed to delete dummy interface"
+ sleep 1
+
+ # There should be 2 line matches as follows.
+ # 9: test-dummy1 inet6 any fe80:: scope global
+ # Deleted 9: test-dummy1 inet6 any fe80:: scope global
+ match_result=$(grep -cE "$test_dev.*(fe80::)" "$tmpfile")
+ if [ "$match_result" -ne 2 ]; then
+ RET=$ksft_fail
+ fi
+ log_test "anycast addr notification: Expected 2 matches, got $match_result"
+ return "$RET"
+}
+
+# Check for needed privileges.
+if [ "$(id -u)" -ne 0 ]; then
+ RET=$ksft_skip
+ log_test "need root privileges"
+ exit $RET
+fi
+
+require_command ip
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
index 4b86040c58c6..bedf0ce885c2 100755
--- a/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
+++ b/tools/testing/selftests/net/srv6_end_x_next_csid_l3vpn_test.sh
@@ -72,6 +72,9 @@
# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y in
# the selftest network.
#
+# In addition, every router interface connecting rt-x to rt-y is assigned an
+# IPv6 link-local address fe80::x:y/64.
+#
# Local SID/C-SID table
# =====================
#
@@ -521,6 +524,9 @@ setup_rt_networking()
ip -netns "${nsname}" addr \
add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+ ip -netns "${nsname}" addr \
+ add "fe80::${rt}:${neigh}/64" dev "${devname}" nodad
+
ip -netns "${nsname}" link set "${devname}" up
done
@@ -609,6 +615,27 @@ set_end_x_nextcsid()
nflen "${LCNODEFUNC_BLEN}" dev "${DUMMY_DEVNAME}"
}
+set_end_x_ll_nextcsid()
+{
+ local rt="$1"
+ local adj="$2"
+
+ eval nsname=\${$(get_rtname "${rt}")}
+ lcnode_func_prefix="$(build_lcnode_func_prefix "${rt}")"
+ nh6_ll_addr="fe80::${adj}:${rt}"
+ oifname="veth-rt-${rt}-${adj}"
+
+ # enable the NEXT-C-SID SRv6 End.X behavior via an IPv6 link-local nexthop
+ # address (note that "dev" is the dummy dum0 device chosen for the sake
+ # of simplicity).
+ ip -netns "${nsname}" -6 route \
+ replace "${lcnode_func_prefix}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.X nh6 "${nh6_ll_addr}" \
+ oif "${oifname}" flavors next-csid lblen "${LCBLOCK_BLEN}" \
+ nflen "${LCNODEFUNC_BLEN}" dev "${DUMMY_DEVNAME}"
+}
+
set_underlay_sids_reachability()
{
local rt="$1"
@@ -1016,6 +1043,27 @@ host_vpn_tests()
check_and_log_hs_ipv4_connectivity 1 2
check_and_log_hs_ipv4_connectivity 2 1
+
+ # Setup the adjacencies in the SRv6 aware routers using IPv6 link-local
+ # addresses.
+ # - rt-3 SRv6 End.X adjacency with rt-4
+ # - rt-4 SRv6 End.X adjacency with rt-1
+ set_end_x_ll_nextcsid 3 4
+ set_end_x_ll_nextcsid 4 1
+
+ log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv6), link-local"
+
+ check_and_log_hs_ipv6_connectivity 1 2
+ check_and_log_hs_ipv6_connectivity 2 1
+
+ log_section "SRv6 VPN connectivity test hosts (h1 <-> h2, IPv4), link-local"
+
+ check_and_log_hs_ipv4_connectivity 1 2
+ check_and_log_hs_ipv4_connectivity 2 1
+
+ # Restore the previous adjacencies.
+ set_end_x_nextcsid 3 4
+ set_end_x_nextcsid 4 1
}
__nextcsid_end_x_behavior_test()
diff --git a/tools/testing/selftests/net/tcp_ao/seq-ext.c b/tools/testing/selftests/net/tcp_ao/seq-ext.c
index f00245263b20..6478da6a71c3 100644
--- a/tools/testing/selftests/net/tcp_ao/seq-ext.c
+++ b/tools/testing/selftests/net/tcp_ao/seq-ext.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Check that after SEQ number wrap-around:
* 1. SEQ-extension has upper bytes set
- * 2. TCP conneciton is alive and no TCPAOBad segments
+ * 2. TCP connection is alive and no TCPAOBad segments
* In order to test (2), the test doesn't just adjust seq number for a queue
* on a connected socket, but migrates it to another sk+port number, so
* that there won't be any delayed packets that will fail to verify
diff --git a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
index 6127a78ee988..8deacc565afa 100755
--- a/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
+++ b/tools/testing/selftests/net/test_vxlan_vnifiltering.sh
@@ -146,18 +146,17 @@ run_cmd()
}
check_hv_connectivity() {
- ip netns exec $hv_1 ping -c 1 -W 1 $1 &>/dev/null
- sleep 1
- ip netns exec $hv_1 ping -c 1 -W 1 $2 &>/dev/null
+ slowwait 5 ip netns exec $hv_1 ping -c 1 -W 1 $1 &>/dev/null
+ slowwait 5 ip netns exec $hv_1 ping -c 1 -W 1 $2 &>/dev/null
return $?
}
check_vm_connectivity() {
- run_cmd "ip netns exec $vm_11 ping -c 1 -W 1 10.0.10.12"
+ slowwait 5 run_cmd "ip netns exec $vm_11 ping -c 1 -W 1 10.0.10.12"
log_test $? 0 "VM connectivity over $1 (ipv4 default rdst)"
- run_cmd "ip netns exec $vm_21 ping -c 1 -W 1 10.0.10.22"
+ slowwait 5 run_cmd "ip netns exec $vm_21 ping -c 1 -W 1 10.0.10.22"
log_test $? 0 "VM connectivity over $1 (ipv6 default rdst)"
}
diff --git a/tools/testing/selftests/net/vrf_route_leaking.sh b/tools/testing/selftests/net/vrf_route_leaking.sh
index e9c2f71da207..ce34cb2e6e0b 100755
--- a/tools/testing/selftests/net/vrf_route_leaking.sh
+++ b/tools/testing/selftests/net/vrf_route_leaking.sh
@@ -275,7 +275,7 @@ setup_sym()
# Wait for ip config to settle
- sleep 2
+ slowwait 5 ip netns exec $h1 "${ping6}" -c1 -w1 ${H2_N2_IP6} >/dev/null 2>&1
}
setup_asym()
@@ -370,7 +370,7 @@ setup_asym()
ip -netns $r2 -6 addr add dev eth1 ${R2_N2_IP6}/64 nodad
# Wait for ip config to settle
- sleep 2
+ slowwait 5 ip netns exec $h1 "${ping6}" -c1 -w1 ${H2_N2_IP6} >/dev/null 2>&1
}
check_connectivity()
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index edc08a4433fd..ed1e2886ba3c 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -120,6 +120,7 @@ static void usage(char *progname)
" -c query the ptp clock's capabilities\n"
" -d name device to open\n"
" -e val read 'val' external time stamp events\n"
+ " -E val enable rising (1), falling (2), or both (3) edges\n"
" -f val adjust the ptp clock frequency by 'val' ppb\n"
" -F chan Enable single channel mask and keep device open for debugfs verification.\n"
" -g get the ptp clock time\n"
@@ -178,6 +179,7 @@ int main(int argc, char *argv[])
int adjphase = 0;
int capabilities = 0;
int extts = 0;
+ int edge = 0;
int flagtest = 0;
int gettime = 0;
int index = 0;
@@ -202,7 +204,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "cd:e:f:F:ghH:i:k:lL:n:o:p:P:rsSt:T:w:x:Xy:z"))) {
+ while (EOF != (c = getopt(argc, argv, "cd:e:E:f:F:ghH:i:k:lL:n:o:p:P:rsSt:T:w:x:Xy:z"))) {
switch (c) {
case 'c':
capabilities = 1;
@@ -213,6 +215,11 @@ int main(int argc, char *argv[])
case 'e':
extts = atoi(optarg);
break;
+ case 'E':
+ edge = atoi(optarg);
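+ /* Map bit 0/1 of the argument onto the PTP edge flags. */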
+ edge = (edge & 1 ? PTP_RISING_EDGE : 0) |
+ (edge & 2 ? PTP_FALLING_EDGE : 0);
+ break;
case 'f':
adjfreq = atoi(optarg);
break;
@@ -444,7 +451,7 @@ int main(int argc, char *argv[])
if (!readonly) {
memset(&extts_request, 0, sizeof(extts_request));
extts_request.index = index;
- extts_request.flags = PTP_ENABLE_FEATURE;
+ extts_request.flags = PTP_ENABLE_FEATURE | edge;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
extts = 0;
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
index 28c6ce6da7db..531a2f6e4900 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json
@@ -264,5 +264,41 @@
"matchPattern": "sfq",
"matchCount": "0",
"teardown": []
+ },
+ {
+ "id": "cdc1",
+ "name": "Check that a negative perturb timer is rejected",
+ "category": [
+ "qdisc",
+ "sfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq perturb -10",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "sfq",
+ "matchCount": "0",
+ "teardown": []
+ },
+ {
+ "id": "a9f0",
+ "name": "Check that a too big perturb timer is rejected",
+ "category": [
+ "qdisc",
+ "sfq"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root sfq perturb 1000000000",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "sfq",
+ "matchCount": "0",
+ "teardown": []
}
]
diff --git a/tools/testing/selftests/vsock/.gitignore b/tools/testing/selftests/vsock/.gitignore
new file mode 100644
index 000000000000..9c5bf379480f
--- /dev/null
+++ b/tools/testing/selftests/vsock/.gitignore
@@ -0,0 +1,2 @@
+vmtest.log
+vsock_test
diff --git a/tools/testing/selftests/vsock/Makefile b/tools/testing/selftests/vsock/Makefile
new file mode 100644
index 000000000000..c407c0afd938
--- /dev/null
+++ b/tools/testing/selftests/vsock/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CURDIR := $(abspath .)
+TOOLSDIR := $(abspath ../../..)
+VSOCK_TEST_DIR := $(TOOLSDIR)/testing/vsock
+VSOCK_TEST_SRCS := $(wildcard $(VSOCK_TEST_DIR)/*.c $(VSOCK_TEST_DIR)/*.h)
+
+$(OUTPUT)/vsock_test: $(VSOCK_TEST_DIR)/vsock_test
+ install -m 755 $< $@
+
+$(VSOCK_TEST_DIR)/vsock_test: $(VSOCK_TEST_SRCS)
+ $(MAKE) -C $(VSOCK_TEST_DIR) vsock_test
+TEST_PROGS += vmtest.sh
+TEST_GEN_FILES := vsock_test
+
+include ../lib.mk
+
diff --git a/tools/testing/selftests/vsock/config b/tools/testing/selftests/vsock/config
new file mode 100644
index 000000000000..5f0a4f17dfc9
--- /dev/null
+++ b/tools/testing/selftests/vsock/config
@@ -0,0 +1,111 @@
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BPF=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_BPF_EVENTS=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_KPROBES=y
+CONFIG_KPROBE_EVENTS=y
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_UPROBES=y
+CONFIG_UPROBE_EVENTS=y
+CONFIG_DEBUG_FS=y
+CONFIG_FW_CFG_SYSFS=y
+CONFIG_FW_CFG_SYSFS_CMDLINE=y
+CONFIG_DRM=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_DRM_VIRTIO_GPU_KMS=y
+CONFIG_DRM_BOCHS=y
+CONFIG_VIRTIO_IOMMU=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_PCI=y
+CONFIG_SND_INTEL8X0=y
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SECURITYFS=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
+CONFIG_FUSE_FS=y
+CONFIG_VIRTIO_FS=y
+CONFIG_SERIO=y
+CONFIG_PCI=y
+CONFIG_INPUT=y
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_X86_VERBOSE_BOOTUP=y
+CONFIG_VGA_CONSOLE=y
+CONFIG_FB=y
+CONFIG_FB_VESA=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_KVM_GUEST=y
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
+CONFIG_VSOCKETS=y
+CONFIG_VSOCKETS_DIAG=y
+CONFIG_VSOCKETS_LOOPBACK=y
+CONFIG_VMWARE_VMCI_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS_COMMON=y
+CONFIG_HYPERV_VSOCKETS=y
+CONFIG_VMWARE_VMCI=y
+CONFIG_VHOST_VSOCK=y
+CONFIG_HYPERV=y
+CONFIG_UEVENT_HELPER=n
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_NET=y
+CONFIG_NET_CORE=y
+CONFIG_NETDEVICES=y
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_INET=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_9P_FS=y
+CONFIG_VIRTIO_NET=y
+CONFIG_CMDLINE_OVERRIDE=n
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_SHMEM=y
+CONFIG_TMPFS=y
+CONFIG_UNIX=y
+CONFIG_MODULE_SIG_FORCE=n
+CONFIG_DEVTMPFS=y
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_INOTIFY_USER=y
+CONFIG_BLOCK=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_SCSI=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_I6300ESB_WDT=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_OVERLAY_FS=y
+CONFIG_DAX=y
+CONFIG_DAX_DRIVER=y
+CONFIG_FS_DAX=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_ZONE_DEVICE=y
diff --git a/tools/testing/selftests/vsock/settings b/tools/testing/selftests/vsock/settings
new file mode 100644
index 000000000000..694d70710ff0
--- /dev/null
+++ b/tools/testing/selftests/vsock/settings
@@ -0,0 +1 @@
+timeout=300
diff --git a/tools/testing/selftests/vsock/vmtest.sh b/tools/testing/selftests/vsock/vmtest.sh
new file mode 100755
index 000000000000..edacebfc1632
--- /dev/null
+++ b/tools/testing/selftests/vsock/vmtest.sh
@@ -0,0 +1,487 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2025 Meta Platforms, Inc. and affiliates
+#
+# Dependencies:
+# * virtme-ng
+# * busybox-static (used by virtme-ng)
+# * qemu (used by virtme-ng)
+
+readonly SCRIPT_DIR="$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
+readonly KERNEL_CHECKOUT=$(realpath "${SCRIPT_DIR}"/../../../../)
+
+source "${SCRIPT_DIR}"/../kselftest/ktap_helpers.sh
+
+readonly VSOCK_TEST="${SCRIPT_DIR}"/vsock_test
+readonly TEST_GUEST_PORT=51000
+readonly TEST_HOST_PORT=50000
+readonly TEST_HOST_PORT_LISTENER=50001
+readonly SSH_GUEST_PORT=22
+readonly SSH_HOST_PORT=2222
+readonly VSOCK_CID=1234
+readonly WAIT_PERIOD=3
+readonly WAIT_PERIOD_MAX=60
+readonly WAIT_TOTAL=$(( WAIT_PERIOD * WAIT_PERIOD_MAX ))
+readonly QEMU_PIDFILE=$(mktemp /tmp/qemu_vsock_vmtest_XXXX.pid)
+
+# virtme-ng offers a netdev for ssh when using "--ssh", but we also need a
+# control port forwarded for vsock_test. Because virtme-ng doesn't support
+# adding an additional port to forward to the device created from "--ssh" and
+# virtme-init mistakenly sets identical IPs to the ssh device and additional
+# devices, we instead opt out of using --ssh, add the device manually, and also
+# add the kernel cmdline options that virtme-init uses to setup the interface.
+readonly QEMU_TEST_PORT_FWD="hostfwd=tcp::${TEST_HOST_PORT}-:${TEST_GUEST_PORT}"
+readonly QEMU_SSH_PORT_FWD="hostfwd=tcp::${SSH_HOST_PORT}-:${SSH_GUEST_PORT}"
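+# With these forwards, a host-side client connecting to
+# 127.0.0.1:${TEST_HOST_PORT} is redirected by QEMU user networking to the
+# guest's ${TEST_GUEST_PORT}, and ssh to localhost:${SSH_HOST_PORT} reaches the
+# guest sshd on ${SSH_GUEST_PORT}.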
+readonly QEMU_OPTS="\
+ -netdev user,id=n0,${QEMU_TEST_PORT_FWD},${QEMU_SSH_PORT_FWD} \
+ -device virtio-net-pci,netdev=n0 \
+ -device vhost-vsock-pci,guest-cid=${VSOCK_CID} \
+ --pidfile ${QEMU_PIDFILE} \
+"
+readonly KERNEL_CMDLINE="\
+ virtme.dhcp net.ifnames=0 biosdevname=0 \
+ virtme.ssh virtme_ssh_channel=tcp virtme_ssh_user=$USER \
+"
+readonly LOG=$(mktemp /tmp/vsock_vmtest_XXXX.log)
+readonly TEST_NAMES=(vm_server_host_client vm_client_host_server vm_loopback)
+readonly TEST_DESCS=(
+ "Run vsock_test in server mode on the VM and in client mode on the host."
+ "Run vsock_test in client mode on the VM and in server mode on the host."
+ "Run vsock_test using the loopback transport in the VM."
+)
+
+VERBOSE=0
+
+usage() {
+ local name
+ local desc
+ local i
+
+ echo
+ echo "$0 [OPTIONS] [TEST]..."
+ echo "If no TEST argument is given, all tests will be run."
+ echo
+ echo "Options"
+ echo " -b: build the kernel from the current source tree and use it for guest VMs"
+ echo " -q: set the path to or name of qemu binary"
+ echo " -v: verbose output"
+ echo
+ echo "Available tests"
+
+ for ((i = 0; i < ${#TEST_NAMES[@]}; i++)); do
+ name=${TEST_NAMES[${i}]}
+ desc=${TEST_DESCS[${i}]}
+ printf "\t%-35s%-35s\n" "${name}" "${desc}"
+ done
+ echo
+
+ exit 1
+}
+
+die() {
+ echo "$*" >&2
+ exit "${KSFT_FAIL}"
+}
+
+vm_ssh() {
+ ssh -q -o UserKnownHostsFile=/dev/null -p ${SSH_HOST_PORT} localhost "$@"
+ return $?
+}
+
+cleanup() {
+ if [[ -s "${QEMU_PIDFILE}" ]]; then
+ pkill -SIGTERM -F "${QEMU_PIDFILE}" > /dev/null 2>&1
+ fi
+
+ # If failure occurred during or before qemu start up, then we need
+ # to clean this up ourselves.
+ if [[ -e "${QEMU_PIDFILE}" ]]; then
+ rm "${QEMU_PIDFILE}"
+ fi
+}
+
+check_args() {
+ local found
+
+ for arg in "$@"; do
+ found=0
+ for name in "${TEST_NAMES[@]}"; do
+ if [[ "${name}" = "${arg}" ]]; then
+ found=1
+ break
+ fi
+ done
+
+ if [[ "${found}" -eq 0 ]]; then
+ echo "${arg} is not an available test" >&2
+ usage
+ fi
+ done
+
+ for arg in "$@"; do
+ if ! command -v > /dev/null "test_${arg}"; then
+ echo "Test ${arg} not found" >&2
+ usage
+ fi
+ done
+}
+
+check_deps() {
+ for dep in vng ${QEMU} busybox pkill ssh; do
+ if [[ ! -x $(command -v "${dep}") ]]; then
+ echo -e "skip: dependency ${dep} not found!\n"
+ exit "${KSFT_SKIP}"
+ fi
+ done
+
+ if [[ ! -x $(command -v "${VSOCK_TEST}") ]]; then
+ printf "skip: %s not found!" "${VSOCK_TEST}"
+ printf " Please build the kselftest vsock target.\n"
+ exit "${KSFT_SKIP}"
+ fi
+}
+
+check_vng() {
+ local tested_versions
+ local version
+ local ok
+
+ tested_versions=("1.33" "1.36")
+ version="$(vng --version)"
+
+ ok=0
+ for tv in "${tested_versions[@]}"; do
+ if [[ "${version}" == *"${tv}"* ]]; then
+ ok=1
+ break
+ fi
+ done
+
+ if [[ ! "${ok}" -eq 1 ]]; then
+ printf "warning: vng version '%s' has not been tested and may " "${version}" >&2
+ printf "not function properly.\n\tThe following versions have been tested: " >&2
+ echo "${tested_versions[@]}" >&2
+ fi
+}
+
+handle_build() {
+ if [[ ! "${BUILD}" -eq 1 ]]; then
+ return
+ fi
+
+ if [[ ! -d "${KERNEL_CHECKOUT}" ]]; then
+ echo "-b requires vmtest.sh called from the kernel source tree" >&2
+ exit 1
+ fi
+
+ pushd "${KERNEL_CHECKOUT}" &>/dev/null
+
+ if ! vng --kconfig --config "${SCRIPT_DIR}"/config; then
+ die "failed to generate .config for kernel source tree (${KERNEL_CHECKOUT})"
+ fi
+
+ if ! make -j$(nproc); then
+ die "failed to build kernel from source tree (${KERNEL_CHECKOUT})"
+ fi
+
+ popd &>/dev/null
+}
+
+vm_start() {
+ local logfile=/dev/null
+ local verbose_opt=""
+ local kernel_opt=""
+ local qemu
+
+ qemu=$(command -v "${QEMU}")
+
+ if [[ "${VERBOSE}" -eq 1 ]]; then
+ verbose_opt="--verbose"
+ logfile=/dev/stdout
+ fi
+
+ if [[ "${BUILD}" -eq 1 ]]; then
+ kernel_opt="${KERNEL_CHECKOUT}"
+ fi
+
+ vng \
+ --run \
+ ${kernel_opt} \
+ ${verbose_opt} \
+ --qemu-opts="${QEMU_OPTS}" \
+ --qemu="${qemu}" \
+ --user root \
+ --append "${KERNEL_CMDLINE}" \
+ --rw &> ${logfile} &
+
+ if ! timeout ${WAIT_TOTAL} \
+ bash -c 'while [[ ! -s '"${QEMU_PIDFILE}"' ]]; do sleep 1; done; exit 0'; then
+ die "failed to boot VM"
+ fi
+}
+
+vm_wait_for_ssh() {
+ local i
+
+ i=0
+ while true; do
+ if [[ ${i} -gt ${WAIT_PERIOD_MAX} ]]; then
+ die "Timed out waiting for guest ssh"
+ fi
+ if vm_ssh -- true; then
+ break
+ fi
+ i=$(( i + 1 ))
+ sleep ${WAIT_PERIOD}
+ done
+}
+
+# derived from selftests/net/net_helper.sh
+wait_for_listener()
+{
+ local port=$1
+ local interval=$2
+ local max_intervals=$3
+ local protocol=tcp
+ local pattern
+ local i
+
+ pattern=":$(printf "%04X" "${port}") "
+
+ # for tcp protocol additionally check the socket state
+ [ "${protocol}" = "tcp" ] && pattern="${pattern}0A"
+ for i in $(seq "${max_intervals}"); do
+ if awk '{print $2" "$4}' /proc/net/"${protocol}"* | \
+ grep -q "${pattern}"; then
+ break
+ fi
+ sleep "${interval}"
+ done
+}
+
+vm_wait_for_listener() {
+ local port=$1
+
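+ # Ship the local wait_for_listener() definition into the guest shell so
+ # it can poll the guest's own /proc/net/tcp for the listener.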
+ vm_ssh <<EOF
+$(declare -f wait_for_listener)
+wait_for_listener ${port} ${WAIT_PERIOD} ${WAIT_PERIOD_MAX}
+EOF
+}
+
+host_wait_for_listener() {
+ wait_for_listener "${TEST_HOST_PORT_LISTENER}" "${WAIT_PERIOD}" "${WAIT_PERIOD_MAX}"
+}
+
+__log_stdin() {
+ cat | awk '{ printf "%s:\t%s\n","'"${prefix}"'", $0 }'
+}
+
+__log_args() {
+ echo "$*" | awk '{ printf "%s:\t%s\n","'"${prefix}"'", $0 }'
+}
+
+log() {
+ local prefix="$1"
+
+ shift
+ local redirect=
+ if [[ ${VERBOSE} -eq 0 ]]; then
+ redirect=/dev/null
+ else
+ redirect=/dev/stdout
+ fi
+
+ if [[ "$#" -eq 0 ]]; then
+ __log_stdin | tee -a "${LOG}" > ${redirect}
+ else
+ __log_args "$@" | tee -a "${LOG}" > ${redirect}
+ fi
+}
+
+log_setup() {
+ log "setup" "$@"
+}
+
+log_host() {
+ local testname=$1
+
+ shift
+ log "test:${testname}:host" "$@"
+}
+
+log_guest() {
+ local testname=$1
+
+ shift
+ log "test:${testname}:guest" "$@"
+}
+
+test_vm_server_host_client() {
+ local testname="${FUNCNAME[0]#test_}"
+
+ vm_ssh -- "${VSOCK_TEST}" \
+ --mode=server \
+ --control-port="${TEST_GUEST_PORT}" \
+ --peer-cid=2 \
+ 2>&1 | log_guest "${testname}" &
+
+ vm_wait_for_listener "${TEST_GUEST_PORT}"
+
+ ${VSOCK_TEST} \
+ --mode=client \
+ --control-host=127.0.0.1 \
+ --peer-cid="${VSOCK_CID}" \
+ --control-port="${TEST_HOST_PORT}" 2>&1 | log_host "${testname}"
+
+ return $?
+}
+
+test_vm_client_host_server() {
+ local testname="${FUNCNAME[0]#test_}"
+
+ ${VSOCK_TEST} \
+ --mode "server" \
+ --control-port "${TEST_HOST_PORT_LISTENER}" \
+ --peer-cid "${VSOCK_CID}" 2>&1 | log_host "${testname}" &
+
+ host_wait_for_listener
+
+ vm_ssh -- "${VSOCK_TEST}" \
+ --mode=client \
+ --control-host=10.0.2.2 \
+ --peer-cid=2 \
+ --control-port="${TEST_HOST_PORT_LISTENER}" 2>&1 | log_guest "${testname}"
+
+ return $?
+}
+
+test_vm_loopback() {
+ local testname="${FUNCNAME[0]#test_}"
+ local port=60000 # non-forwarded local port
+
+ vm_ssh -- "${VSOCK_TEST}" \
+ --mode=server \
+ --control-port="${port}" \
+ --peer-cid=1 2>&1 | log_guest "${testname}" &
+
+ vm_wait_for_listener "${port}"
+
+ vm_ssh -- "${VSOCK_TEST}" \
+ --mode=client \
+ --control-host="127.0.0.1" \
+ --control-port="${port}" \
+ --peer-cid=1 2>&1 | log_guest "${testname}"
+
+ return $?
+}
+
+run_test() {
+ local host_oops_cnt_before
+ local host_warn_cnt_before
+ local vm_oops_cnt_before
+ local vm_warn_cnt_before
+ local host_oops_cnt_after
+ local host_warn_cnt_after
+ local vm_oops_cnt_after
+ local vm_warn_cnt_after
+ local name
+ local rc
+
+ host_oops_cnt_before=$(dmesg | grep -c -i 'Oops')
+ host_warn_cnt_before=$(dmesg --level=warn | wc -l)
+ vm_oops_cnt_before=$(vm_ssh -- dmesg | grep -c -i 'Oops')
+ vm_warn_cnt_before=$(vm_ssh -- dmesg --level=warn | wc -l)
+
+ name=$(echo "${1}" | awk '{ print $1 }')
+ eval test_"${name}"
+ rc=$?
+
+ host_oops_cnt_after=$(dmesg | grep -i 'Oops' | wc -l)
+ if [[ ${host_oops_cnt_after} -gt ${host_oops_cnt_before} ]]; then
+ echo "FAIL: kernel oops detected on host" | log_host "${name}"
+ rc=$KSFT_FAIL
+ fi
+
+ host_warn_cnt_after=$(dmesg --level=warn | wc -l)
+ if [[ ${host_warn_cnt_after} -gt ${host_warn_cnt_before} ]]; then
+ echo "FAIL: kernel warning detected on host" | log_host "${name}"
+ rc=$KSFT_FAIL
+ fi
+
+ vm_oops_cnt_after=$(vm_ssh -- dmesg | grep -i 'Oops' | wc -l)
+ if [[ ${vm_oops_cnt_after} -gt ${vm_oops_cnt_before} ]]; then
+ echo "FAIL: kernel oops detected on vm" | log_host "${name}"
+ rc=$KSFT_FAIL
+ fi
+
+ vm_warn_cnt_after=$(vm_ssh -- dmesg --level=warn | wc -l)
+ if [[ ${vm_warn_cnt_after} -gt ${vm_warn_cnt_before} ]]; then
+ echo "FAIL: kernel warning detected on vm" | log_host "${name}"
+ rc=$KSFT_FAIL
+ fi
+
+ return "${rc}"
+}
+
+QEMU="qemu-system-$(uname -m)"
+
+while getopts :hvq:b o
+do
+ case $o in
+ v) VERBOSE=1;;
+ b) BUILD=1;;
+ q) QEMU=$OPTARG;;
+ h|*) usage;;
+ esac
+done
+shift $((OPTIND-1))
+
+trap cleanup EXIT
+
+if [[ ${#} -eq 0 ]]; then
+ ARGS=("${TEST_NAMES[@]}")
+else
+ ARGS=("$@")
+fi
+
+check_args "${ARGS[@]}"
+check_deps
+check_vng
+handle_build
+
+echo "1..${#ARGS[@]}"
+
+log_setup "Booting up VM"
+vm_start
+vm_wait_for_ssh
+log_setup "VM booted up"
+
+cnt_pass=0
+cnt_fail=0
+cnt_skip=0
+cnt_total=0
+for arg in "${ARGS[@]}"; do
+ run_test "${arg}"
+ rc=$?
+ cnt_total=$(( cnt_total + 1 ))
+ if [[ ${rc} -eq $KSFT_PASS ]]; then
+ cnt_pass=$(( cnt_pass + 1 ))
+ echo "ok ${cnt_total} ${arg}"
+ elif [[ ${rc} -eq $KSFT_SKIP ]]; then
+ cnt_skip=$(( cnt_skip + 1 ))
+ echo "ok ${cnt_total} ${arg} # SKIP"
+ elif [[ ${rc} -eq $KSFT_FAIL ]]; then
+ cnt_fail=$(( cnt_fail + 1 ))
+ echo "not ok ${cnt_total} ${arg} # exit=$rc"
+ fi
+done
+
+echo "SUMMARY: PASS=${cnt_pass} SKIP=${cnt_skip} FAIL=${cnt_fail}"
+echo "Log: ${LOG}"
+
+if [ $((cnt_pass + cnt_skip)) -eq ${cnt_total} ]; then
+ exit "$KSFT_PASS"
+else
+ exit "$KSFT_FAIL"
+fi
diff --git a/tools/testing/vsock/util.c b/tools/testing/vsock/util.c
index 0c7e9cbcbc85..803f1e075b62 100644
--- a/tools/testing/vsock/util.c
+++ b/tools/testing/vsock/util.c
@@ -7,6 +7,7 @@
* Author: Stefan Hajnoczi <stefanha@redhat.com>
*/
+#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdint.h>
@@ -23,6 +24,9 @@
#include "control.h"
#include "util.h"
+#define KALLSYMS_PATH "/proc/kallsyms"
+#define KALLSYMS_LINE_LEN 512
+
/* Install signal handlers */
void init_signals(void)
{
@@ -121,15 +125,17 @@ bool vsock_wait_sent(int fd)
return !ret;
}
-/* Create socket <type>, bind to <cid, port> and return the file descriptor. */
-int vsock_bind(unsigned int cid, unsigned int port, int type)
+/* Create socket <type>, bind to <cid, port>.
+ * Return the file descriptor, or -1 on error.
+ */
+int vsock_bind_try(unsigned int cid, unsigned int port, int type)
{
struct sockaddr_vm sa = {
.svm_family = AF_VSOCK,
.svm_cid = cid,
.svm_port = port,
};
- int fd;
+ int fd, saved_errno;
fd = socket(AF_VSOCK, type, 0);
if (fd < 0) {
@@ -138,6 +144,22 @@ int vsock_bind(unsigned int cid, unsigned int port, int type)
}
if (bind(fd, (struct sockaddr *)&sa, sizeof(sa))) {
+ saved_errno = errno;
+ close(fd);
+ errno = saved_errno;
+ fd = -1;
+ }
+
+ return fd;
+}
+
+/* Create socket <type>, bind to <cid, port> and return the file descriptor. */
+int vsock_bind(unsigned int cid, unsigned int port, int type)
+{
+ int fd;
+
+ fd = vsock_bind_try(cid, port, type);
+ if (fd < 0) {
perror("bind");
exit(EXIT_FAILURE);
}
@@ -836,3 +858,55 @@ void enable_so_linger(int fd, int timeout)
exit(EXIT_FAILURE);
}
}
+
+static int __get_transports(void)
+{
+ char buf[KALLSYMS_LINE_LEN];
+ const char *ksym;
+ int ret = 0;
+ FILE *f;
+
+ f = fopen(KALLSYMS_PATH, "r");
+ if (!f) {
+ perror("Can't open " KALLSYMS_PATH);
+ exit(EXIT_FAILURE);
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ char *match;
+ int i;
+
+ assert(buf[strlen(buf) - 1] == '\n');
+
+ for (i = 0; i < TRANSPORT_NUM; ++i) {
+ if (ret & BIT(i))
+ continue;
+
+ /* Match should be followed by '\t' or '\n'.
+ * See kallsyms.c:s_show().
+ */
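+ /* e.g. "ffffffffc0b2d100 d loopback_transport" (address
+ * illustrative; a "[module]" suffix may follow when the
+ * transport is built as a module).
+ */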
+ ksym = transport_ksyms[i];
+ match = strstr(buf, ksym);
+ if (match && isspace(match[strlen(ksym)])) {
+ ret |= BIT(i);
+ break;
+ }
+ }
+ }
+
+ fclose(f);
+ return ret;
+}
+
+/* Return integer with TRANSPORT_* bit set for every (known) registered vsock
+ * transport.
+ */
+int get_transports(void)
+{
+ static int tr = -1;
+
+ if (tr == -1)
+ tr = __get_transports();
+
+ return tr;
+}
diff --git a/tools/testing/vsock/util.h b/tools/testing/vsock/util.h
index 5e2db67072d5..71895192cc02 100644
--- a/tools/testing/vsock/util.h
+++ b/tools/testing/vsock/util.h
@@ -3,8 +3,36 @@
#define UTIL_H
#include <sys/socket.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
#include <linux/vm_sockets.h>
+/* All known vsock transports, see callers of vsock_core_register() */
+#define KNOWN_TRANSPORTS(x) \
+ x(LOOPBACK, "loopback") \
+ x(VIRTIO, "virtio") \
+ x(VHOST, "vhost") \
+ x(VMCI, "vmci") \
+ x(HYPERV, "hvs")
+
+enum transport {
+ TRANSPORT_COUNTER_BASE = __COUNTER__ + 1,
+ #define x(name, symbol) \
+ TRANSPORT_##name = BIT(__COUNTER__ - TRANSPORT_COUNTER_BASE),
+ KNOWN_TRANSPORTS(x)
+ TRANSPORT_NUM = __COUNTER__ - TRANSPORT_COUNTER_BASE,
+ #undef x
+};
+
+static const char * const transport_ksyms[] = {
+ #define x(name, symbol) "d " symbol "_transport",
+ KNOWN_TRANSPORTS(x)
+ #undef x
+};
+
+static_assert(ARRAY_SIZE(transport_ksyms) == TRANSPORT_NUM);
+static_assert(BITS_PER_TYPE(int) >= TRANSPORT_NUM);
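+
+/* The x-macro above expands to TRANSPORT_LOOPBACK == BIT(0) through
+ * TRANSPORT_HYPERV == BIT(4), with TRANSPORT_NUM == 5, and to matching
+ * transport_ksyms[] entries such as "d loopback_transport".
+ */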
+
/* Tests can either run as the client or the server */
enum test_mode {
TEST_MODE_UNSET,
@@ -44,6 +72,7 @@ int vsock_connect(unsigned int cid, unsigned int port, int type);
int vsock_accept(unsigned int cid, unsigned int port,
struct sockaddr_vm *clientaddrp, int type);
int vsock_stream_connect(unsigned int cid, unsigned int port);
+int vsock_bind_try(unsigned int cid, unsigned int port, int type);
int vsock_bind(unsigned int cid, unsigned int port, int type);
int vsock_bind_connect(unsigned int cid, unsigned int port,
unsigned int bind_port, int type);
@@ -81,4 +110,5 @@ void setsockopt_timeval_check(int fd, int level, int optname,
struct timeval val, char const *errmsg);
void enable_so_zerocopy_check(int fd);
void enable_so_linger(int fd, int timeout);
+int get_transports(void);
#endif /* UTIL_H */
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index f669baaa0dca..eb6f54378667 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -1718,16 +1718,27 @@ static void test_stream_msgzcopy_leak_zcskb_server(const struct test_opts *opts)
#define MAX_PORT_RETRIES 24 /* net/vmw_vsock/af_vsock.c */
-/* Test attempts to trigger a transport release for an unbound socket. This can
- * lead to a reference count mishandling.
- */
-static void test_stream_transport_uaf_client(const struct test_opts *opts)
+static bool test_stream_transport_uaf(int cid)
{
int sockets[MAX_PORT_RETRIES];
struct sockaddr_vm addr;
- int fd, i, alen;
+ socklen_t alen;
+ int fd, i, c;
+ bool ret;
+
+ /* Probe for a transport by attempting a local CID bind. Unavailable
+ * transport (or more specifically: an unsupported transport/CID
+ * combination) results in EADDRNOTAVAIL, other errnos are fatal.
+ */
+ fd = vsock_bind_try(cid, VMADDR_PORT_ANY, SOCK_STREAM);
+ if (fd < 0) {
+ if (errno != EADDRNOTAVAIL) {
+ perror("Unexpected bind() errno");
+ exit(EXIT_FAILURE);
+ }
- fd = vsock_bind(VMADDR_CID_ANY, VMADDR_PORT_ANY, SOCK_STREAM);
+ return false;
+ }
alen = sizeof(addr);
if (getsockname(fd, (struct sockaddr *)&addr, &alen)) {
@@ -1735,38 +1746,83 @@ static void test_stream_transport_uaf_client(const struct test_opts *opts)
exit(EXIT_FAILURE);
}
+ /* Drain the autobind pool; see __vsock_bind_connectible(). */
for (i = 0; i < MAX_PORT_RETRIES; ++i)
- sockets[i] = vsock_bind(VMADDR_CID_ANY, ++addr.svm_port,
- SOCK_STREAM);
+ sockets[i] = vsock_bind(cid, ++addr.svm_port, SOCK_STREAM);
close(fd);
- fd = socket(AF_VSOCK, SOCK_STREAM, 0);
+
+ /* Setting SOCK_NONBLOCK makes connect() return soon after
+ * (re-)assigning the transport. We are not connecting to anything
+ * anyway, so there is no point entering the main loop in
+ * vsock_connect(); waiting for timeout, checking for signals, etc.
+ */
+ fd = socket(AF_VSOCK, SOCK_STREAM | SOCK_NONBLOCK, 0);
if (fd < 0) {
perror("socket");
exit(EXIT_FAILURE);
}
- if (!vsock_connect_fd(fd, addr.svm_cid, addr.svm_port)) {
- perror("Unexpected connect() #1 success");
+ /* Assign transport, while failing to autobind. Autobind pool was
+ * drained, so EADDRNOTAVAIL coming from __vsock_bind_connectible() is
+ * expected.
+ *
+ * One exception is ENODEV which is thrown by vsock_assign_transport(),
+ * i.e. before vsock_auto_bind(), when the only transport loaded is
+ * vhost.
+ */
+ if (!connect(fd, (struct sockaddr *)&addr, alen)) {
+ fprintf(stderr, "Unexpected connect() success\n");
exit(EXIT_FAILURE);
}
-
- /* Vulnerable system may crash now. */
- if (!vsock_connect_fd(fd, VMADDR_CID_HOST, VMADDR_PORT_ANY)) {
- perror("Unexpected connect() #2 success");
+ if (errno == ENODEV && cid == VMADDR_CID_HOST) {
+ ret = false;
+ goto cleanup;
+ }
+ if (errno != EADDRNOTAVAIL) {
+ perror("Unexpected connect() errno");
exit(EXIT_FAILURE);
}
+ /* Reassign transport, triggering old transport release and
+ * (potentially) unbinding of an unbound socket.
+ *
+ * Vulnerable system may crash now.
+ */
+ for (c = VMADDR_CID_HYPERVISOR; c <= VMADDR_CID_HOST + 1; ++c) {
+ if (c != cid) {
+ addr.svm_cid = c;
+ (void)connect(fd, (struct sockaddr *)&addr, alen);
+ }
+ }
+
+ ret = true;
+cleanup:
close(fd);
while (i--)
close(sockets[i]);
- control_writeln("DONE");
+ return ret;
}
-static void test_stream_transport_uaf_server(const struct test_opts *opts)
+/* Test attempts to trigger a transport release for an unbound socket. This can
+ * lead to a reference count mishandling.
+ */
+static void test_stream_transport_uaf_client(const struct test_opts *opts)
{
- control_expectln("DONE");
+ bool tested = false;
+ int cid, tr;
+
+ for (cid = VMADDR_CID_HYPERVISOR; cid <= VMADDR_CID_HOST + 1; ++cid)
+ tested |= test_stream_transport_uaf(cid);
+
+ tr = get_transports();
+ if (!tr)
+ fprintf(stderr, "No transports detected\n");
+ else if (tr == TRANSPORT_VIRTIO)
+ fprintf(stderr, "Setup unsupported: sole virtio transport\n");
+ else if (!tested)
+ fprintf(stderr, "No transports tested\n");
}
static void test_stream_connect_retry_client(const struct test_opts *opts)
@@ -2034,7 +2090,6 @@ static struct test_case test_cases[] = {
{
.name = "SOCK_STREAM transport release use-after-free",
.run_client = test_stream_transport_uaf_client,
- .run_server = test_stream_transport_uaf_server,
},
{
.name = "SOCK_STREAM retry failed connect()",