| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-09-22 10:58:13 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-09-22 10:58:13 -0700 |
| commit | 504c25cb76a9cb805407f7701b25a1fbd48605fa (patch) | |
| tree | 0950b1f1ccd34ff02133bd0d73054bd98219faf4 /drivers/net/ethernet/intel | |
| parent | 129e7152184b0224f9ca3f91b870acc14c64e1fa (diff) | |
| parent | 83e4b196838d90799a8879e5054a3beecf9ed256 (diff) | |
Merge tag 'net-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Including fixes from wifi, netfilter and can.
  A handful of awaited fixes here - revert of the FEC changes, bluetooth
  fix, fixes for iwlwifi spew.
  We added a warning in PHY/MDIO code which is triggering on a couple of
  platforms in a false-positive-ish way. If we can't iron that out over
  the week we'll drop it and re-add for 6.1.
  I've added a new "follow up fixes" section for fixes to fixes in
  6.0-rcs but it may actually give the false impression that those are
  problematic or that more testing time would have caught them. So
  likely a one time thing.
  Follow up fixes:
   - nf_tables_addchain: fix nft_counters_enabled underflow
   - ebtables: fix memory leak when blob is malformed
   - nf_ct_ftp: fix deadlock when nat rewrite is needed
  Current release - regressions:
   - Revert "fec: Restart PPS after link state change" and the related
     "net: fec: Use a spinlock to guard `fep->ptp_clk_on`"
   - Bluetooth: fix HCIGETDEVINFO regression
   - wifi: mt76: fix 5 GHz connection regression on mt76x0/mt76x2
   - mptcp: fix fwd memory accounting on coalesce
    - rwlock removal fallout:
      - ipmr: always call ip{,6}_mr_forward() from RCU read-side
        critical section
      - ipv6: fix crash when IPv6 is administratively disabled
   - tcp: read multiple skbs in tcp_read_skb()
   - mdio_bus_phy_resume state warning fallout:
      - eth: ravb: fix PHY state warning splat during system resume
      - eth: sh_eth: fix PHY state warning splat during system resume
  Current release - new code bugs:
   - wifi: iwlwifi: don't spam logs with NSS>2 messages
   - eth: mtk_eth_soc: enable XDP support just for MT7986 SoC
  Previous releases - regressions:
   - bonding: fix NULL deref in bond_rr_gen_slave_id
   - wifi: iwlwifi: mark IWLMEI as broken
  Previous releases - always broken:
   - nf_conntrack helpers:
      - irc: tighten matching on DCC message
      - sip: fix ct_sip_walk_headers
      - osf: fix possible bogus match in nf_osf_find()
   - ipvlan: fix out-of-bound bugs caused by unset skb->mac_header
   - core: fix flow symmetric hash
   - bonding, team: unsync device addresses on ndo_stop
   - phy: micrel: fix shared interrupt on LAN8814"
* tag 'net-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (83 commits)
  selftests: forwarding: add shebang for sch_red.sh
  bnxt: prevent skb UAF after handing over to PTP worker
  net: marvell: Fix refcounting bugs in prestera_port_sfp_bind()
  net: sched: fix possible refcount leak in tc_new_tfilter()
  net: sunhme: Fix packet reception for len < RX_COPY_THRESHOLD
  udp: Use WARN_ON_ONCE() in udp_read_skb()
  selftests: bonding: cause oops in bond_rr_gen_slave_id
  bonding: fix NULL deref in bond_rr_gen_slave_id
  net: phy: micrel: fix shared interrupt on LAN8814
  net/smc: Stop the CLC flow if no link to map buffers on
  ice: Fix ice_xdp_xmit() when XDP TX queue number is not sufficient
  net: atlantic: fix potential memory leak in aq_ndev_close()
  can: gs_usb: gs_usb_set_phys_id(): return with error if identify is not supported
  can: gs_usb: gs_can_open(): fix race dev->can.state condition
  can: flexcan: flexcan_mailbox_read() fix return value for drop = true
  net: sh_eth: Fix PHY state warning splat during system resume
  net: ravb: Fix PHY state warning splat during system resume
  netfilter: nf_ct_ftp: fix deadlock when nat rewrite is needed
  netfilter: ebtables: fix memory leak when blob is malformed
  netfilter: nf_tables: fix percpu memory leak at nf_tables_addchain()
  ...
Diffstat (limited to 'drivers/net/ethernet/intel')
| -rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_main.c | 32 |
| -rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 20 |
| -rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_main.c | 9 |
| -rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_txrx.c | 9 |
| -rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 7 |
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_lib.c | 42 |
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_main.c | 25 |
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_txrx.c | 5 |
8 files changed, 104 insertions, 45 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 10c1e1ea83a1..e3d9804aeb25 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5909,6 +5909,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+	if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+		dev_warn(&vsi->back->pdev->dev,
+			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
+		max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+	} else {
+		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+	}
+
+	return max_tx_rate;
+}
+
+/**
  * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
  * @vsi: VSI to be configured
  * @seid: seid of the channel/VSI
@@ -5930,10 +5950,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
 			max_tx_rate, seid);
 		return -EINVAL;
 	}
-	if (max_tx_rate && max_tx_rate < 50) {
+	if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
 		dev_warn(&pf->pdev->dev,
 			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
-		max_tx_rate = 50;
+		max_tx_rate = I40E_BW_CREDIT_DIVISOR;
 	}
 
 	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -8224,9 +8244,9 @@ config_tc:
 
 	if (i40e_is_tc_mqprio_enabled(pf)) {
 		if (vsi->mqprio_qopt.max_rate[0]) {
-			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+			u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+						  vsi->mqprio_qopt.max_rate[0]);
 
-			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
 			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
 			if (!ret) {
 				u64 credits = max_tx_rate;
@@ -10971,10 +10991,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 	}
 
 	if (vsi->mqprio_qopt.max_rate[0]) {
-		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+		u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+						  vsi->mqprio_qopt.max_rate[0]);
 		u64 credits = 0;
 
-		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
 		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
 		if (ret)
 			goto end_unlock;
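The i40e change above exists because mqprio hands the driver max_rate[0] in bytes per second, while i40e_set_bw_limit() works in Mbit/s with a 50 Mbps floor. Below is a minimal userspace sketch of that unit conversion, assuming divisor values equivalent to the driver's I40E_BW_MBPS_DIVISOR (taken here to be 125000 bytes/s per Mbit/s) and I40E_BW_CREDIT_DIVISOR (50); it only illustrates the arithmetic and is not the driver code.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for the driver's constants (not taken from this diff). */
#define BW_MBPS_DIVISOR   125000ULL /* bytes/s in one Mbit/s */
#define BW_CREDIT_DIVISOR 50ULL     /* minimum usable rate, in Mbit/s */

/* Convert a bytes/s rate to Mbit/s; anything below one Mbit/s worth of bytes
 * is bumped to the 50 Mbps floor, mirroring what the new helper does.
 */
static uint64_t bw_bytes_to_mbits(uint64_t max_tx_rate_bytes)
{
	if (max_tx_rate_bytes < BW_MBPS_DIVISOR)
		return BW_CREDIT_DIVISOR;            /* too small: use the 50 Mbps floor */
	return max_tx_rate_bytes / BW_MBPS_DIVISOR;  /* otherwise plain unit conversion */
}

int main(void)
{
	/* 1 Gbit/s expressed in bytes/s, as mqprio hands it to the driver */
	uint64_t rate = 125000000ULL;

	printf("%llu Mbit/s\n", (unsigned long long)bw_bytes_to_mbits(rate)); /* prints 1000 */
	return 0;
}
```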
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4f184c50f6e8..7e9f6a69eb10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2039,6 +2039,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
 }
 
 /**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+	if (vf->port_vlan_id)
+		max_frame_size -= VLAN_HLEN;
+
+	return max_frame_size;
+}
+
+/**
  * i40e_vc_get_vf_resources_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
 	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
 	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
 
 	if (vf->lan_vsi_idx) {
 		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 10aa99dfdcdb..0c89f16bf1e2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1077,7 +1077,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
-	bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
 	int ret;
 
 	if (!is_valid_ether_addr(addr->sa_data))
@@ -1094,10 +1093,9 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
 		return 0;
 	}
 
-	if (handle_mac)
-		goto done;
-
-	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+	ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+					       iavf_is_mac_set_handled(netdev, addr->sa_data),
+					       msecs_to_jiffies(2500));
 
 	/* If ret < 0 then it means wait was interrupted.
 	 * If ret == 0 then it means we got a timeout.
@@ -1111,7 +1109,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
 	if (!ret)
 		return -EAGAIN;
 
-done:
 	if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
 		return -EACCES;
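The iavf_set_mac() hunk replaces a one-shot pre-check with a wait whose condition is re-evaluated on every wakeup, and then maps the wait's three possible outcomes to errors. A small self-contained sketch of that mapping follows; map_wait_result() and the exact error codes are invented for illustration (the driver propagates the wait primitive's own return values), not taken from the diff.

```c
#include <errno.h>
#include <stdio.h>

/* Stand-in for the value a wait-with-timeout primitive returns:
 *   < 0  -> the sleep was interrupted
 *   == 0 -> the timeout expired before the condition became true
 *   > 0  -> the condition became true (time remaining)
 */
static int map_wait_result(long wait_result, int mac_matches)
{
	if (wait_result < 0)
		return -EINTR;   /* interrupted: caller may retry (illustrative code) */
	if (wait_result == 0)
		return -EAGAIN;  /* timed out: the PF never answered */
	if (!mac_matches)
		return -EACCES;  /* PF answered but the address was not applied */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       map_wait_result(-1, 0),  /* interrupted */
	       map_wait_result(0, 0),   /* timeout     */
	       map_wait_result(5, 1));  /* success     */
	return 0;
}
```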
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 06d18797d25a..18b6a702a1d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
 {
 	u32 head, tail;
 
+	/* underlying hardware might not allow access and/or always return
+	 * 0 for the head/tail registers so just use the cached values
+	 */
 	head = ring->next_to_clean;
-	tail = readl(ring->tail);
+	tail = ring->next_to_use;
 
 	if (head != tail)
 		return (head < tail) ?
@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 #endif
 	struct sk_buff *skb;
 
-	if (!rx_buffer)
+	if (!rx_buffer || !size)
 		return NULL;
 	/* prefetch first cache line of first page */
 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
 			rx_ring->rx_stats.alloc_buff_failed++;
-			if (rx_buffer)
+			if (rx_buffer && size)
 				rx_buffer->pagecnt_bias++;
 			break;
 		}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15ee85dc33bd..5a9e6563923e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -269,11 +269,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
 void iavf_configure_queues(struct iavf_adapter *adapter)
 {
 	struct virtchnl_vsi_queue_config_info *vqci;
-	struct virtchnl_queue_pair_info *vqpi;
+	int i, max_frame = adapter->vf_res->max_mtu;
 	int pairs = adapter->num_active_queues;
-	int i, max_frame = IAVF_MAX_RXBUFFER;
+	struct virtchnl_queue_pair_info *vqpi;
 	size_t len;
 
+	if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+		max_frame = IAVF_MAX_RXBUFFER;
+
 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
 		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
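Read together, the i40e and iavf hunks above form a simple negotiation: the PF advertises a max frame size already reduced by the port VLAN tag, and the VF clamps whatever it receives to its own Rx buffer limit. The sketch below models both halves; VLAN_HLEN = 4 matches the usual 802.1Q tag length, while the 9728-byte buffer limit is only a stand-in for IAVF_MAX_RXBUFFER, not a value confirmed by this diff.

```c
#include <stdint.h>
#include <stdio.h>

#define VLAN_HLEN    4     /* 802.1Q tag length */
#define MAX_RXBUFFER 9728  /* stand-in for the VF's largest Rx buffer */

/* PF side: shrink the advertised max frame when a port VLAN is configured,
 * since the VF cannot see the extra tag the PF will insert.
 */
static uint16_t pf_advertised_max_frame(uint16_t port_max_frame, int has_port_vlan)
{
	return has_port_vlan ? port_max_frame - VLAN_HLEN : port_max_frame;
}

/* VF side: trust the advertised value only if it is sane, otherwise fall back
 * to the local buffer size, mirroring the clamp added to iavf_configure_queues().
 */
static int vf_max_frame(int advertised)
{
	if (advertised > MAX_RXBUFFER || !advertised)
		return MAX_RXBUFFER;
	return advertised;
}

int main(void)
{
	int adv = pf_advertised_max_frame(9728, 1); /* port VLAN present */

	printf("advertised=%d used=%d\n", adv, vf_max_frame(adv)); /* 9724 9724 */
	return 0;
}
```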
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c4ec9264071..58d483e2f539 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -914,7 +914,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
  */
 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 {
-	u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+	u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
 	u16 num_txq_per_tc, num_rxq_per_tc;
 	u16 qcount_tx = vsi->alloc_txq;
 	u16 qcount_rx = vsi->alloc_rxq;
@@ -981,23 +981,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	 * at least 1)
 	 */
 	if (offset)
-		vsi->num_rxq = offset;
+		rx_count = offset;
 	else
-		vsi->num_rxq = num_rxq_per_tc;
+		rx_count = num_rxq_per_tc;
 
-	if (vsi->num_rxq > vsi->alloc_rxq) {
+	if (rx_count > vsi->alloc_rxq) {
 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
-			vsi->num_rxq, vsi->alloc_rxq);
+			rx_count, vsi->alloc_rxq);
 		return -EINVAL;
 	}
 
-	vsi->num_txq = tx_count;
-	if (vsi->num_txq > vsi->alloc_txq) {
+	if (tx_count > vsi->alloc_txq) {
 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
-			vsi->num_txq, vsi->alloc_txq);
+			tx_count, vsi->alloc_txq);
 		return -EINVAL;
 	}
 
+	vsi->num_txq = tx_count;
+	vsi->num_rxq = rx_count;
+
 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
 		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
 		/* since there is a chance that num_rxq could have been changed
@@ -3490,6 +3492,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
 	u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
 	u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
 	int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+	u16 new_txq, new_rxq;
 	u8 netdev_tc = 0;
 	int i;
 
@@ -3530,21 +3533,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
 		}
 	}
 
-	/* Set actual Tx/Rx queue pairs */
-	vsi->num_txq = offset + qcount_tx;
-	if (vsi->num_txq > vsi->alloc_txq) {
+	new_txq = offset + qcount_tx;
+	if (new_txq > vsi->alloc_txq) {
 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
-			vsi->num_txq, vsi->alloc_txq);
+			new_txq, vsi->alloc_txq);
 		return -EINVAL;
 	}
 
-	vsi->num_rxq = offset + qcount_rx;
-	if (vsi->num_rxq > vsi->alloc_rxq) {
+	new_rxq = offset + qcount_rx;
+	if (new_rxq > vsi->alloc_rxq) {
 		dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
-			vsi->num_rxq, vsi->alloc_rxq);
+			new_rxq, vsi->alloc_rxq);
 		return -EINVAL;
 	}
 
+	/* Set actual Tx/Rx queue pairs */
+	vsi->num_txq = new_txq;
+	vsi->num_rxq = new_rxq;
+
 	/* Setup queue TC[0].qmap for given VSI context */
 	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
@@ -3576,6 +3582,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
+	struct ice_tc_cfg old_tc_cfg;
 	struct ice_vsi_ctx *ctx;
 	struct device *dev;
 	int i, ret = 0;
@@ -3600,6 +3607,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 			max_txqs[i] = vsi->num_txq;
 	}
 
+	memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
 	vsi->tc_cfg.ena_tc = ena_tc;
 	vsi->tc_cfg.numtc = num_tc;
 
@@ -3616,8 +3624,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 	else
 		ret = ice_vsi_setup_q_map(vsi, ctx);
 
-	if (ret)
+	if (ret) {
+		memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
 		goto out;
+	}
 
 	/* must to indicate which section of VSI context are being modified */
 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
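The ice_lib.c changes follow a validate-then-commit pattern: candidate queue counts are computed into locals and checked against the allocation before any VSI field is overwritten, and ice_vsi_cfg_tc() snapshots tc_cfg so it can be restored when the queue-map setup fails. Here is a compact sketch of the same pattern with a hypothetical structure and limits, not the driver's types:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct vsi_cfg {
	unsigned int num_txq, num_rxq;   /* committed values, consumed elsewhere */
	unsigned int alloc_txq, alloc_rxq;
};

/* Validate the requested counts first; touch the live config only when both
 * checks pass, so a failed reconfiguration leaves the old state intact.
 */
static int setup_q_map(struct vsi_cfg *cfg, unsigned int want_tx, unsigned int want_rx)
{
	if (want_tx > cfg->alloc_txq || want_rx > cfg->alloc_rxq)
		return -EINVAL;          /* reject without modifying cfg */

	cfg->num_txq = want_tx;          /* commit only after validation */
	cfg->num_rxq = want_rx;
	return 0;
}

int main(void)
{
	struct vsi_cfg cfg = { .num_txq = 4, .num_rxq = 4, .alloc_txq = 8, .alloc_rxq = 8 };
	struct vsi_cfg saved;

	memcpy(&saved, &cfg, sizeof(saved));        /* snapshot, like old_tc_cfg */
	if (setup_q_map(&cfg, 16, 4))               /* too many Tx queues requested */
		memcpy(&cfg, &saved, sizeof(cfg));  /* roll back on failure */

	printf("txq=%u rxq=%u\n", cfg.num_txq, cfg.num_rxq); /* still 4 4 */
	return 0;
}
```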
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8c30eea61b6d..e109cb93886b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
 		return -EBUSY;
 	}
 
-	ice_unplug_aux_dev(pf);
-
 	switch (reset) {
 	case ICE_RESET_PFR:
 		set_bit(ICE_PFR_REQ, pf->state);
@@ -6651,7 +6649,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
  */
 int ice_down(struct ice_vsi *vsi)
 {
-	int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
+	int i, tx_err, rx_err, vlan_err = 0;
 
 	WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
 
@@ -6685,20 +6683,13 @@ int ice_down(struct ice_vsi *vsi)
 
 	ice_napi_disable_all(vsi);
 
-	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
-		link_err = ice_force_phys_link_state(vsi, false);
-		if (link_err)
-			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
-				   vsi->vsi_num, link_err);
-	}
-
 	ice_for_each_txq(vsi, i)
 		ice_clean_tx_ring(vsi->tx_rings[i]);
 
 	ice_for_each_rxq(vsi, i)
 		ice_clean_rx_ring(vsi->rx_rings[i]);
 
-	if (tx_err || rx_err || link_err || vlan_err) {
+	if (tx_err || rx_err || vlan_err) {
 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
 			   vsi->vsi_num, vsi->vsw->sw_id);
 		return -EIO;
@@ -6860,6 +6851,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
 	if (err)
 		goto err_setup_rx;
 
+	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
 	if (vsi->type == ICE_VSI_PF) {
 		/* Notify the stack of the actual queue counts. */
 		err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -8892,6 +8885,16 @@ int ice_stop(struct net_device *netdev)
 		return -EBUSY;
 	}
 
+	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+		int link_err = ice_force_phys_link_state(vsi, false);
+
+		if (link_err) {
+			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+				   vsi->vsi_num, link_err);
+			return -EIO;
+		}
+	}
+
 	ice_vsi_close(vsi);
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 836dce840712..97453d1dfafe 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	if (test_bit(ICE_VSI_DOWN, vsi->state))
 		return -ENETDOWN;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+	if (!ice_is_xdp_ena_vsi(vsi))
 		return -ENXIO;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		xdp_ring = vsi->xdp_rings[queue_index];
 		spin_lock(&xdp_ring->tx_lock);
 	} else {
+		/* Generally, should not happen */
+		if (unlikely(queue_index >= vsi->num_xdp_txq))
+			return -ENXIO;
 		xdp_ring = vsi->xdp_rings[queue_index];
 	}
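Finally, the ice_txrx.c hunk stops rejecting queue_index up front and re-adds the bounds check only on the branch that indexes the ring array directly with the caller's value; the locked branch is assumed to remap the index before use (that remapping is outside the visible context here). A generic sketch of the idea, with a hypothetical ring count:

```c
#include <errno.h>
#include <stdio.h>

#define NUM_XDP_TXQ 4

/* Hypothetical transmit helper: when rings are shared, the index is remapped
 * (and the ring would be locked); only the direct-indexing path has to reject
 * an out-of-range queue index.
 */
static int xdp_pick_ring(unsigned int queue_index, int shared_rings)
{
	unsigned int ring;

	if (shared_rings) {
		ring = queue_index % NUM_XDP_TXQ; /* remapped: any index is usable */
	} else {
		if (queue_index >= NUM_XDP_TXQ)   /* direct use: must be in range */
			return -ENXIO;
		ring = queue_index;
	}
	return (int)ring;
}

int main(void)
{
	printf("%d %d %d\n",
	       xdp_pick_ring(6, 1),   /* wrapped onto a shared ring */
	       xdp_pick_ring(6, 0),   /* rejected: out of range     */
	       xdp_pick_ring(2, 0));  /* valid direct index         */
	return 0;
}
```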
