path: root/drivers/net/ethernet/microsoft/mana/mana_en.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2025-07-30 08:58:55 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2025-07-30 08:58:55 -0700
commit    8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf (patch)
tree      fec3039a08284cd87f4ec9c3bea5b5a439f1859f /drivers/net/ethernet/microsoft/mana/mana_en.c
parent    4b290aae788e06561754b28c6842e4080957d3f7 (diff)
parent    fa582ca7e187a15e772e6a72fe035f649b387a60 (diff)
Merge tag 'net-next-6.17' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Core & protocols:

   - Wrap datapath globals into net_aligned_data, to avoid false sharing

   - Preserve MSG_ZEROCOPY in forwarding (e.g. out of a container)

   - Add SO_INQ and SCM_INQ support to AF_UNIX

   - Add SIOCINQ support to AF_VSOCK

   - Add TCP_MAXSEG sockopt to MPTCP

   - Add IPv6 force_forwarding sysctl to enable forwarding per interface

   - Make TCP validation of whether packet fully fits in the receive
     window and the rcv_buf more strict. With increased use of HW
     aggregation a single "packet" can be multiple 100s of kB

   - Add MSG_MORE flag to optimize large TCP transmissions via sockmap,
     improves latency up to 33% for sockmap users

   - Convert TCP send queue handling from tasklet to BH workqueue

   - Improve BPF iteration over TCP sockets to see each socket exactly
     once

   - Remove obsolete and unused TCP RFC3517/RFC6675 loss recovery code

   - Support enabling kernel threads for NAPI processing on per-NAPI
     instance basis rather than a whole device. Fully stop the kernel
     NAPI thread when threaded NAPI gets disabled. Previously thread
     would stick around until ifdown due to tricky synchronization

   - Allow multicast routing to take effect on locally-generated packets

   - Add output interface argument for End.X in segment routing

   - MCTP: add support for gateway routing, improve bind() handling

   - Don't require rtnl_lock when fetching an IPv6 neighbor over Netlink

   - Add a new neighbor flag ("extern_valid"), which cedes refresh
     responsibilities to userspace. This is needed for EVPN multi-homing
     where a neighbor entry for a multi-homed host needs to be synced
     across all the VTEPs among which the host is multi-homed

   - Support NUD_PERMANENT for proxy neighbor entries

   - Add a new queuing discipline for IETF RFC9332 DualQ Coupled AQM

   - Add sequence numbers to netconsole messages. Unregister netconsole's
     console when all net targets are removed. Code refactoring. Add a
     number of selftests

   - Align IPSec inbound SA lookup to RFC 4301. Only SPI and protocol
     should be used for an inbound SA lookup

   - Support inspecting ref_tracker state via DebugFS

   - Don't force bonding advertisement frames tx to ~333 ms boundaries.
     Add broadcast_neighbor option to send ARP/ND on all bonded links

   - Allow providing upcall pid for the 'execute' command in openvswitch

   - Remove DCCP support from Netfilter's conntrack

   - Disallow multiple packet duplications in the queuing layer

   - Prevent use of deprecated iptables code on PREEMPT_RT

  Driver API:

   - Support RSS and hashing configuration over ethtool Netlink

   - Add dedicated ethtool callbacks for getting and setting hashing
     fields

   - Add support for power budget evaluation strategy in PSE /
     Power-over-Ethernet. Generate Netlink events for overcurrent etc

   - Support DPLL phase offset monitoring across all device inputs.
     Support providing clock reference and SYNC over separate DPLL inputs

   - Support traffic classes in devlink rate API for bandwidth management

   - Remove rtnl_lock dependency from UDP tunnel port configuration

  Device drivers:

   - Add a new Broadcom driver for 800G Ethernet (bnge)

   - Add a standalone driver for Microchip ZL3073x DPLL

   - Remove IBM's NETIUCV device driver

   - Ethernet high-speed NICs:
      - Broadcom (bnxt):
         - support zero-copy Tx of DMABUF memory
         - take page size into account for page pool recycling rings
      - Intel (100G, ice, idpf):
         - idpf: XDP and AF_XDP support preparations
         - idpf: add flow steering
         - add link_down_events statistic
         - clean up the TSPLL code
         - preparations for live VM migration
      - nVidia/Mellanox:
         - support zero-copy Rx/Tx interfaces (DMABUF and io_uring)
         - optimize context memory usage for matchers
         - expose serial numbers in devlink info
         - support PCIe congestion metrics
      - Meta (fbnic):
         - add 25G, 50G, and 100G link modes to phylink
         - support dumping FW logs
      - Marvell/Cavium:
         - support for CN20K generation of the Octeon chips
      - Amazon:
         - add HW clock (without timestamping, just hypervisor time
           access)

   - Ethernet virtual:
      - VirtIO net:
         - support segmentation of UDP-tunnel-encapsulated packets
      - Google (gve):
         - support packet timestamping and clock synchronization
      - Microsoft vNIC:
         - add handler for device-originated servicing events
         - allow dynamic MSI-X vector allocation
         - support Tx bandwidth clamping

   - Ethernet NICs consumer, and embedded:
      - AMD:
         - amd-xgbe: hardware timestamping and PTP clock support
      - Broadcom integrated MACs (bcmgenet, bcmasp):
         - use napi_complete_done() return value to support NAPI polling
         - add support for re-starting auto-negotiation
      - Broadcom switches (b53):
         - support BCM5325 switches
         - add bcm63xx EPHY power control
      - Synopsys (stmmac):
         - lots of code refactoring and cleanups
      - TI:
         - icssg-prueth: read firmware-names from device tree
         - icssg: PRP offload support
      - Microchip:
         - lan78xx: convert to PHYLINK for improved PHY and MAC
           management
         - ksz: add KSZ8463 switch support
      - Intel:
         - support similar queue priority scheme in multi-queue and
           time-sensitive networking (taprio)
         - support packet pre-emption in both
      - RealTek (r8169):
         - enable EEE at 5Gbps on RTL8126
      - Airoha:
         - add PPPoE offload support
         - MDIO bus controller for Airoha AN7583

   - Ethernet PHYs:
      - support for the IPQ5018 internal GE PHY
      - micrel KSZ9477 switch-integrated PHYs:
         - add MDI/MDI-X control support
         - add RX error counters
         - add cable test support
         - add Signal Quality Indicator (SQI) reporting
      - dp83tg720: improve reset handling and reduce link recovery time
      - support bcm54811 (and its MII-Lite interface type)
      - air_en8811h: support resume/suspend
      - support PHY counters for QCA807x and QCA808x
      - support WoL for QCA807x

   - CAN drivers:
      - rcar_canfd: support for Transceiver Delay Compensation
      - kvaser: report FW versions via devlink dev info

   - WiFi:
      - extended regulatory info support (6 GHz)
      - add statistics and beacon monitor for Multi-Link Operation (MLO)
      - support S1G aggregation, improve S1G support
      - add Radio Measurement action fields
      - support per-radio RTS threshold
      - some work around how FIPS affects wifi, which was wrong (RC4 is
        used by TKIP, not only WEP)
      - improvements for unsolicited probe response handling

   - WiFi drivers:
      - RealTek (rtw88):
         - IBSS mode for SDIO devices
      - RealTek (rtw89):
         - BT coexistence for MLO/WiFi7
         - concurrent station + P2P support
         - support for USB devices RTL8851BU/RTL8852BU
      - Intel (iwlwifi):
         - use embedded PNVM in (to be released) FW images to fix
           compatibility issues
         - many cleanups (unused FW APIs, PCIe code, WoWLAN)
         - some FIPS interoperability
      - MediaTek (mt76):
         - firmware recovery improvements
         - more MLO work
      - Qualcomm/Atheros (ath12k):
         - fix scan on multi-radio devices
         - more EHT/Wi-Fi 7 features
         - encapsulation/decapsulation offload
      - Broadcom (brcm80211):
         - support SDIO 43751 device

   - Bluetooth:
      - hci_event: add support for handling LE BIG Sync Lost event
      - ISO: add socket option to report packet seqnum via CMSG
      - ISO: support SCM_TIMESTAMPING for ISO TS

   - Bluetooth drivers:
      - intel_pcie: support Function Level Reset
      - nxpuart: add support for 4M baudrate
      - nxpuart: implement powerup sequence, reset, FW dump, and FW
        loading"

* tag 'net-next-6.17' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1742 commits)
  dpll: zl3073x: Fix build failure
  selftests: bpf: fix legacy netfilter options
  ipv6: annotate data-races around rt->fib6_nsiblings
  ipv6: fix possible infinite loop in fib6_info_uses_dev()
  ipv6: prevent infinite loop in rt6_nlmsg_size()
  ipv6: add a retry logic in net6_rt_notify()
  vrf: Drop existing dst reference in vrf_ip6_input_dst
  net/sched: taprio: align entry index attr validation with mqprio
  net: fsl_pq_mdio: use dev_err_probe
  selftests: rtnetlink.sh: remove esp4_offload after test
  vsock: remove unnecessary null check in vsock_getname()
  igb: xsk: solve negative overflow of nb_pkts in zerocopy mode
  stmmac: xsk: fix negative overflow of budget in zerocopy mode
  dt-bindings: ieee802154: Convert at86rf230.txt yaml format
  net: dsa: microchip: Disable PTP function of KSZ8463
  net: dsa: microchip: Setup fiber ports for KSZ8463
  net: dsa: microchip: Write switch MAC address differently for KSZ8463
  net: dsa: microchip: Use different registers for KSZ8463
  net: dsa: microchip: Add KSZ8463 switch support to KSZ DSA driver
  dt-bindings: net: dsa: microchip: Add KSZ8463 switch support
  ...
Diffstat (limited to 'drivers/net/ethernet/microsoft/mana/mana_en.c')
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 327
1 file changed, 304 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index faad1cb880f8..550843e2164b 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -48,6 +48,15 @@ static const struct file_operations mana_dbg_q_fops = {
.read = mana_dbg_q_read,
};
+static bool mana_en_need_log(struct mana_port_context *apc, int err)
+{
+ if (apc && apc->ac && apc->ac->gdma_dev &&
+ apc->ac->gdma_dev->gdma_context)
+ return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
+ else
+ return true;
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
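[Note on the helper above] mana_en_need_log() is deliberately defensive: it only consults mana_need_log() when the whole apc->ac->gdma_dev->gdma_context chain is populated, and defaults to logging otherwise. A minimal sketch of the intended call-site pattern, assuming mana_need_log() returns false for errors that are expected while the device is being serviced (in line with the "device-originated servicing events" work in this merge):

    /* Sketch, not committed code: gate an error print so errors that
     * are expected during device servicing do not flood the log. */
    if (err) {
            if (mana_en_need_log(apc, err))
                    netdev_err(apc->ndev, "request failed: %d\n", err);
            return;
    }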
@@ -252,10 +261,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct netdev_queue *net_txq;
struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
+ int err, len, num_gso_seg;
unsigned int csum_type;
struct mana_txq *txq;
struct mana_cq *cq;
- int err, len;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -408,6 +417,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_queue_tail(&txq->pending_skbs, skb);
len = skb->len;
+ num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
net_txq = netdev_get_tx_queue(ndev, txq_idx);
err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
@@ -432,10 +442,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* skb may be freed after mana_gd_post_work_request. Do not use it. */
skb = NULL;
+ /* Populate the packet and byte counters based on post-GSO packet
+ * calculations
+ */
tx_stats = &txq->stats;
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += len;
+ tx_stats->packets += num_gso_seg;
+ tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
tx_busy:
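[Note on the stats change above] With GSO, one posted skb becomes num_gso_seg packets on the wire, and every segment after the first repeats the protocol headers that skb->len counts only once. A worked illustration of the byte accounting, with hypothetical sizes and assuming gso_hs is the header length computed earlier in this function:

    /* Illustration only: TSO skb with 54B headers and 10 x 1448B payload.
     * len = 54 + 10 * 1448 = 14534, num_gso_seg = 10, gso_hs = 54.
     * Wire bytes = len + (10 - 1) * 54 = 15020,
     * i.e. 10 packets of 1502 bytes each. */
    u64 wire_bytes = len + ((u64)(num_gso_seg - 1) * gso_hs);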
@@ -720,6 +733,78 @@ out:
return err;
}
+static int mana_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ u32 old_speed, rate;
+ int err;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
+ return -EINVAL;
+ }
+
+ if (apc->handle.id && shaper->handle.id != apc->handle.id) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
+ return -EOPNOTSUPP;
+ }
+
+ if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
+ return -EINVAL;
+ }
+
+ rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
+ rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */
+
+ /* Get current speed */
+ err = mana_query_link_cfg(apc);
+ old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
+
+ if (!err) {
+ err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
+ apc->speed = (err) ? old_speed : rate;
+ apc->handle = (err) ? apc->handle : shaper->handle;
+ }
+
+ return err;
+}
+
+static int mana_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ int err;
+
+ err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
+
+ if (!err) {
+ /* Reset mana port context parameters */
+ apc->handle.id = 0;
+ apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
+ apc->speed = 0;
+ }
+
+ return err;
+}
+
+static void mana_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops mana_shaper_ops = {
+ .set = mana_shaper_set,
+ .delete = mana_shaper_del,
+ .capabilities = mana_shaper_cap,
+};
+
static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
@@ -730,6 +815,7 @@ static const struct net_device_ops mana_devops = {
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .net_shaper_ops = &mana_shaper_ops,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
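[Note on the shaper ops above] mana_shaper_set() accepts only rates that are whole multiples of 100 Mbit/s and converts the net-shaper API's bits-per-second into the device's Mbit/s unit in two div_u64() steps. A worked illustration of the validation and conversion (values are hypothetical):

    /* Illustration only:
     *   bw_max = 2000000000 bps (2 Gbit/s):
     *     2000000000 % 100000000 == 0      -> accepted
     *     rate = 2000000000 / 1000 / 1000  -> 2000 Mbps
     *   bw_max = 250000000 bps (250 Mbit/s):
     *     250000000 % 100000000 != 0       -> rejected with -EINVAL */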
@@ -775,8 +861,13 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
err = mana_gd_send_request(gc, in_len, in_buf, out_len,
out_buf);
if (err || resp->status) {
- dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
- err, resp->status);
+ if (err == -EOPNOTSUPP)
+ return err;
+
+ if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
+ mana_need_log(gc, err))
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
return err ? err : -EPROTO;
}
@@ -851,8 +942,10 @@ static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+
return;
}
@@ -907,8 +1000,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+
return;
}
@@ -1138,7 +1233,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+
goto out;
}
@@ -1162,6 +1259,95 @@ out:
return err;
}
+int mana_query_link_cfg(struct mana_port_context *apc)
+{
+ struct net_device *ndev = apc->ndev;
+ struct mana_query_link_config_resp resp = {};
+ struct mana_query_link_config_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport = apc->port_handle;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to query link config: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured) {
+ err = -EINVAL;
+ return err;
+ }
+ apc->speed = resp.link_speed_mbps;
+ apc->max_speed = resp.qos_speed_mbps;
+ return 0;
+}
+
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping)
+{
+ struct mana_set_bw_clamp_resp resp = {};
+ struct mana_set_bw_clamp_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.link_speed_mbps = speed;
+ req.enable_clamping = enable_clamping;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d",
+ speed, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured)
+ netdev_info(ndev, "QoS is unconfigured\n");
+
+ return 0;
+}
+
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
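[Note on the two new RPCs above] mana_query_link_cfg() and mana_set_bw_clamp() share one error convention, sketched below; the assumption that firmware lacking the message returns -EOPNOTSUPP follows from the netdev_info_once() branches above:

    /* Sketch of the shared error convention, comments only:
     *   transport err == -EOPNOTSUPP   -> log once, propagate
     *   other transport err            -> netdev_err(), propagate
     *   err == 0, resp.hdr.status set  -> map to -EOPNOTSUPP
     *   resp.qos_unconfigured          -> QoS not provisioned on the vPort
     *     (query: fail with -EINVAL; clamp: informational message only) */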
@@ -1233,7 +1419,9 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+
return;
}
@@ -1912,8 +2100,10 @@ static void mana_destroy_txq(struct mana_port_context *apc)
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
apc->tx_qp[i].txq.napi_initialized = false;
}
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2065,8 +2255,11 @@ static int mana_create_txq(struct mana_port_context *apc,
mana_create_txq_debugfs(apc, i);
- netif_napi_add_tx(net, &cq->napi, mana_poll);
- napi_enable(&cq->napi);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
+ netdev_lock_ops_to_full(net);
+ netif_napi_add_locked(net, &cq->napi, mana_poll);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(net);
txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2102,9 +2295,10 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
-
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
}
xdp_rxq_info_unreg(&rxq->xdp_rxq);
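[Note on the NAPI changes above] Both the Tx and Rx teardown paths now wrap the NAPI lifecycle in the netdev instance lock, as the *_locked NAPI APIs require; this matches the per-NAPI-instance threaded-NAPI work in this merge. The pairing, extracted as a sketch:

    /* Sketch: the locked NAPI teardown sequence used above. The
     * *_locked variants must run with the netdev instance lock held. */
    netdev_lock_ops_to_full(napi->dev);   /* upgrade ops lock to full */
    napi_disable_locked(napi);            /* stop polling */
    netif_napi_del_locked(napi);          /* unlink the NAPI instance */
    netdev_unlock_full_to_ops(napi->dev); /* downgrade back to ops lock */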
@@ -2355,14 +2549,18 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+ netdev_lock_ops_to_full(ndev);
+ netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
+ netdev_unlock_full_to_ops(ndev);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- napi_enable(&cq->napi);
+ netdev_lock_ops_to_full(ndev);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(ndev);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
@@ -2612,6 +2810,88 @@ void mana_query_gf_stats(struct mana_port_context *apc)
apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}
+void mana_query_phy_stats(struct mana_port_context *apc)
+{
+ struct mana_query_phy_stat_resp resp = {};
+ struct mana_query_phy_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(req), sizeof(resp));
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev,
+ "Failed to query PHY stats: %d, resp:0x%x\n",
+ err, resp.hdr.status);
+ return;
+ }
+
+ /* Aggregate drop counters */
+ apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
+ apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
+ apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
+ apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
+ apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
+ apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
+ apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
+ apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
+ apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
+ apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
+ apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
+ apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
+ apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
+ apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
+ apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
+ apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
+ apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
+
+ /* Per TC byte Counters */
+ apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
+ apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
+ apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
+ apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
+ apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
+ apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
+ apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
+ apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
+ apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
+ apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
+ apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
+ apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
+ apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
+ apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
+ apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
+ apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
+ apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
+ apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
+ apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
+ apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
+ apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
+ apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
+ apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
+ apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
+ apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
+ apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
+ apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
+ apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
+ apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
+ apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
+ apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
+}
+
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
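[Note on mana_query_phy_stats() above] The per-TC copy block is long but mechanical; since the response and context fields pair up one-to-one, a token-pasting macro could express it more compactly. A hypothetical alternative, not the committed code, assuming the field names pair up exactly as shown:

    /* Hypothetical compression of the per-TC copies. */
    #define MANA_COPY_TC_STAT(dir, kind, tc) \
            (apc->phy_stats.dir##_##kind##_tc##tc##_phy = \
             resp.dir##_##kind##_tc##tc##_phy)

    MANA_COPY_TC_STAT(rx, pkt, 0);   /* ... repeated for rx/tx,   */
    MANA_COPY_TC_STAT(tx, byte, 7);  /* pkt/byte/pause, TC 0-7    */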
@@ -2806,11 +3086,10 @@ static int mana_dealloc_queues(struct net_device *ndev)
apc->rss_state = TRI_STATE_FALSE;
err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
- if (err) {
+ if (err && mana_en_need_log(apc, err))
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
- return err;
- }
+ /* Even in the error case, we still need to clean up the vPort */
mana_destroy_vport(apc);
return 0;
@@ -2919,6 +3198,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
goto free_indir;
}
+ debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
+
return 0;
free_indir: