author    Suraj Gupta <suraj.gupta2@amd.com>    2025-08-13 19:25:59 +0530
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2025-08-28 16:31:12 +0200
commit    87c36bef9e0f56a17719b8f9dc23284eb77c80fd (patch)
tree      83571e3e73cb18974a3d3f29d425f7b0fd5afd01
parent    8d2bf2ec20a67cb49f12c84626e084b6c901ffc2 (diff)
net: xilinx: axienet: Fix RX skb ring management in DMAengine mode
[ Upstream commit fd980bf6e9cdae885105685259421164f843ca55 ]

Submit multiple descriptors in axienet_rx_cb() to fill the Rx skb ring.
This ensures the ring "catches up" on previously missed allocations.

Increment the Rx skb ring head pointer only after a BD has been
successfully allocated. Previously, the head pointer was incremented
before verifying that the descriptor was successfully allocated and had
valid entries, which could leave the ring state inconsistent if
descriptor setup failed.

These changes improve reliability by maintaining adequate descriptor
availability and ensuring proper ring buffer state management.

Fixes: 6a91b846af85 ("net: axienet: Introduce dmaengine support")
Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
Link: https://patch.msgid.link/20250813135559.1555652-1-suraj.gupta2@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c |  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 2d47b35443af..1775e060d39d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1119,6 +1119,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
struct axienet_local *lp = data;
struct sk_buff *skb;
u32 *app_metadata;
+ int i;

skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
skb = skbuf_dma->skb;
@@ -1137,7 +1138,10 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
u64_stats_add(&lp->rx_packets, 1);
u64_stats_add(&lp->rx_bytes, rx_len);
u64_stats_update_end(&lp->rx_stat_sync);
- axienet_rx_submit_desc(lp->ndev);
+
+ for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
+ RX_BUF_NUM_DEFAULT); i++)
+ axienet_rx_submit_desc(lp->ndev);
dma_async_issue_pending(lp->rx_chan);
}
@@ -1394,7 +1398,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
if (!skbuf_dma)
return;

- lp->rx_ring_head++;
skb = netdev_alloc_skb(ndev, lp->max_frm_size);
if (!skb)
return;
@@ -1419,6 +1422,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
skbuf_dma->desc = dma_rx_desc;
dma_rx_desc->callback_param = lp;
dma_rx_desc->callback_result = axienet_dma_rx_cb;
+ lp->rx_ring_head++;
dmaengine_submit(dma_rx_desc);

return;
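
The fix leans on the kernel's circ_buf.h ring accounting. The sketch
below is a minimal standalone toy model of that accounting, not driver
code: the CIRC_CNT()/CIRC_SPACE() macros are copied from
include/linux/circ_buf.h (they require a power-of-two size), RING_SIZE
is a stand-in for RX_BUF_NUM_DEFAULT, and submit_desc() is a
hypothetical stand-in for axienet_rx_submit_desc().

/* circ_demo.c - toy model of the Rx skb ring accounting in this patch.
 * A minimal sketch under the assumptions stated above, not driver code.
 */
#include <stdio.h>
#include <stdbool.h>

/* Copied from include/linux/circ_buf.h; size must be a power of two.
 * CIRC_CNT() is the number of filled slots, CIRC_SPACE() the number of
 * free slots, with one slot always left open so that head == tail
 * means "empty", never "full".
 */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define RING_SIZE 8	/* stand-in for RX_BUF_NUM_DEFAULT */

static unsigned int head, tail;

/* Hypothetical stand-in for axienet_rx_submit_desc(): 'ok' models
 * whether skb and descriptor allocation succeeded. The head pointer
 * advances only on success - the second half of the fix.
 */
static void submit_desc(bool ok)
{
	if (!ok)
		return;	/* failed alloc: ring state untouched */
	head++;		/* advance only after a valid descriptor */
}

int main(void)
{
	int i;

	/* Refill loop as in the fixed axienet_dma_rx_cb(): submit as
	 * many descriptors as the (re-evaluated) free-space count
	 * allows, instead of a single replacement per completion.
	 */
	for (i = 0; i < CIRC_SPACE(head, tail, RING_SIZE); i++)
		submit_desc(true);

	printf("filled=%u space=%u\n",
	       CIRC_CNT(head, tail, RING_SIZE),
	       CIRC_SPACE(head, tail, RING_SIZE));

	/* Consumer (the DMA completion path) retires one buffer. */
	tail++;
	printf("after one completion: filled=%u space=%u\n",
	       CIRC_CNT(head, tail, RING_SIZE),
	       CIRC_SPACE(head, tail, RING_SIZE));
	return 0;
}

Because head only moves after a descriptor is fully built, a failed
allocation leaves both CIRC_CNT() and CIRC_SPACE() untouched, which is
exactly why the patch moves lp->rx_ring_head++ below the descriptor
setup in axienet_rx_submit_desc().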