 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 93 ++++++++++++++++++++++++----
 drivers/net/ethernet/intel/idpf/idpf_txrx.h |  8 +++---
 2 files changed, 91 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 66a1b040639d..9b63944235fb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -139,6 +139,9 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
if (!txq->desc_ring)
return;
+ if (txq->refillq)
+ kfree(txq->refillq->ring);
+
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
txq->next_to_use = 0;
@@ -244,6 +247,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
+ struct idpf_sw_queue *refillq;
int err;
err = idpf_tx_buf_alloc_all(tx_q);
@@ -267,6 +271,29 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
tx_q->next_to_clean = 0;
idpf_queue_set(GEN_CHK, tx_q);
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ refillq = tx_q->refillq;
+ refillq->desc_count = tx_q->desc_count;
+ refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (unsigned int i = 0; i < refillq->desc_count; i++)
+ refillq->ring[i] =
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
+ idpf_queue_has(GEN_CHK, refillq));
+
+ /* Go ahead and flip the GEN bit since this counts as filling
+ * up the ring, i.e. we already ring wrapped.
+ */
+ idpf_queue_change(GEN_CHK, refillq);
+
return 0;
err_alloc:
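Note: the seeding above pre-loads every buffer ID into the refill ring tagged
with the current GEN bit, then flips GEN because a fully stocked ring is
indistinguishable from one that has already wrapped once. A minimal user-space
model of that init step (the struct and names such as refillq_model are
hypothetical, not driver code; in the driver the two generation bits live in
the queue flags as GEN_CHK for the producer and RFL_GEN_CHK for the consumer):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define RFL_BI_BUFID_M	0x0ffffu	/* bits 15:0: buffer ID   */
#define RFL_BI_GEN_M	0x10000u	/* bit 16: generation bit */

struct refillq_model {
	uint32_t *ring;
	uint32_t desc_count;
	bool post_gen;			/* producer GEN (GEN_CHK)     */
	bool clean_gen;			/* consumer GEN (RFL_GEN_CHK) */
};

static int refillq_model_init(struct refillq_model *q, uint32_t count)
{
	q->ring = calloc(count, sizeof(*q->ring));
	if (!q->ring)
		return -1;

	q->desc_count = count;
	q->post_gen = q->clean_gen = true;

	/* Every buffer ID starts out free, stamped with the current GEN */
	for (uint32_t i = 0; i < count; i++)
		q->ring[i] = (i & RFL_BI_BUFID_M) |
			     (q->post_gen ? RFL_BI_GEN_M : 0);

	/* Seeding fills the ring completely, which is the same as having
	 * wrapped once, so flip the producer's GEN bit up front.
	 */
	q->post_gen = !q->post_gen;

	return 0;
}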
@@ -603,18 +630,18 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
}
/**
- * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * idpf_post_buf_refill - Post buffer id to refill queue
* @refillq: refill queue to post to
* @buf_id: buffer id to post
*/
-static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
- FIELD_PREP(IDPF_RX_BI_GEN_M,
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
@@ -995,6 +1022,11 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++) {
+ if (flow_sch_en) {
+ kfree(txq_grp->txqs[j]->refillq);
+ txq_grp->txqs[j]->refillq = NULL;
+ }
+
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
@@ -1414,6 +1446,13 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
}
idpf_queue_set(FLOW_SCH_EN, q);
+
+ q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
+ if (!q->refillq)
+ goto err_alloc;
+
+ idpf_queue_set(GEN_CHK, q->refillq);
+ idpf_queue_set(RFL_GEN_CHK, q->refillq);
}
if (!split)
@@ -2005,6 +2044,8 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
+ idpf_post_buf_refill(txq->refillq, compl_tag);
+
/* If we didn't clean anything on the ring, this packet must be
* in the hash table. Go clean it there.
*/
@@ -2365,6 +2406,37 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
}
/**
+ * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
+ * @refillq: refill queue to get buffer ID from
+ * @buf_id: return buffer ID
+ *
+ * Return: true if a buffer ID was found, false if not
+ */
+static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
+ u16 *buf_id)
+{
+ u32 ntc = refillq->next_to_clean;
+ u32 refill_desc;
+
+ refill_desc = refillq->ring[ntc];
+
+ if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RFL_BI_GEN_M)))
+ return false;
+
+ *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ ntc = 0;
+ }
+
+ refillq->next_to_clean = ntc;
+
+ return true;
+}
+
+/**
* idpf_tx_splitq_map - Build the Tx flex descriptor
* @tx_q: queue to send buffer on
* @params: pointer to splitq params struct
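Note: idpf_tx_get_free_buf_id() is the consumer side of the protocol seeded
above. A slot is only valid while its stored GEN bit matches the consumer's
RFL_GEN_CHK expectation, which flips on every wrap; a mismatch means the slot
was handed out on this lap and has not been re-posted by the completion path
yet, i.e. no free buffer IDs remain. A sketch of both sides under the same toy
model (restated so it stands alone; names hypothetical, not driver code):

#include <stdbool.h>
#include <stdint.h>

#define RFL_BI_BUFID_M	0x0ffffu
#define RFL_BI_GEN_M	0x10000u

struct refillq_model {
	uint32_t *ring;
	uint32_t desc_count;
	uint32_t next_to_use;	/* producer cursor */
	uint32_t next_to_clean;	/* consumer cursor */
	bool post_gen;		/* producer GEN (GEN_CHK)     */
	bool clean_gen;		/* consumer GEN (RFL_GEN_CHK) */
};

/* Producer: the completion handler returns buf_id to the pool */
static void refillq_model_post(struct refillq_model *q, uint16_t buf_id)
{
	q->ring[q->next_to_use] = (buf_id & RFL_BI_BUFID_M) |
				  (q->post_gen ? RFL_BI_GEN_M : 0);

	if (++q->next_to_use == q->desc_count) {
		q->next_to_use = 0;
		q->post_gen = !q->post_gen;	/* wrapped: flip GEN */
	}
}

/* Consumer: the transmit hot path pulls a free buf_id, or reports empty */
static bool refillq_model_get(struct refillq_model *q, uint16_t *buf_id)
{
	uint32_t desc = q->ring[q->next_to_clean];

	/* GEN mismatch: this slot was consumed on the previous lap and
	 * has not been re-posted since, so the ring is exhausted.
	 */
	if (q->clean_gen != !!(desc & RFL_BI_GEN_M))
		return false;

	*buf_id = desc & RFL_BI_BUFID_M;

	if (++q->next_to_clean == q->desc_count) {
		q->next_to_clean = 0;
		q->clean_gen = !q->clean_gen;
	}

	return true;
}

int main(void)
{
	uint32_t ring[2];
	struct refillq_model q = {
		.ring = ring, .desc_count = 2,
		/* state right after seeding: producer GEN already flipped */
		.post_gen = false, .clean_gen = true,
	};
	uint16_t id;

	ring[0] = 0 | RFL_BI_GEN_M;	/* seeded: ID 0, GEN=1 */
	ring[1] = 1 | RFL_BI_GEN_M;	/* seeded: ID 1, GEN=1 */

	refillq_model_get(&q, &id);	/* true: ID 0 */
	refillq_model_get(&q, &id);	/* true: ID 1, wraps, clean_gen=0 */

	/* Ring drained: slot 0 still carries GEN=1 but clean_gen is now 0,
	 * so this correctly reports exhaustion instead of re-issuing ID 0.
	 */
	return refillq_model_get(&q, &id) ? 1 : 0;
}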
@@ -2912,6 +2984,13 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
}
if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &tx_params.compl_tag))) {
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+ }
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
/* Set the RE bit to catch any packets that may have not been
@@ -3472,7 +3551,7 @@ payload:
skip_data:
rx_buf->netmem = 0;
- idpf_rx_post_buf_refill(refillq, buf_id);
+ idpf_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
/* skip if it is non EOP desc */
@@ -3580,10 +3659,10 @@ static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
bool failure;
if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
- !!(refill_desc & IDPF_RX_BI_GEN_M))
+ !!(refill_desc & IDPF_RFL_BI_GEN_M))
break;
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 281de655a813..58232a1bd0a9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -108,8 +108,8 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_GEN_M BIT(16)
-#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+#define IDPF_RFL_BI_GEN_M BIT(16)
+#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
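Note: the rename is mechanical — the masks now describe the refill (RFL)
descriptor layout shared by the Rx refill queues and the new Tx refillqs: a
u32 carrying the buffer ID in bits 15:0 and the generation bit at bit 16. For
illustration only (not part of the patch), packing and unpacking one with the
<linux/bitfield.h> helpers the driver already uses:

u32 refill_desc;
u16 buf_id;
bool gen;

/* pack: buffer ID 5 with GEN set -> 0x10005 */
refill_desc = FIELD_PREP(IDPF_RFL_BI_BUFID_M, 5) |
	      FIELD_PREP(IDPF_RFL_BI_GEN_M, 1);

/* unpack */
buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);	/* 5 */
gen = FIELD_GET(IDPF_RFL_BI_GEN_M, refill_desc);	/* true */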
@@ -622,6 +622,7 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @cleaned_pkts: Number of packets cleaned for the above said case
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @stash: Tx buffer stash for Flow-based scheduling mode
+ * @refillq: Pointer to refill queue
* @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
@@ -671,6 +672,7 @@ struct idpf_tx_queue {
u16 tx_max_bufs;
struct idpf_txq_stash *stash;
+ struct idpf_sw_queue *refillq;
u16 compl_tag_bufid_m;
u16 compl_tag_cur_gen;
@@ -692,7 +694,7 @@ struct idpf_tx_queue {
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 112 + sizeof(struct u64_stats_sync),
+ 120 + sizeof(struct u64_stats_sync),
24);
/**
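Note: the 112 -> 120 bump accounts for the new 8-byte refillq pointer, which
lands in the queue's read/write cacheline group (the middle size argument of
libeth_cacheline_set_assert()); the macro pins each group's size so a field
addition must be reflected here or the build fails. A generic model of that
idea (illustrative names and budget, not the libeth macro itself):

#include <stddef.h>

/* Keep the hot read/write fields of a queue inside a known size budget so
 * a stray field addition breaks the build instead of silently pushing hot
 * members onto another cacheline.
 */
struct txq_rw_group_model {
	unsigned short next_to_use;
	unsigned short next_to_clean;
	void *refillq;		/* the newly added pointer: +8 on 64-bit */
};

_Static_assert(sizeof(struct txq_rw_group_model) <= 16,
	       "txq read/write group exceeds its cacheline budget");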