-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h          |  8
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c      | 11
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c      |  2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c     |  8
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h     |  4
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c   | 11
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 54
7 files changed, 81 insertions, 17 deletions
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 269e9b41645a..2bfdf0ae24cf 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -312,6 +312,9 @@ struct idpf_fsteer_fltr {
  * @num_q_vectors: Number of IRQ vectors allocated
  * @q_vectors: Array of queue vectors
  * @q_vector_idxs: Starting index of queue vectors
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @noirq_dyn_ctl_ena: value to write to the above to enable it
+ * @noirq_v_idx: ID of the NOIRQ vector
  * @max_mtu: device given max possible MTU
  * @default_mac_addr: device will give a default MAC to use
  * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
@@ -358,6 +361,11 @@ struct idpf_vport {
 	u16 num_q_vectors;
 	struct idpf_q_vector *q_vectors;
 	u16 *q_vector_idxs;
+
+	void __iomem *noirq_dyn_ctl;
+	u32 noirq_dyn_ctl_ena;
+	u16 noirq_v_idx;
+
 	u16 max_mtu;
 	u8 default_mac_addr[ETH_ALEN];
 	u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index bfa60f7d43de..3a04a6bd0d7c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -77,7 +77,7 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
 	int num_vecs = vport->num_q_vectors;
 	struct idpf_vec_regs *reg_vals;
 	int num_regs, i, err = 0;
-	u32 rx_itr, tx_itr;
+	u32 rx_itr, tx_itr, val;
 	u16 total_vecs;
 
 	total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -121,6 +121,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
 		intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
 	}
 
+	/* Data vector for NOIRQ queues */
+
+	val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+	vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+	val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
+	      FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+	vport->noirq_dyn_ctl_ena = val;
+
 free_reg_vals:
 	kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 2f9bc7786629..cad8c9426c92 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1142,7 +1142,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 	if (!vport)
 		return vport;
 
-	num_max_q = max(max_q->max_txq, max_q->max_rxq);
+	num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
 	if (!adapter->vport_config[idx]) {
 		struct idpf_vport_config *vport_config;
 		struct idpf_q_coalesce *q_coal;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 976c4e0b8afd..d9f1a73f98c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3507,6 +3507,8 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
 	struct idpf_q_vector *q_vector = vport->q_vectors;
 	int q_idx;
 
+	writel(0, vport->noirq_dyn_ctl);
+
 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
 		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
 }
@@ -3750,6 +3752,8 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
 		if (qv->num_txq || qv->num_rxq)
 			idpf_vport_intr_update_itr_ena_irq(qv);
 	}
+
+	writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
 }
 
 /**
@@ -4061,6 +4065,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
 		for (i = 0; i < vport->num_q_vectors; i++)
 			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
 
+		vport->noirq_v_idx = vport->q_vector_idxs[i];
+
 		return 0;
 	}
 
@@ -4074,6 +4080,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
 	for (i = 0; i < vport->num_q_vectors; i++)
 		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
 
+	vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+
 	kfree(vecids);
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 1c570794e5bc..f8e579dab21a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -58,6 +58,8 @@
 #define IDPF_MBX_Q_VEC		1
 #define IDPF_MIN_Q_VEC		1
 #define IDPF_MIN_RDMA_VEC	2
+/* Data vector for NOIRQ queues */
+#define IDPF_RESERVED_VECS	1
 
 #define IDPF_DFLT_TX_Q_DESC_COUNT		512
 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
@@ -279,6 +281,7 @@ struct idpf_ptype_state {
  * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
  * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
  *		  queue
+ * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
  * @__IDPF_Q_FLAGS_NBITS: Must be last
  */
 enum idpf_queue_flags_t {
@@ -289,6 +292,7 @@ enum idpf_queue_flags_t {
 	__IDPF_Q_CRC_EN,
 	__IDPF_Q_HSPLIT_EN,
 	__IDPF_Q_PTP,
+	__IDPF_Q_NOIRQ,
 	__IDPF_Q_FLAGS_NBITS,
 };
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 259d50fded67..4cc58c83688c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -76,7 +76,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
 	int num_vecs = vport->num_q_vectors;
 	struct idpf_vec_regs *reg_vals;
 	int num_regs, i, err = 0;
-	u32 rx_itr, tx_itr;
+	u32 rx_itr, tx_itr, val;
 	u16 total_vecs;
 
 	total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -120,6 +120,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
 		intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
 	}
 
+	/* Data vector for NOIRQ queues */
+
+	val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+	vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+	val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
+	      FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+	vport->noirq_dyn_ctl_ena = val;
+
 free_reg_vals:
 	kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 3c3c8fc0def3..357358e9043a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2018,21 +2018,31 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
 
 		for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+			const struct idpf_tx_queue *txq = tx_qgrp->txqs[j];
+			const struct idpf_q_vector *vec;
+			u32 v_idx, tx_itr_idx;
+
 			vqv[k].queue_type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
-			vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+			vqv[k].queue_id = cpu_to_le32(txq->q_id);
 
-			if (idpf_is_queue_model_split(vport->txq_model)) {
-				vqv[k].vector_id =
-					cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
-				vqv[k].itr_idx =
-					cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
+			if (idpf_queue_has(NOIRQ, txq))
+				vec = NULL;
+			else if (idpf_is_queue_model_split(vport->txq_model))
+				vec = txq->txq_grp->complq->q_vector;
+			else
+				vec = txq->q_vector;
+
+			if (vec) {
+				v_idx = vec->v_idx;
+				tx_itr_idx = vec->tx_itr_idx;
 			} else {
-				vqv[k].vector_id =
-					cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
-				vqv[k].itr_idx =
-					cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
+				v_idx = vport->noirq_v_idx;
+				tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
 			}
+
+			vqv[k].vector_id = cpu_to_le16(v_idx);
+			vqv[k].itr_idx = cpu_to_le32(tx_itr_idx);
 		}
 	}
 
@@ -2050,6 +2060,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 
 		for (j = 0; j < num_rxq; j++, k++) {
 			struct idpf_rx_queue *rxq;
+			u32 v_idx, rx_itr_idx;
 
 			if (idpf_is_queue_model_split(vport->rxq_model))
 				rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
@@ -2059,8 +2070,17 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
 
 			vqv[k].queue_type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
 			vqv[k].queue_id = cpu_to_le32(rxq->q_id);
-			vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
-			vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+
+			if (idpf_queue_has(NOIRQ, rxq)) {
+				v_idx = vport->noirq_v_idx;
+				rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
+			} else {
+				v_idx = rxq->q_vector->v_idx;
+				rx_itr_idx = rxq->q_vector->rx_itr_idx;
+			}
+
+			vqv[k].vector_id = cpu_to_le16(v_idx);
+			vqv[k].itr_idx = cpu_to_le32(rx_itr_idx);
 		}
 	}
 
@@ -3281,9 +3301,15 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
 {
 	struct idpf_vector_info vec_info;
 	int num_alloc_vecs;
+	u32 req;
 
 	vec_info.num_curr_vecs = vport->num_q_vectors;
-	vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+	if (vec_info.num_curr_vecs)
+		vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
+
+	req = max(vport->num_txq, vport->num_rxq) + IDPF_RESERVED_VECS;
+	vec_info.num_req_vecs = req;
+
 	vec_info.default_vport = vport->default_vport;
 	vec_info.index = vport->idx;
 
@@ -3296,7 +3322,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
 		return -EINVAL;
 	}
 
-	vport->num_q_vectors = num_alloc_vecs;
+	vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
 
 	return 0;
 }
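
For reference, the noirq_dyn_ctl_ena value programmed in idpf_intr_reg_init() / idpf_vf_intr_reg_init() above follows the usual write-back-on-ITR pattern: the reserved data vector keeps flushing descriptor write-backs for its polling-only (NOIRQ) queues, but its interrupt is never enabled. A minimal sketch of composing such a value is shown below; the EX_* bit layouts are placeholders for this example, not the real PF_GLINT_DYN_CTL_* / VF_INT_DYN_CTLN_* definitions from the idpf register headers.

/*
 * Illustrative sketch only -- not part of the patch.  The actual field
 * layouts live in idpf_lan_pf_regs.h / idpf_lan_vf_regs.h; the positions
 * below are placeholders chosen for the example.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_DYN_CTL_INTENA_MSK	BIT(31)		/* placeholder: keep the interrupt masked */
#define EX_DYN_CTL_WB_ON_ITR	BIT(30)		/* placeholder: write back descriptors on ITR expiry */
#define EX_DYN_CTL_ITR_INDX	GENMASK(4, 3)	/* placeholder: ITR index selected by this write */
#define EX_NO_ITR_UPDATE_IDX	3		/* placeholder standing in for IDPF_NO_ITR_UPDATE_IDX */

/* Compose a dyn_ctl enable value that lets busy-polled (NOIRQ) queues keep
 * receiving descriptor write-backs while the vector never fires an IRQ. */
static u32 example_noirq_dyn_ctl_ena(void)
{
	return EX_DYN_CTL_WB_ON_ITR | EX_DYN_CTL_INTENA_MSK |
	       FIELD_PREP(EX_DYN_CTL_ITR_INDX, EX_NO_ITR_UPDATE_IDX);
}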