diff options
Diffstat (limited to 'drivers/infiniband/hw/irdma/utils.c')
| -rw-r--r-- | drivers/infiniband/hw/irdma/utils.c | 168 | 
1 file changed, 156 insertions, 12 deletions
| diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 346c2c5dabdf..ab3c5208a123 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -258,18 +258,16 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,  	u32 local_ipaddr[4] = {};  	bool ipv4 = true; -	real_dev = rdma_vlan_dev_real_dev(netdev); -	if (!real_dev) -		real_dev = netdev; - -	ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); -	if (!ibdev) -		return NOTIFY_DONE; - -	iwdev = to_iwdev(ibdev); -  	switch (event) {  	case NETEVENT_NEIGH_UPDATE: +		real_dev = rdma_vlan_dev_real_dev(netdev); +		if (!real_dev) +			real_dev = netdev; +		ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA); +		if (!ibdev) +			return NOTIFY_DONE; + +		iwdev = to_iwdev(ibdev);  		p = (__be32 *)neigh->primary_key;  		if (neigh->tbl->family == AF_INET6) {  			ipv4 = false; @@ -290,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,  			irdma_manage_arp_cache(iwdev->rf, neigh->ha,  					       local_ipaddr, ipv4,  					       IRDMA_ARP_DELETE); +		ib_device_put(ibdev);  		break;  	default:  		break;  	} -	ib_device_put(ibdev); -  	return NOTIFY_DONE;  } @@ -2498,3 +2495,150 @@ bool irdma_cq_empty(struct irdma_cq *iwcq)  	return polarity != ukcq->polarity;  } + +void irdma_remove_cmpls_list(struct irdma_cq *iwcq) +{ +	struct irdma_cmpl_gen *cmpl_node; +	struct list_head *tmp_node, *list_node; + +	list_for_each_safe (list_node, tmp_node, &iwcq->cmpl_generated) { +		cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list); +		list_del(&cmpl_node->list); +		kfree(cmpl_node); +	} +} + +int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info) +{ +	struct irdma_cmpl_gen *cmpl; + +	if (list_empty(&iwcq->cmpl_generated)) +		return -ENOENT; +	cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list); +	
list_del(&cmpl->list); +	memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info)); +	kfree(cmpl); + +	ibdev_dbg(iwcq->ibcq.device, +		  "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n", +		  __func__, cq_poll_info->qp_id, cq_poll_info->op_type, +		  cq_poll_info->wr_id); + +	return 0; +} + +/** + * irdma_set_cpi_common_values - fill in values for polling info struct + * @cpi: resulting structure of cq_poll_info type + * @qp: QPair + * @qp_num: id of the QP + */ +static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi, +					struct irdma_qp_uk *qp, u32 qp_num) +{ +	cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED; +	cpi->error = true; +	cpi->major_err = IRDMA_FLUSH_MAJOR_ERR; +	cpi->minor_err = FLUSH_GENERAL_ERR; +	cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp; +	cpi->qp_id = qp_num; +} + +static inline void irdma_comp_handler(struct irdma_cq *cq) +{ +	if (!cq->ibcq.comp_handler) +		return; +	if (atomic_cmpxchg(&cq->armed, 1, 0)) +		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +void irdma_generate_flush_completions(struct irdma_qp *iwqp) +{ +	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk; +	struct irdma_ring *sq_ring = &qp->sq_ring; +	struct irdma_ring *rq_ring = &qp->rq_ring; +	struct irdma_cmpl_gen *cmpl; +	__le64 *sw_wqe; +	u64 wqe_qword; +	u32 wqe_idx; +	bool compl_generated = false; +	unsigned long flags1; + +	spin_lock_irqsave(&iwqp->iwscq->lock, flags1); +	if (irdma_cq_empty(iwqp->iwscq)) { +		unsigned long flags2; + +		spin_lock_irqsave(&iwqp->lock, flags2); +		while (IRDMA_RING_MORE_WORK(*sq_ring)) { +			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); +			if (!cmpl) { +				spin_unlock_irqrestore(&iwqp->lock, flags2); +				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); +				return; +			} + +			wqe_idx = sq_ring->tail; +			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + +			cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; +			sw_wqe = qp->sq_base[wqe_idx].elem; +			
get_64bit_val(sw_wqe, 24, &wqe_qword); +			cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, IRDMAQPSQ_OPCODE); +			/* remove the SQ WR by moving SQ tail*/ +			IRDMA_RING_SET_TAIL(*sq_ring, +				sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta); + +			ibdev_dbg(iwqp->iwscq->ibcq.device, +				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n", +				  __func__, cmpl->cpi.wr_id, qp->qp_id); +			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated); +			compl_generated = true; +		} +		spin_unlock_irqrestore(&iwqp->lock, flags2); +		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); +		if (compl_generated) +			irdma_comp_handler(iwqp->iwrcq); +	} else { +		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1); +		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, +				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); +	} + +	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1); +	if (irdma_cq_empty(iwqp->iwrcq)) { +		unsigned long flags2; + +		spin_lock_irqsave(&iwqp->lock, flags2); +		while (IRDMA_RING_MORE_WORK(*rq_ring)) { +			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC); +			if (!cmpl) { +				spin_unlock_irqrestore(&iwqp->lock, flags2); +				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); +				return; +			} + +			wqe_idx = rq_ring->tail; +			irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id); + +			cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx]; +			cmpl->cpi.op_type = IRDMA_OP_TYPE_REC; +			/* remove the RQ WR by moving RQ tail */ +			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1); +			ibdev_dbg(iwqp->iwrcq->ibcq.device, +				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n", +				  __func__, cmpl->cpi.wr_id, qp->qp_id, +				  wqe_idx); +			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated); + +			compl_generated = true; +		} +		spin_unlock_irqrestore(&iwqp->lock, flags2); +		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); +		if (compl_generated) +			irdma_comp_handler(iwqp->iwrcq); +	} else { +		
spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1); +		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush, +				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS)); +	} +} | 
