Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  184
1 file changed, 126 insertions, 58 deletions
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8fbe47703d47..66b5cbdb43b9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
 			fec16_to_cpu(bdp->cbd_sc),
 			fec32_to_cpu(bdp->cbd_bufaddr),
 			fec16_to_cpu(bdp->cbd_datlen),
-			txq->tx_skbuff[index]);
+			txq->tx_buf[index].skb);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	skb_tx_timestamp(skb);
 
-	/* Make sure the update to bdp and tx_skbuff are performed before
-	 * txq->bd.cur.
-	 */
+	/* Make sure the update to bdp is performed before txq->bd.cur. */
 	wmb();
 	txq->bd.cur = bdp;
 
@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
-			if (bdp->cbd_bufaddr &&
-			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
-			if (txq->tx_skbuff[i]) {
-				dev_kfree_skb_any(txq->tx_skbuff[i]);
-				txq->tx_skbuff[i] = NULL;
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				if (bdp->cbd_bufaddr &&
+				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+				if (txq->tx_buf[i].skb) {
+					dev_kfree_skb_any(txq->tx_buf[i].skb);
+					txq->tx_buf[i].skb = NULL;
+				}
+			} else {
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		}
@@ -1357,9 +1372,10 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 }
 
 static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 {
 	struct	fec_enet_private *fep;
+	struct xdp_frame *xdpf;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
@@ -1387,16 +1403,39 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-		skb = txq->tx_skbuff[index];
-		txq->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-			dma_unmap_single(&fep->pdev->dev,
-					 fec32_to_cpu(bdp->cbd_bufaddr),
-					 fec16_to_cpu(bdp->cbd_datlen),
-					 DMA_TO_DEVICE);
-		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb)
-			goto skb_done;
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			skb = txq->tx_buf[index].skb;
+			txq->tx_buf[index].skb = NULL;
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!skb)
+				goto tx_buf_done;
+		} else {
+			/* Tx processing cannot call any XDP (or page pool) APIs if
+			 * the "budget" is 0. Because NAPI is called with budget of
+			 * 0 (such as netpoll) indicates we may be in an IRQ context,
+			 * however, we can't use the page pool from IRQ context.
+			 */
+			if (unlikely(!budget))
+				break;
+
+			xdpf = txq->tx_buf[index].xdp;
+			if (bdp->cbd_bufaddr)
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!xdpf) {
+				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+				goto tx_buf_done;
+			}
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1415,21 +1454,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 				ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += skb->len;
-		}
-
-		/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-		 * are to time stamp the packet, so we still need to check time
-		 * stamping enabled flag.
-		 */
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-			     fep->hwts_tx_en) &&
-		    fep->bufdesc_ex) {
-			struct skb_shared_hwtstamps shhwtstamps;
-			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-			skb_tstamp_tx(skb, &shhwtstamps);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+				ndev->stats.tx_bytes += skb->len;
+			else
+				ndev->stats.tx_bytes += xdpf->len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1438,10 +1467,32 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		if (status & BD_ENET_TX_DEF)
 			ndev->stats.collisions++;
 
-		/* Free the sk buffer associated with this last transmit */
-		dev_kfree_skb_any(skb);
-skb_done:
-		/* Make sure the update to bdp and tx_skbuff are performed
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+			 * are to time stamp the packet, so we still need to check time
+			 * stamping enabled flag.
+			 */
+			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+				     fep->hwts_tx_en) && fep->bufdesc_ex) {
+				struct skb_shared_hwtstamps shhwtstamps;
+				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+				skb_tstamp_tx(skb, &shhwtstamps);
+			}
+
+			/* Free the sk buffer associated with this last transmit */
+			dev_kfree_skb_any(skb);
+		} else {
+			xdp_return_frame(xdpf);
+
+			txq->tx_buf[index].xdp = NULL;
+			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		}
+
+tx_buf_done:
+		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
 		 */
 		wmb();
@@ -1465,14 +1516,14 @@ skb_done:
 		writel(0, txq->bd.reg_desc_active);
 }
 
-static void fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev, int budget)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 
 	/* Make sure that AVB queues are processed first. */
 	for (i = fep->num_tx_queues - 1; i >= 0; i--)
-		fec_enet_tx_queue(ndev, i);
+		fec_enet_tx_queue(ndev, i, budget);
 }
 
 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
@@ -1815,7 +1866,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 
 	do {
 		done += fec_enet_rx(ndev, budget - done);
-		fec_enet_tx(ndev);
+		fec_enet_tx(ndev, budget);
 	} while ((done < budget) && fec_enet_collect_events(fep));
 
 	if (done < budget) {
@@ -3249,9 +3300,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
-			skb = txq->tx_skbuff[i];
-			txq->tx_skbuff[i] = NULL;
-			dev_kfree_skb(skb);
+
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				skb = txq->tx_buf[i].skb;
+				txq->tx_buf[i].skb = NULL;
+				dev_kfree_skb(skb);
+			} else {
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+			}
 		}
 	}
 }
@@ -3296,8 +3357,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
 
 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
-		txq->tx_wake_threshold =
-			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
 
 		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
 					txq->bd.ring_size * TSO_HEADER_SIZE,
@@ -3732,12 +3792,18 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
 			return -EOPNOTSUPP;
 
+		if (!bpf->prog)
+			xdp_features_clear_redirect_target(dev);
+
 		if (is_run) {
 			napi_disable(&fep->napi);
 			netif_tx_disable(dev);
 		}
 
 		old_prog = xchg(&fep->xdp_prog, bpf->prog);
+		if (old_prog)
+			bpf_prog_put(old_prog);
+
 		fec_restart(dev);
 
 		if (is_run) {
@@ -3745,8 +3811,8 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 			netif_tx_start_all_queues(dev);
 		}
 
-		if (old_prog)
-			bpf_prog_put(old_prog);
+		if (bpf->prog)
+			xdp_features_set_redirect_target(dev, false);
 
 		return 0;
 
@@ -3778,7 +3844,7 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
-		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
 		return -EBUSY;
 	}
 
@@ -3811,7 +3877,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	txq->tx_skbuff[index] = NULL;
+	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+	txq->tx_buf[index].xdp = frame;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -3857,6 +3924,8 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 
 	__netif_tx_lock(nq, cpu);
 
+	/* Avoid tx timeout as XDP shares the queue with kernel stack */
+	txq_trans_cond_update(nq);
 	for (i = 0; i < num_frames; i++) {
 		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
 			break;
@@ -4016,8 +4085,7 @@ static int fec_enet_init(struct net_device *ndev)
 
 	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
 		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
-				     NETDEV_XDP_ACT_REDIRECT |
-				     NETDEV_XDP_ACT_NDO_XMIT;
+				     NETDEV_XDP_ACT_REDIRECT;
 
 	fec_restart(ndev);
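
Note on the tx_buf bookkeeping: the completion and cleanup paths above branch on txq->tx_buf[index].type to tell skb descriptors apart from XDP descriptors. The tx_buf array and the FEC_TXBUF_T_* constants live in drivers/net/ethernet/freescale/fec.h, which is not part of this diff. The following is only a sketch of what such a per-descriptor entry could look like, inferred from the .skb/.xdp/.type accesses visible in the hunks above; it is not copied from the real header.

#include <linux/skbuff.h>	/* struct sk_buff */
#include <net/xdp.h>		/* struct xdp_frame */

/* Hypothetical sketch, inferred from this diff's field accesses; the
 * authoritative definitions are in fec.h and may differ in detail.
 */
enum fec_txbuf_type {
	FEC_TXBUF_T_SKB,	/* descriptor carries a socket buffer (default) */
	FEC_TXBUF_T_XDP_NDO,	/* descriptor carries an xdp_frame from ndo_xdp_xmit */
};

struct fec_tx_buffer {
	union {
		struct sk_buff *skb;	/* valid when type == FEC_TXBUF_T_SKB */
		struct xdp_frame *xdp;	/* valid when type == FEC_TXBUF_T_XDP_NDO */
	};
	enum fec_txbuf_type type;	/* selects which union member is live */
};

A union fits here because a descriptor owns either an skb or an xdp_frame, never both at once, and resetting type to FEC_TXBUF_T_SKB after cleanup (as the hunks above do) keeps the ordinary skb path the default.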
