Diffstat (limited to 'drivers/gpu/drm/vc4/vc4_dsi.c')
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c | 152
1 file changed, 120 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 98308a17e4ed..b7b2c76770dc 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -181,8 +181,50 @@
 
 #define DSI0_TXPKT_PIX_FIFO		0x20 /* AKA PIX_FIFO */
 
-#define DSI0_INT_STAT		0x24
-#define DSI0_INT_EN		0x28
+#define DSI0_INT_STAT			0x24
+#define DSI0_INT_EN			0x28
+# define DSI0_INT_FIFO_ERR		BIT(25)
+# define DSI0_INT_CMDC_DONE_MASK	VC4_MASK(24, 23)
+# define DSI0_INT_CMDC_DONE_SHIFT	23
+#  define DSI0_INT_CMDC_DONE_NO_REPEAT		1
+#  define DSI0_INT_CMDC_DONE_REPEAT		3
+# define DSI0_INT_PHY_DIR_RTF		BIT(22)
+# define DSI0_INT_PHY_D1_ULPS		BIT(21)
+# define DSI0_INT_PHY_D1_STOP		BIT(20)
+# define DSI0_INT_PHY_RXLPDT		BIT(19)
+# define DSI0_INT_PHY_RXTRIG		BIT(18)
+# define DSI0_INT_PHY_D0_ULPS		BIT(17)
+# define DSI0_INT_PHY_D0_LPDT		BIT(16)
+# define DSI0_INT_PHY_D0_FTR		BIT(15)
+# define DSI0_INT_PHY_D0_STOP		BIT(14)
+/* Signaled when the clock lane enters the given state. */
+# define DSI0_INT_PHY_CLK_ULPS		BIT(13)
+# define DSI0_INT_PHY_CLK_HS		BIT(12)
+# define DSI0_INT_PHY_CLK_FTR		BIT(11)
+/* Signaled on timeouts */
+# define DSI0_INT_PR_TO			BIT(10)
+# define DSI0_INT_TA_TO			BIT(9)
+# define DSI0_INT_LPRX_TO		BIT(8)
+# define DSI0_INT_HSTX_TO		BIT(7)
+/* Contention on a line when trying to drive the line low */
+# define DSI0_INT_ERR_CONT_LP1		BIT(6)
+# define DSI0_INT_ERR_CONT_LP0		BIT(5)
+/* Control error: incorrect line state sequence on data lane 0. */
+# define DSI0_INT_ERR_CONTROL		BIT(4)
+# define DSI0_INT_ERR_SYNC_ESC		BIT(3)
+# define DSI0_INT_RX2_PKT		BIT(2)
+# define DSI0_INT_RX1_PKT		BIT(1)
+# define DSI0_INT_CMD_PKT		BIT(0)
+
+#define DSI0_INTERRUPTS_ALWAYS_ENABLED	(DSI0_INT_ERR_SYNC_ESC | \
+					 DSI0_INT_ERR_CONTROL |	 \
+					 DSI0_INT_ERR_CONT_LP0 | \
+					 DSI0_INT_ERR_CONT_LP1 | \
+					 DSI0_INT_HSTX_TO |	 \
+					 DSI0_INT_LPRX_TO |	 \
+					 DSI0_INT_TA_TO |	 \
+					 DSI0_INT_PR_TO)
+
 # define DSI1_INT_PHY_D3_ULPS		BIT(30)
 # define DSI1_INT_PHY_D3_STOP		BIT(29)
 # define DSI1_INT_PHY_D2_ULPS		BIT(28)
@@ -761,6 +803,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
 	list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
 		if (iter->funcs->disable)
 			iter->funcs->disable(iter);
+
+		if (iter == dsi->bridge)
+			break;
 	}
 
 	vc4_dsi_ulps(dsi, true);
@@ -805,11 +850,9 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
 	/* Find what divider gets us a faster clock than the requested
 	 * pixel clock.
 	 */
-	for (divider = 1; divider < 8; divider++) {
-		if (parent_rate / divider < pll_clock) {
-			divider--;
+	for (divider = 1; divider < 255; divider++) {
+		if (parent_rate / (divider + 1) < pll_clock)
 			break;
-		}
 	}
 
 	/* Now that we've picked a PLL divider, calculate back to its
@@ -894,6 +937,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 
 		DSI_PORT_WRITE(PHY_AFEC0, afec0);
 
+		/* AFEC reset hold time */
+		mdelay(1);
+
 		DSI_PORT_WRITE(PHY_AFEC1,
 			       VC4_SET_FIELD(6,  DSI0_PHY_AFEC1_IDR_DLANE1) |
 			       VC4_SET_FIELD(6,  DSI0_PHY_AFEC1_IDR_DLANE0) |
@@ -1060,12 +1106,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 		DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN);
 
 	/* Bring AFE out of reset. */
-	if (dsi->variant->port == 0) {
-	} else {
-		DSI_PORT_WRITE(PHY_AFEC0,
-			       DSI_PORT_READ(PHY_AFEC0) &
-			       ~DSI1_PHY_AFEC0_RESET);
-	}
+	DSI_PORT_WRITE(PHY_AFEC0,
+		       DSI_PORT_READ(PHY_AFEC0) &
+		       ~DSI_PORT_BIT(PHY_AFEC0_RESET));
 
 	vc4_dsi_ulps(dsi, false);
 
@@ -1184,13 +1227,28 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
 	/* Enable the appropriate interrupt for the transfer completion. */
 	dsi->xfer_result = 0;
 	reinit_completion(&dsi->xfer_completion);
-	DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
-	if (msg->rx_len) {
-		DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
-					DSI1_INT_PHY_DIR_RTF));
+	if (dsi->variant->port == 0) {
+		DSI_PORT_WRITE(INT_STAT,
+			       DSI0_INT_CMDC_DONE_MASK | DSI1_INT_PHY_DIR_RTF);
+		if (msg->rx_len) {
+			DSI_PORT_WRITE(INT_EN, (DSI0_INTERRUPTS_ALWAYS_ENABLED |
+						DSI0_INT_PHY_DIR_RTF));
+		} else {
+			DSI_PORT_WRITE(INT_EN,
+				       (DSI0_INTERRUPTS_ALWAYS_ENABLED |
+					VC4_SET_FIELD(DSI0_INT_CMDC_DONE_NO_REPEAT,
+						      DSI0_INT_CMDC_DONE)));
+		}
 	} else {
-		DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
-					DSI1_INT_TXPKT1_DONE));
+		DSI_PORT_WRITE(INT_STAT,
+			       DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF);
+		if (msg->rx_len) {
+			DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+						DSI1_INT_PHY_DIR_RTF));
+		} else {
+			DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED |
+						DSI1_INT_TXPKT1_DONE));
+		}
 	}
 
 	/* Send the packet. */
@@ -1207,7 +1265,7 @@ static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
 		ret = dsi->xfer_result;
 	}
 
-	DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+	DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED));
 
 	if (ret)
 		goto reset_fifo_and_return;
@@ -1253,7 +1311,7 @@ reset_fifo_and_return:
 		       DSI_PORT_BIT(CTRL_RESET_FIFOS));
 
 	DSI_PORT_WRITE(TXPKT1C, 0);
-	DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED);
+	DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED));
 	return ret;
 }
 
@@ -1390,26 +1448,28 @@ static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
 	DSI_PORT_WRITE(INT_STAT, stat);
 
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_ERR_SYNC_ESC, "LPDT sync");
+			 DSI_PORT_BIT(INT_ERR_SYNC_ESC), "LPDT sync");
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_ERR_CONTROL, "data lane 0 sequence");
+			 DSI_PORT_BIT(INT_ERR_CONTROL), "data lane 0 sequence");
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_ERR_CONT_LP0, "LP0 contention");
+			 DSI_PORT_BIT(INT_ERR_CONT_LP0), "LP0 contention");
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_ERR_CONT_LP1, "LP1 contention");
+			 DSI_PORT_BIT(INT_ERR_CONT_LP1), "LP1 contention");
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_HSTX_TO, "HSTX timeout");
+			 DSI_PORT_BIT(INT_HSTX_TO), "HSTX timeout");
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_LPRX_TO, "LPRX timeout");
+			 DSI_PORT_BIT(INT_LPRX_TO), "LPRX timeout");
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_TA_TO, "turnaround timeout");
+			 DSI_PORT_BIT(INT_TA_TO), "turnaround timeout");
 	dsi_handle_error(dsi, &ret, stat,
-			 DSI1_INT_PR_TO, "peripheral reset timeout");
+			 DSI_PORT_BIT(INT_PR_TO), "peripheral reset timeout");
 
-	if (stat & (DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF)) {
+	if (stat & ((dsi->variant->port ? DSI1_INT_TXPKT1_DONE :
+					  DSI0_INT_CMDC_DONE_MASK) |
+		    DSI_PORT_BIT(INT_PHY_DIR_RTF))) {
 		complete(&dsi->xfer_completion);
 		ret = IRQ_HANDLED;
-	} else if (stat & DSI1_INT_HSTX_TO) {
+	} else if (stat & DSI_PORT_BIT(INT_HSTX_TO)) {
 		complete(&dsi->xfer_completion);
 		dsi->xfer_result = -ETIMEDOUT;
 		ret = IRQ_HANDLED;
@@ -1487,13 +1547,29 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi)
 				      dsi->clk_onecell);
 }
 
+static void vc4_dsi_dma_mem_release(void *ptr)
+{
+	struct vc4_dsi *dsi = ptr;
+	struct device *dev = &dsi->pdev->dev;
+
+	dma_free_coherent(dev, 4, dsi->reg_dma_mem, dsi->reg_dma_paddr);
+	dsi->reg_dma_mem = NULL;
+}
+
+static void vc4_dsi_dma_chan_release(void *ptr)
+{
+	struct vc4_dsi *dsi = ptr;
+
+	dma_release_channel(dsi->reg_dma_chan);
+	dsi->reg_dma_chan = NULL;
+}
+
 static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct vc4_dsi *dsi = dev_get_drvdata(dev);
 	struct vc4_dsi_encoder *vc4_dsi_encoder;
-	dma_cap_mask_t dma_mask;
 	int ret;
 
 	dsi->variant = of_device_get_match_data(dev);
@@ -1504,7 +1580,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&dsi->bridge_chain);
-	vc4_dsi_encoder->base.type = VC4_ENCODER_TYPE_DSI1;
+	vc4_dsi_encoder->base.type = dsi->variant->port ?
+			VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
 	vc4_dsi_encoder->dsi = dsi;
 	dsi->encoder = &vc4_dsi_encoder->base.base;
 
@@ -1527,6 +1604,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 	 * so set up a channel for talking to it.
 	 */
 	if (dsi->variant->broken_axi_workaround) {
+		dma_cap_mask_t dma_mask;
+
 		dsi->reg_dma_mem = dma_alloc_coherent(dev, 4,
 						      &dsi->reg_dma_paddr,
 						      GFP_KERNEL);
@@ -1535,8 +1614,13 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 			return -ENOMEM;
 		}
 
+		ret = devm_add_action_or_reset(dev, vc4_dsi_dma_mem_release, dsi);
+		if (ret)
+			return ret;
+
 		dma_cap_zero(dma_mask);
 		dma_cap_set(DMA_MEMCPY, dma_mask);
+
 		dsi->reg_dma_chan = dma_request_chan_by_mask(&dma_mask);
 		if (IS_ERR(dsi->reg_dma_chan)) {
 			ret = PTR_ERR(dsi->reg_dma_chan);
@@ -1546,6 +1630,10 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 			return ret;
 		}
 
+		ret = devm_add_action_or_reset(dev, vc4_dsi_dma_chan_release, dsi);
+		if (ret)
+			return ret;
+
 		/* Get the physical address of the device's registers.  The
 		 * struct resource for the regs gives us the bus address
 		 * instead.
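Note on the DSI_PORT_BIT() conversions above: the patch replaces hard-coded DSI1_* names with a per-port dispatch keyed off dsi->variant->port, so the same code paths serve DSI0 and DSI1. The stand-alone C sketch below only illustrates that token-pasting pattern; the struct layout and the DSI1 bit value are simplified, hypothetical stand-ins, not the driver's actual definitions (those live in vc4_dsi.c alongside DSI_PORT_READ()/DSI_PORT_WRITE()).

/* Illustrative sketch of the DSI0/DSI1 port-dispatch pattern. */
#include <stdio.h>

/* Hypothetical bit positions for illustration only; the real values are
 * the BIT(...) definitions in the hunk above and in vc4_dsi.c. */
#define DSI0_INT_HSTX_TO	(1u << 7)
#define DSI1_INT_HSTX_TO	(1u << 21)

struct vc4_dsi_variant {
	unsigned int port;	/* 0 for DSI0, 1 for DSI1 */
};

struct vc4_dsi {
	const struct vc4_dsi_variant *variant;
};

/* Token pasting picks the DSI0_ or DSI1_ constant for the bound port;
 * it assumes a local `dsi` pointer is in scope, as in the driver. */
#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)

int main(void)
{
	static const struct vc4_dsi_variant dsi0_variant = { .port = 0 };
	static const struct vc4_dsi_variant dsi1_variant = { .port = 1 };
	struct vc4_dsi port0 = { .variant = &dsi0_variant };
	struct vc4_dsi port1 = { .variant = &dsi1_variant };
	struct vc4_dsi *dsi;

	dsi = &port0;
	printf("port %u HSTX_TO bit: 0x%08x\n", dsi->variant->port,
	       DSI_PORT_BIT(INT_HSTX_TO));

	dsi = &port1;
	printf("port %u HSTX_TO bit: 0x%08x\n", dsi->variant->port,
	       DSI_PORT_BIT(INT_HSTX_TO));

	return 0;
}

On the two devm_add_action_or_reset() additions: that helper registers a cleanup callback that runs automatically when the device is unbound (or immediately, if registration itself fails), which is why the later error paths in vc4_dsi_bind() can simply return without explicitly freeing the DMA scratch buffer or releasing the DMA channel.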
